hexsha
stringlengths 40
40
| size
int64 4
1.02M
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
209
| max_stars_repo_name
stringlengths 5
121
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
209
| max_issues_repo_name
stringlengths 5
121
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
209
| max_forks_repo_name
stringlengths 5
121
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
1.02M
| avg_line_length
float64 1.07
66.1k
| max_line_length
int64 4
266k
| alphanum_fraction
float64 0.01
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c609a08a9938ec4ee47659bea14f75c2679c6673
| 2,800
|
py
|
Python
|
server/server/db/user.py
|
Panl/jianshi
|
6f1d1f1bf44e9b7bca895729d13aea0a0db92a2e
|
[
"Apache-2.0"
] | null | null | null |
server/server/db/user.py
|
Panl/jianshi
|
6f1d1f1bf44e9b7bca895729d13aea0a0db92a2e
|
[
"Apache-2.0"
] | null | null | null |
server/server/db/user.py
|
Panl/jianshi
|
6f1d1f1bf44e9b7bca895729d13aea0a0db92a2e
|
[
"Apache-2.0"
] | 2
|
2021-03-21T09:27:19.000Z
|
2021-05-25T05:34:11.000Z
|
#!/usr/bin/python
# coding: utf-8
import time
import pymysql
import pymysql.cursors
from flask import Flask, g
from server import app
from server.util import safetyutils
# @app.route("/user")
# def user():
# return "root from User, app.route works"
def _conn(cursorclass=pymysql.cursors.Cursor):
return pymysql.connect(host='192.168.33.10',
user='emma',
password='emma',
db='jianshi',
charset='utf8mb4',
cursorclass=cursorclass)
def _get_conn(cursorclass=pymysql.cursors.Cursor):
with app.app_context():
if not hasattr(g, 'db_conn'):
g.db_conn=_conn(cursorclass)
return g.db_conn
def clear_user_table():
try:
with _get_conn().cursor() as cursor:
sql = "drop table `User`"
cursor.execute(sql)
finally:
_get_conn().close()
def init_db():
"""Initializes the database."""
try:
with _get_conn().cursor() as cursor:
# execute schema sql file
with app.open_resource('db/schema/0001/user.sql', mode='r') as f:
sql = f.read()
print sql
result = cursor.execute(sql)
print result
finally:
print _get_conn().close()
def create_user(name, password):
if name is None or password is None:
return False
name_hash = abs(hash(name))
password = safetyutils.get_hash_password(password)
conn = _get_conn(pymysql.cursors.DictCursor)
time_created = int(time.time())
new_user_id = -1
try:
with conn.cursor() as cursor:
sql = "insert into `User` (`name`, `name_hash`, `password`, `time_created`, `time_modified`) values (%s, %s, %s, %s, %s)"
result = cursor.execute(sql, (str(name), str(name_hash), str(password), str(time_created), '0'))
new_user_id = cursor.lastrowid
conn.commit()
finally:
conn.close()
return new_user_id
def login(name, password):
if name is None or password is None:
return False
conn = _get_conn(pymysql.cursors.DictCursor)
try:
with conn.cursor() as cursor:
sql = "select * from `User` where `name` = %s "
cursor.execute(sql, (str(name)))
_user = cursor.fetchone()
if not _user:
return False
else:
return safetyutils.verify_hash_password(_user['password'], password)
finally:
conn.close()
def delete_user(user_id):
"""Delete user."""
conn = _get_conn(pymysql.cursors.DictCursor)
try:
with conn.cursor() as cursor:
sql = "delete from `User` where `id` = %s"
cursor.execute(sql, (str(user_id)))
conn.commit()
finally:
conn.close()
def get_user(user_id):
user = None
conn = _get_conn(pymysql.cursors.DictCursor)
try:
with conn.cursor() as cursor:
sql = "select * from User where id = %s"
cursor.execute(sql, str(user_id))
user = cursor.fetchall()[0]
finally:
conn.close()
return user
| 24.137931
| 124
| 0.653214
|
94c152884c4a8697fdff37f4808e88e416e2fef3
| 22,211
|
py
|
Python
|
backpack/core/derivatives/basederivatives.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 395
|
2019-10-04T09:37:52.000Z
|
2022-03-29T18:00:56.000Z
|
backpack/core/derivatives/basederivatives.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 78
|
2019-10-11T18:56:43.000Z
|
2022-03-23T01:49:54.000Z
|
backpack/core/derivatives/basederivatives.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 50
|
2019-10-03T16:31:10.000Z
|
2022-03-15T19:36:14.000Z
|
"""Base classes for more flexible Jacobians and second-order information."""
import warnings
from abc import ABC
from typing import Callable, List, Tuple
from torch import Tensor
from torch.nn import Module
from backpack.core.derivatives import shape_check
class BaseDerivatives(ABC):
"""First- and second-order partial derivatives of unparameterized module.
Note:
Throughout the code, use these conventions if possible:
- `N`: batch size
- Vectors
- Layer input shape `[N, D_in]`
- Layer output shape `[N, D_out]`
- Images
- Layer input shape `[N, C_in, H_in, W_in]`
- Layer output shape `[N, C_out, H_out, W_out]`
- `V`: vectorization axis
Definition:
For simplicity, consider the vector case, i.e. a function which maps an
`[N, D_in]` `input` into an `[N, D_out]` `output`.
The input-output Jacobian `J` of is tensor of shape `[N, D_out, N_in, D_in]`.
Partial derivatives are ordered as
`J[i, j, k, l] = 𝜕output[i, j] / 𝜕input[k, l].
The transposed input-output Jacobian `Jᵀ` has shape `[N, D_in, N, D_out]`.
Partial derivatives are ordered as
`Jᵀ[i, j, k, l] = 𝜕output[k, l] / 𝜕input[i, j]`.
In general, feature dimension indices `j, l` are product indices.
"""
@shape_check.jac_mat_prod_accept_vectors
@shape_check.jac_mat_prod_check_shapes
def jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Apply Jacobian of the output w.r.t. input to a matrix.
It is assumed that the module input has shape `[N, *]`, while the output is
of shape `[N, •]`. Both `*`, `•` denote arbitrary shapes.
Apply Jacobian to all slices among the vectorization axis.
`result[v, n, •] = ∑ₖ ∑_* J[n, •, k, *] mat[v, n, *]`.
Args:
module: Extended module.
g_inp: Gradients of the module w.r.t. its inputs.
g_out: Gradients of the module w.r.t. its outputs.
mat: Matrix the Jacobian will be applied to. Must have
shape `[V, N, *]`.
Returns:
Jacobian-matrix product. Has shape [V, N, *].
Note:
- The Jacobian can be applied without knowledge about backpropagated
derivatives. Both `g_inp` and `g_out` are usually not required and
can be set to `None`.
"""
return self._jac_mat_prod(module, g_inp, g_out, mat)
def _jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
raise NotImplementedError
@shape_check.jac_t_mat_prod_accept_vectors
@shape_check.jac_t_mat_prod_check_shapes
def jac_t_mat_prod(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mat: Tensor,
subsampling: List[int] = None,
) -> Tensor:
"""Apply transposed input-ouput Jacobian of module output to a matrix.
Implicit application of Jᵀ:
result[v, ̃n, ̃c, ̃w, ...]
= ∑_{n, c, w} Jᵀ[̃n, ̃c, ̃w, ..., n, c, w, ...] mat[v, n, c, w, ...].
Args:
module: module which derivative is calculated
g_inp: input gradients
g_out: output gradients
mat: Matrix the transposed Jacobian will be applied to.
Must have shape ``[V, *module.output.shape]``; but if used with
sub-sampling, the batch dimension is replaced by ``len(subsampling)``.
subsampling: Indices of samples along the output's batch dimension that
should be considered. Defaults to ``None`` (use all samples).
Returns:
Transposed Jacobian-matrix product.
Has shape ``[V, *module.input0.shape]``; but if used with sub-sampling,
the batch dimension is replaced by ``len(subsampling)``.
"""
return self._jac_t_mat_prod(module, g_inp, g_out, mat, subsampling=subsampling)
def _jac_t_mat_prod(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mat: Tensor,
subsampling: List[int] = None,
) -> Tensor:
raise NotImplementedError
# TODO Add shape check
# TODO Use new convention
def ea_jac_t_mat_jac_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Expectation approximation of outer product with input-output Jacobian.
Used for backpropagation in KFRA.
For `yₙ = f(xₙ) n=1,...,n`, compute `E(Jₙᵀ mat Jₙ) = 1/n ∑ₙ Jₙᵀ mat Jₙ`.
In index notation, let `output[n]=f(input[n]) n = 1,...,n`. Then,
`result[i,j]
= 1/n ∑ₙₖₗ (𝜕output[n,k] / 𝜕input[n,i]) mat[k,l] (𝜕output[n,j] / 𝜕input[n,l])
Args:
module: Extended module.
g_inp: Gradients of the module w.r.t. its inputs.
g_out: Gradients of the module w.r.t. its outputs.
mat: Matrix of shape `[D_out, D_out]`.
# noqa: DAR202
Returns:
Matrix of shape `[D_in, D_in]`.
Note:
- This operation can be applied without knowledge about backpropagated
derivatives. Both `g_inp` and `g_out` are usually not required and
can be set to `None`.
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
def hessian_is_zero(self, module: Module) -> bool:
"""Returns whether Hessian is zero.
I.e. whether ``∂²output[i] / ∂input[j] ∂input[k] = 0 ∀ i,j,k``.
Args:
module: current module to evaluate
# noqa: DAR202
Returns:
whether Hessian is zero
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
def hessian_is_diagonal(self, module: Module) -> bool:
"""Is `∂²output[i] / ∂input[j] ∂input[k]` nonzero only if `i = j = k`.
The Hessian diagonal is only defined for layers that preserve the size
of their input.
Must be implemented by descendants that don't implement ``hessian_is_zero``.
Args:
module: current module to evaluate
# noqa: DAR202
Returns:
whether Hessian is diagonal
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
# FIXME Currently returns `∂²output[i] / ∂input[i]² * g_out[0][i]`,
# which s the residual matrix diagonal, rather than the Hessian diagonal
def hessian_diagonal(
self, module: Module, g_in: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Tensor:
"""Return the Hessian diagonal `∂²output[i] / ∂input[i]²`.
Only required if `hessian_is_diagonal` returns `True`.
The Hessian diagonal is only defined for layers that preserve the size
of their input.
Args:
module: Module whose output-input Hessian diagonal is computed.
g_in: Gradients w.r.t. the module input.
g_out: Gradients w.r.t. the module output.
# noqa: DAR202
Returns:
Hessian diagonal. Has same shape as module input.
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
def hessian_is_psd(self) -> bool:
"""Is `∂²output[i] / ∂input[j] ∂input[k]` positive semidefinite (PSD).
# noqa: DAR202
Returns:
whether hessian is positive semi definite
Raises:
NotImplementedError: if not overwritten
"""
raise NotImplementedError
@shape_check.residual_mat_prod_accept_vectors
@shape_check.residual_mat_prod_check_shapes
def residual_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Multiply with the residual term.
Performs mat → [∑_{k} Hz_k(x) 𝛿z_k] mat.
Args:
module: module
g_inp: input gradients
g_out: output gradients
mat: matrix to multiply
Returns:
product
Note:
This function only has to be implemented if the residual is not
zero and not diagonal (for instance, `BatchNorm`).
"""
return self._residual_mat_prod(module, g_inp, g_out, mat)
def _residual_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
raise NotImplementedError
@staticmethod
def _reshape_like(mat: Tensor, shape: Tuple[int]) -> Tensor:
"""Reshape as like with trailing and additional 0th dimension.
If like is [N, C, H, ...], returns shape [-1, N, C, H, ...]
Args:
mat: Matrix to reshape.
shape: Trailing target shape.
Returns:
reshaped matrix
"""
return mat.reshape(-1, *shape)
@classmethod
def reshape_like_input(
cls, mat: Tensor, module: Module, subsampling: List[int] = None
) -> Tensor:
"""Reshapes matrix according to input.
Args:
mat: matrix to reshape
module: module which input shape is used
subsampling: Indices of active samples. ``None`` means use all samples.
Returns:
reshaped matrix
"""
shape = list(module.input0.shape)
if subsampling is not None:
shape[0] = len(subsampling)
return cls._reshape_like(mat, shape)
@classmethod
def reshape_like_output(cls, mat: Tensor, module: Module) -> Tensor:
"""Reshapes matrix like output.
Args:
mat: matrix to reshape
module: module which output is used
Returns:
reshaped matrix
"""
return cls._reshape_like(mat, module.output.shape)
class BaseParameterDerivatives(BaseDerivatives, ABC):
"""First- and second order partial derivatives of a module with parameters.
Assumptions (true for `nn.Linear`, `nn.Conv(Transpose)Nd`, `nn.BatchNormNd`):
- Parameters are saved as `.weight` and `.bias` fields in a module
- The output is linear in the model parameters
Shape conventions:
------------------
Weight [C_w, H_w, W_w, ...] (usually 1d, 2d, 4d)
Bias [C_b, ...] (usually 1d)
For most layers, these shapes correspond to shapes of the module input or output.
"""
@shape_check.param_mjp_accept_vectors
def param_mjp(
self,
param_str: str,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mat: Tensor,
sum_batch: bool = True,
subsampling: List[int] = None,
) -> Tensor:
"""Compute matrix-Jacobian products (MJPs) of the module w.r.t. a parameter.
Handles both vector and matrix inputs. Preserves input format in output.
Internally calls out to ``_{param_str}_jac_t_mat_prod`` function that must be
implemented by descendants. It follows the same signature, but does not have
the ``param_str`` argument.
Args:
param_str: Attribute name under which the parameter is stored in the module.
module: Module whose Jacobian will be applied. Must provide access to IO.
g_inp: Gradients w.r.t. module input.
g_out: Gradients w.r.t. module output.
mat: Matrix the Jacobian will be applied to. Has shape
``[V, *module.output.shape]`` (matrix case) or same shape as
``module.output`` (vector case). If used with subsampling, has dimension
len(subsampling) instead of batch size along the batch axis.
sum_batch: Sum out the MJP's batch axis. Default: ``True``.
subsampling: Indices of samples along the output's batch dimension that
should be considered. Defaults to ``None`` (use all samples).
Returns:
Matrix-Jacobian products. Has shape ``[V, *param_shape]`` when batch
summation is enabled (same shape as parameter in the vector case). Without
batch summation, the result has shape ``[V, N, *param_shape]`` (vector case
has shape ``[N, *param_shape]``). If used with subsampling, the batch size N
is replaced by len(subsampling).
Raises:
NotImplementedError: if required method is not implemented by derivatives class
"""
# input check
shape_check.shape_like_output(mat, module, subsampling=subsampling)
method_name = f"_{param_str}_jac_t_mat_prod"
mjp = getattr(self, method_name, None)
if mjp is None:
raise NotImplementedError(
f"Computation requires implementation of {method_name}, but {self} "
f"(defining derivatives of {module}) does not implement it."
)
mjp_out = mjp(
module, g_inp, g_out, mat, sum_batch=sum_batch, subsampling=subsampling
)
# output check
shape_check.check_like_with_sum_batch(
mjp_out, module, param_str, sum_batch=sum_batch
)
shape_check.check_same_V_dim(mjp_out, mat)
return mjp_out
@shape_check.bias_jac_mat_prod_accept_vectors
@shape_check.bias_jac_mat_prod_check_shapes
def bias_jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Apply Jacobian of the output w.r.t. bias to a matrix.
Args:
module: module to perform derivatives on
g_inp: input gradients
g_out: output gradients
mat: Matrix the Jacobian will be applied to.
Must have shape [V, C_b, ...].
Returns:
Jacobian-matrix product. Has shape [V, N, C_out, H_out, ...].
"""
return self._bias_jac_mat_prod(module, g_inp, g_out, mat)
def _bias_jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
raise NotImplementedError
@shape_check.weight_jac_mat_prod_accept_vectors
@shape_check.weight_jac_mat_prod_check_shapes
def weight_jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
"""Apply Jacobian of the output w.r.t. weight to a matrix.
Args:
module: module to perform derivatives on
g_inp: input gradients
g_out: output gradients
mat: Matrix the Jacobian will be applied to.
Must have shape [V, C_w, H_w, ...].
Returns:
Jacobian-matrix product.
Has shape [V, N, C_out, H_out, ...].
"""
return self._weight_jac_mat_prod(module, g_inp, g_out, mat)
def _weight_jac_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor
) -> Tensor:
raise NotImplementedError
class BaseLossDerivatives(BaseDerivatives, ABC):
"""Second- order partial derivatives of loss functions."""
# TODO Add shape check
def sqrt_hessian(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
subsampling: List[int] = None,
) -> Tensor:
"""Symmetric factorization ('sqrt') of the loss Hessian.
The Hessian factorization is returned in format ``Hs = [D, N, D]``, where
``Hs[:, n, :]`` is the Hessian factorization for the ``n``th sample, i.e.
``Hs[:, n, :]ᵀ Hs[:, n, :]`` is the Hessian w.r.t. to the ``n``th sample.
Args:
module: Loss layer whose factorized Hessian will be computed.
g_inp: Gradients w.r.t. module input.
g_out: Gradients w.r.t. module output.
subsampling: Indices of data samples to be considered. Default of ``None``
uses all data in the mini-batch.
Returns:
Symmetric factorization of the loss Hessian for each sample. If the input
to the loss has shape ``[N, D]``, this is a tensor of shape ``[D, N, D]``;
if used with sub-sampling, ``N`` is replaced by ``len(subsampling)``.
For fixed ``n``, squaring the matrix implied by the slice ``[:, n, :]``
results in the loss Hessian w.r.t. to sample ``n``.
"""
self._check_2nd_order_make_sense(module, g_out)
return self._sqrt_hessian(module, g_inp, g_out, subsampling=subsampling)
def _sqrt_hessian(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
subsampling: List[int] = None,
) -> Tensor:
raise NotImplementedError
# TODO Add shape check
def sqrt_hessian_sampled(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mc_samples: int = 1,
subsampling: List[int] = None,
) -> Tensor:
"""A Monte-Carlo sampled symmetric factorization of the loss Hessian.
The Hessian factorization is returned in format ``Hs = [M, N, D]``, where
``Hs[:, n, :]`` approximates the Hessian factorization for the ``n``th sample,
i.e. ``Hs[:, n, :]ᵀ Hs[:, n, :]ᵀ`` approximates the Hessian w.r.t. to sample
``n``.
Args:
module: Loss layer whose factorized Hessian will be computed.
g_inp: Gradients w.r.t. module input.
g_out: Gradients w.r.t. module output.
mc_samples: Number of samples used for MC approximation.
subsampling: Indices of data samples to be considered. Default of ``None``
uses all data in the mini-batch.
Returns:
Symmetric factorization of the loss Hessian for each sample. If the input
to the loss has shape ``[N, D]``, this is a tensor of shape ``[M, N, D]``
when using ``M`` MC samples; if used with sub-sampling, ``N`` is replaced
by ``len(subsampling)``. For fixed ``n``, squaring the matrix implied by the
slice ``[:, n, :]`` approximates the loss Hessian w.r.t. to sample ``n``.
"""
self._check_2nd_order_make_sense(module, g_out)
return self._sqrt_hessian_sampled(
module, g_inp, g_out, mc_samples=mc_samples, subsampling=subsampling
)
def _sqrt_hessian_sampled(
self,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
mc_samples: int = 1,
subsampling=None,
) -> Tensor:
raise NotImplementedError
@shape_check.make_hessian_mat_prod_accept_vectors
@shape_check.make_hessian_mat_prod_check_shapes
def make_hessian_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Callable[[Tensor], Tensor]:
"""Multiplication of the input Hessian with a matrix.
Return a function that maps mat to H * mat.
Args:
module: module to perform derivatives on
g_inp: input gradients
g_out: output gradients
Returns:
function that maps mat to H * mat
"""
self._check_2nd_order_make_sense(module, g_out)
return self._make_hessian_mat_prod(module, g_inp, g_out)
def _make_hessian_mat_prod(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Callable[[Tensor], Tensor]:
raise NotImplementedError
# TODO Add shape check
def sum_hessian(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Tensor:
"""Loss Hessians, summed over the batch dimension.
Args:
module: module to perform derivatives on
g_inp: input gradients
g_out: output gradients
Returns:
sum of hessians
"""
self._check_2nd_order_make_sense(module, g_out)
return self._sum_hessian(module, g_inp, g_out)
def _sum_hessian(
self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]
) -> Tensor:
raise NotImplementedError
def _check_2nd_order_make_sense(self, module: Module, g_out: Tuple[Tensor]) -> None:
"""Verify conditions for 2nd-order extensions to be working.
2nd-order extensions are only guaranteed to work if the `loss`,
on which `backward()` is called, is a scalar that has not been
modified further after passing through the loss function module.
Args:
module: module to perform derivatives on
g_out: output gradients
"""
self._check_output_is_scalar(module)
self._check_loss_has_not_been_modified(module, g_out)
@classmethod
def _check_output_is_scalar(cls, module: Module) -> None:
"""Raise an exception is the module output is not a scalar.
Args:
module: module to perform derivatives on
Raises:
ValueError: if output is not scalar
"""
if module.output.numel() != 1:
raise ValueError(
"Output must be scalar. Got {}".format(module.output.shape)
)
@classmethod
def _check_loss_has_not_been_modified(
cls, module: Module, g_out: Tuple[Tensor]
) -> None:
"""Raise a warning if the module output seems to have been changed.
Args:
module: module to perform derivatives on
g_out: output gradients
"""
grad_out_is_identity = g_out is None or (g_out[0] == 1.0).all().item()
if not grad_out_is_identity:
warnings.warn(
"The output of {} seems to have been modified.".format(module)
+ " Backpack might give wrong second-order information."
+ " Make sure you call backward() on the output of a loss"
+ " function module from torch.nn",
UserWarning,
)
| 35.709003
| 91
| 0.600873
|
3a308f19f79fb2ed9cc5dcd557ab43105ed854bb
| 1,041
|
py
|
Python
|
default_filled_in_text.py
|
PCSailor/python_openpyxl_dcflog
|
ee10a3cde550b0d76fd033912de32af38d010589
|
[
"MIT"
] | null | null | null |
default_filled_in_text.py
|
PCSailor/python_openpyxl_dcflog
|
ee10a3cde550b0d76fd033912de32af38d010589
|
[
"MIT"
] | null | null | null |
default_filled_in_text.py
|
PCSailor/python_openpyxl_dcflog
|
ee10a3cde550b0d76fd033912de32af38d010589
|
[
"MIT"
] | null | null | null |
''' From Page 11 '''
# Yes or No values 9 and 696969
sheet.cell(row=row, column=col).value = 'Yes / No'
sheet.cell(row=row, column=col).font = Font(size = 9, color='696969')
# ✓ X values 8 and DCDCDC
sheet.cell(row=row, column=col).value = '✓ X'
sheet.cell(row=row, column=col).font = Font(size=8, color='DCDCDC')
# RH% 8 and 696969
sheet.cell(row=row, column=col).value = '%RH'
sheet.cell(row=row, column=col).font = Font(size=8, color='696969')
# Hz 8 and 696969
sheet.cell(row=row, column=col).value = 'Hz'
sheet.cell(row=row, column=col).font = Font(size=8, color='696969')
# D/P 8 and 696969
sheet.cell(row=row, column=col).value = 'D/P'
sheet.cell(row=row, column=col).font = Font(size=8, color='696969')
# Colored Cells
# Dark Grey
sheet.cell(row=row, column=col).fill = PatternFill(fgColor='C0C0C0', fill_type = 'solid')
# Light Grey
sheet.cell(row=row, column=col).fill = PatternFill(fgColor='C0C0C0', fill_type = 'solid')
| 41.64
| 89
| 0.618636
|
4b03791addf9e6148e43e61439fc88518eae5dff
| 3,823
|
py
|
Python
|
zentral/contrib/nagios/views.py
|
arubdesu/zentral
|
ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190
|
[
"Apache-2.0"
] | 634
|
2015-10-30T00:55:40.000Z
|
2022-03-31T02:59:00.000Z
|
zentral/contrib/nagios/views.py
|
arubdesu/zentral
|
ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190
|
[
"Apache-2.0"
] | 145
|
2015-11-06T00:17:33.000Z
|
2022-03-16T13:30:31.000Z
|
zentral/contrib/nagios/views.py
|
arubdesu/zentral
|
ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190
|
[
"Apache-2.0"
] | 103
|
2015-11-07T07:08:49.000Z
|
2022-03-18T17:34:36.000Z
|
import logging
import os.path
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import View, ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from zentral.conf import settings
from zentral.utils.api_views import APIAuthError, JSONPostAPIView
from .events import post_nagios_event
from .forms import NagiosInstanceForm
from .models import NagiosInstance
logger = logging.getLogger('zentral.contrib.nagios.views')
# setup > nagios instances
class NagiosInstancesView(LoginRequiredMixin, ListView):
model = NagiosInstance
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["setup"] = True
nagios_instances_count = len(ctx["object_list"])
if nagios_instances_count == 0 or nagios_instances_count > 1:
suffix = "s"
else:
suffix = ""
ctx["title"] = "{} nagios instance{}".format(nagios_instances_count, suffix)
return ctx
class CreateNagiosInstanceView(LoginRequiredMixin, CreateView):
model = NagiosInstance
form_class = NagiosInstanceForm
success_url = reverse_lazy("nagios:nagios_instances")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["setup"] = True
ctx["title"] = "Create nagios instance"
return ctx
class DownloadNagiosInstanceEventHandlerView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
nagios_instance = get_object_or_404(NagiosInstance, pk=kwargs["pk"])
base_dir = os.path.dirname(os.path.abspath(__file__))
event_handler = os.path.join(base_dir, "event_handlers", "zentral_event_handlers_py27.py")
with open(event_handler, "r") as script_src_f:
script_src = script_src_f.read()
script_src = script_src.replace("%SECRET%", nagios_instance.secret)
script_src = script_src.replace("%TLS_HOSTNAME%", settings["api"]["tls_hostname"])
fullchain = ""
if settings['api'].get("distribute_tls_server_certs", True):
fullchain = settings["api"]["tls_fullchain"]
script_src = script_src.replace("%FULLCHAIN%", fullchain)
response = HttpResponse(script_src, content_type="text/x-python")
response['Content-Disposition'] = 'attachment; filename="{}"'.format(os.path.basename(script_src_f.name))
return response
class UpdateNagiosInstanceView(LoginRequiredMixin, UpdateView):
model = NagiosInstance
form_class = NagiosInstanceForm
success_url = reverse_lazy("nagios:nagios_instances")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["setup"] = True
ctx["title"] = "Update nagios instance"
return ctx
class DeleteNagiosInstanceView(LoginRequiredMixin, DeleteView):
model = NagiosInstance
success_url = reverse_lazy("nagios:nagios_instances")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["setup"] = True
ctx["title"] = "Delete nagios instance"
return ctx
# API
class PostEventView(JSONPostAPIView):
def check_request_secret(self, request, *args, **kwargs):
secret = request.META.get("HTTP_ZENTRAL_API_SECRET", None)
if not secret:
raise APIAuthError
try:
self.nagios_instance = NagiosInstance.objects.select_related("business_unit").get(secret=secret)
except NagiosInstance.DoesNotExist:
raise APIAuthError
def do_post(self, data):
post_nagios_event(self.nagios_instance, self.user_agent, self.ip, data)
return {}
| 36.409524
| 113
| 0.702066
|
606846719b944176e8ad5241e354a8124654c358
| 28,864
|
py
|
Python
|
pysnmp-with-texts/CISCO-SCAS-BB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/CISCO-SCAS-BB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/CISCO-SCAS-BB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-SCAS-BB-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-SCAS-BB-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:11:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
linkIndex, spvIndex, linkModuleIndex, pmoduleIndex = mibBuilder.importSymbols("PCUBE-SE-MIB", "linkIndex", "spvIndex", "linkModuleIndex", "pmoduleIndex")
pcubeWorkgroup, pcubeModules = mibBuilder.importSymbols("PCUBE-SMI", "pcubeWorkgroup", "pcubeModules")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Unsigned32, iso, Counter32, Counter64, Integer32, ModuleIdentity, TimeTicks, Gauge32, IpAddress, MibIdentifier, Bits, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Unsigned32", "iso", "Counter32", "Counter64", "Integer32", "ModuleIdentity", "TimeTicks", "Gauge32", "IpAddress", "MibIdentifier", "Bits", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
pcubeEngageMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5655, 2, 4))
pcubeEngageMIB.setRevisions(('2006-05-10 00:00', '2004-12-21 00:00', '2004-07-01 00:00', '2002-07-03 20:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: pcubeEngageMIB.setRevisionsDescriptions(('MIB revised as a part of integration into Cisco SNMP MIB standard. Changed contacts, Added OBJECT-GROUPS, Added MODULE-COMPLIANCE.', "Main SNMP MIB for P-cube's Engage Applictaion, revised for Engage 2.5.", "Main SNMP MIB for P-cube's Engage Applictaion.", 'This MIB provides runtime status and monitoring capabilities for the SCAS BB application.',))
if mibBuilder.loadTexts: pcubeEngageMIB.setLastUpdated('200605100000Z')
if mibBuilder.loadTexts: pcubeEngageMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: pcubeEngageMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-sce@cisco.com')
if mibBuilder.loadTexts: pcubeEngageMIB.setDescription('Main SNMP MIB for Cisco SCAS BB application which runs over Service Control Engine devices.')
pcubeEngageObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 4, 2))
pcubeEngageConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 2, 4, 3))
pcubeEngageGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 2, 4, 3, 1))
pcubeEngageCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 2, 4, 3, 2))
serviceGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 4, 2, 1))
linkGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2))
packageGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3))
subscriberGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4))
serviceCounterGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5))
serviceTable = MibIdentifier((1, 3, 6, 1, 4, 1, 5655, 4, 2, 1, 1))
linkServiceUsageTable = MibTable((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1), )
if mibBuilder.loadTexts: linkServiceUsageTable.setStatus('current')
if mibBuilder.loadTexts: linkServiceUsageTable.setDescription('The Link Service-usage table provides information per link per global-scope service-counter.')
linkServiceUsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1), ).setIndexNames((0, "PCUBE-SE-MIB", "linkModuleIndex"), (0, "PCUBE-SE-MIB", "linkIndex"), (0, "CISCO-SCAS-BB-MIB", "globalScopeServiceCounterIndex"))
if mibBuilder.loadTexts: linkServiceUsageEntry.setStatus('current')
if mibBuilder.loadTexts: linkServiceUsageEntry.setDescription('A linkServiceUsageTable entry.')
linkServiceUsageUpVolume = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 1), Counter32()).setUnits('KBytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceUsageUpVolume.setStatus('current')
if mibBuilder.loadTexts: linkServiceUsageUpVolume.setDescription('The link service-counter upstream volume.')
linkServiceUsageDownVolume = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 2), Counter32()).setUnits('KBytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceUsageDownVolume.setStatus('current')
if mibBuilder.loadTexts: linkServiceUsageDownVolume.setDescription('The link service-counter downstream volume.')
linkServiceUsageNumSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 3), Counter32()).setUnits('sessions').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceUsageNumSessions.setStatus('current')
if mibBuilder.loadTexts: linkServiceUsageNumSessions.setDescription('The link service-counter.')
linkServiceUsageDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 4), Counter32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceUsageDuration.setStatus('current')
if mibBuilder.loadTexts: linkServiceUsageDuration.setDescription('The link service-counter aggregated session duration.')
linkServiceUsageConcurrentSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 5), Counter32()).setUnits('sessions').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceUsageConcurrentSessions.setStatus('current')
if mibBuilder.loadTexts: linkServiceUsageConcurrentSessions.setDescription('The link service-counter concurrent sessions.')
linkServiceUsageActiveSubscribers = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 6), Counter32()).setUnits('subscribers').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceUsageActiveSubscribers.setStatus('current')
if mibBuilder.loadTexts: linkServiceUsageActiveSubscribers.setDescription('The link service-counter amount of active subscribers.')
linkServiceUpDroppedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 7), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceUpDroppedPackets.setStatus('current')
if mibBuilder.loadTexts: linkServiceUpDroppedPackets.setDescription('The link service-counter number of dropped packets on the upstream, when accelerate packet drop is enabled this counter will always return 0xFFFFFFFF.')
linkServiceDownDroppedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 8), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceDownDroppedPackets.setStatus('current')
if mibBuilder.loadTexts: linkServiceDownDroppedPackets.setDescription('The link service-counter number of dropped packets on the downstream when accelerate packet drop is enabled this counter will always return 0xFFFFFFFF.')
linkServiceUpDroppedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 9), Counter32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceUpDroppedBytes.setStatus('current')
if mibBuilder.loadTexts: linkServiceUpDroppedBytes.setDescription('The link service-counter number of dropped bytes on the upstream when accelerate packet drop is enabled this counter will always return 0xFFFFFFFF.')
linkServiceDownDroppedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 2, 1, 1, 10), Counter32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: linkServiceDownDroppedBytes.setStatus('current')
if mibBuilder.loadTexts: linkServiceDownDroppedBytes.setDescription('The link service-counter number of dropped bytes on the downstream when accelerate packet drop is enabled this counter will always return 0xFFFFFFFF.')
packageCounterTable = MibTable((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 1), )
if mibBuilder.loadTexts: packageCounterTable.setStatus('current')
if mibBuilder.loadTexts: packageCounterTable.setDescription('This table provides information per each package-counter configured into the system.')
packageCounterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 1, 1), ).setIndexNames((0, "PCUBE-SE-MIB", "pmoduleIndex"), (0, "CISCO-SCAS-BB-MIB", "packageCounterIndex"))
if mibBuilder.loadTexts: packageCounterEntry.setStatus('current')
if mibBuilder.loadTexts: packageCounterEntry.setDescription('A packageCounterTable entry.')
packageCounterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: packageCounterIndex.setStatus('current')
if mibBuilder.loadTexts: packageCounterIndex.setDescription('The package-counter index.')
packageCounterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: packageCounterStatus.setStatus('current')
if mibBuilder.loadTexts: packageCounterStatus.setDescription('The package-counter status.')
packageCounterName = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 1, 1, 3), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: packageCounterName.setStatus('current')
if mibBuilder.loadTexts: packageCounterName.setDescription('The name of the package-counter.')
packageCounterActiveSubscribers = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: packageCounterActiveSubscribers.setStatus('current')
if mibBuilder.loadTexts: packageCounterActiveSubscribers.setDescription('The total amount of active subscribers of the package-counter.')
packageServiceUsageTable = MibTable((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2), )
if mibBuilder.loadTexts: packageServiceUsageTable.setStatus('current')
if mibBuilder.loadTexts: packageServiceUsageTable.setDescription('The Package Service Usage table contains counters per package-counter per global-scope service-counter.')
packageServiceUsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1), ).setIndexNames((0, "PCUBE-SE-MIB", "pmoduleIndex"), (0, "CISCO-SCAS-BB-MIB", "packageCounterIndex"), (0, "CISCO-SCAS-BB-MIB", "globalScopeServiceCounterIndex"))
if mibBuilder.loadTexts: packageServiceUsageEntry.setStatus('current')
if mibBuilder.loadTexts: packageServiceUsageEntry.setDescription('A packageServiceUsageTable entry.')
packageServiceUsageUpVolume = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 1), Counter32()).setUnits('KBytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceUsageUpVolume.setStatus('current')
if mibBuilder.loadTexts: packageServiceUsageUpVolume.setDescription('The package-counter service-counter upstream volume.')
packageServiceUsageDownVolume = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 2), Counter32()).setUnits('KBytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceUsageDownVolume.setStatus('current')
if mibBuilder.loadTexts: packageServiceUsageDownVolume.setDescription('The package-counter service-counter downstream volume.')
packageServiceUsageNumSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 3), Counter32()).setUnits('sessions').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceUsageNumSessions.setStatus('current')
if mibBuilder.loadTexts: packageServiceUsageNumSessions.setDescription('The package-counter service-counter number of sessions.')
packageServiceUsageDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 4), Counter32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceUsageDuration.setStatus('current')
if mibBuilder.loadTexts: packageServiceUsageDuration.setDescription('The package-counter service-counter aggregated session duration.')
packageServiceUsageConcurrentSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 5), Counter32()).setUnits('sessions').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceUsageConcurrentSessions.setStatus('current')
if mibBuilder.loadTexts: packageServiceUsageConcurrentSessions.setDescription('The package-counter service-counter concurrent sessions.')
packageServiceUsageActiveSubscribers = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 6), Counter32()).setUnits('subscribers').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceUsageActiveSubscribers.setStatus('current')
if mibBuilder.loadTexts: packageServiceUsageActiveSubscribers.setDescription('The package-counter service-counter amount of active subscribers.')
packageServiceUpDroppedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 7), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceUpDroppedPackets.setStatus('current')
if mibBuilder.loadTexts: packageServiceUpDroppedPackets.setDescription('The package-counter service number of dropped packets on the upstream when accelerate packet drop is enabled this counter will always return 0xFFFFFFFF.')
packageServiceDownDroppedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 8), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceDownDroppedPackets.setStatus('current')
if mibBuilder.loadTexts: packageServiceDownDroppedPackets.setDescription('The package-counter service number of dropped packets on the downstream when accelerate packet drop is enabled this counter will always return 0xFFFFFFFF.')
packageServiceUpDroppedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 9), Counter32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceUpDroppedBytes.setStatus('current')
if mibBuilder.loadTexts: packageServiceUpDroppedBytes.setDescription('The package-counter service number of dropped bytes on the upstream when accelerate packet drop is enabled this counter will always return 0xFFFFFFFF.')
packageServiceDownDroppedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 3, 2, 1, 10), Counter32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: packageServiceDownDroppedBytes.setStatus('current')
if mibBuilder.loadTexts: packageServiceDownDroppedBytes.setDescription('The package-counter service number of dropped bytes on the downstream when accelerate packet drop is enabled this counter will always return 0xFFFFFFFF.')
subscribersTable = MibTable((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 1), )
if mibBuilder.loadTexts: subscribersTable.setStatus('current')
if mibBuilder.loadTexts: subscribersTable.setDescription('This table provides information for each subscriber.')
subscribersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 1, 1), ).setIndexNames((0, "PCUBE-SE-MIB", "pmoduleIndex"), (0, "PCUBE-SE-MIB", "spvIndex"))
if mibBuilder.loadTexts: subscribersEntry.setStatus('current')
if mibBuilder.loadTexts: subscribersEntry.setDescription('A subscribersTable entry.')
subscriberPackageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: subscriberPackageIndex.setStatus('current')
if mibBuilder.loadTexts: subscriberPackageIndex.setDescription('The subscriber package index.')
subscriberServiceUsageTable = MibTable((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 2), )
if mibBuilder.loadTexts: subscriberServiceUsageTable.setStatus('current')
if mibBuilder.loadTexts: subscriberServiceUsageTable.setDescription('This table provides information for each subscriber for each subscriber-scope service-counter.')
subscriberServiceUsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 2, 1), ).setIndexNames((0, "PCUBE-SE-MIB", "pmoduleIndex"), (0, "PCUBE-SE-MIB", "spvIndex"), (0, "CISCO-SCAS-BB-MIB", "subscriberScopeServiceCounterIndex"))
if mibBuilder.loadTexts: subscriberServiceUsageEntry.setStatus('current')
if mibBuilder.loadTexts: subscriberServiceUsageEntry.setDescription('A subscriberServiceUsageTable entry.')
subscriberServiceUsageUpVolume = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 2, 1, 1), Counter32()).setUnits('KBytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: subscriberServiceUsageUpVolume.setStatus('current')
if mibBuilder.loadTexts: subscriberServiceUsageUpVolume.setDescription('The upstream volume.')
subscriberServiceUsageDownVolume = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 2, 1, 2), Counter32()).setUnits('KBytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: subscriberServiceUsageDownVolume.setStatus('current')
if mibBuilder.loadTexts: subscriberServiceUsageDownVolume.setDescription('The downstream volume.')
subscriberServiceUsageNumSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setUnits('sessions').setMaxAccess("readonly")
if mibBuilder.loadTexts: subscriberServiceUsageNumSessions.setStatus('current')
if mibBuilder.loadTexts: subscriberServiceUsageNumSessions.setDescription('The number of sessions.')
subscriberServiceUsageDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 4, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: subscriberServiceUsageDuration.setStatus('current')
if mibBuilder.loadTexts: subscriberServiceUsageDuration.setDescription('Aggregated session duration.')
globalScopeServiceCounterTable = MibTable((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 1), )
if mibBuilder.loadTexts: globalScopeServiceCounterTable.setStatus('current')
if mibBuilder.loadTexts: globalScopeServiceCounterTable.setDescription('The Global-scope Service-counter table consists of data regarding each service-counter configured into the system, used by the link and package entries.')
globalScopeServiceCounterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 1, 1), ).setIndexNames((0, "PCUBE-SE-MIB", "pmoduleIndex"), (0, "CISCO-SCAS-BB-MIB", "globalScopeServiceCounterIndex"))
if mibBuilder.loadTexts: globalScopeServiceCounterEntry.setStatus('current')
if mibBuilder.loadTexts: globalScopeServiceCounterEntry.setDescription('A globalScopeServiceCounterEntry entry.')
globalScopeServiceCounterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: globalScopeServiceCounterIndex.setStatus('current')
if mibBuilder.loadTexts: globalScopeServiceCounterIndex.setDescription('The global-scope service-counter index.')
globalScopeServiceCounterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: globalScopeServiceCounterStatus.setStatus('current')
if mibBuilder.loadTexts: globalScopeServiceCounterStatus.setDescription('The global-scope service-counter status.')
globalScopeServiceCounterName = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 1, 1, 3), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: globalScopeServiceCounterName.setStatus('current')
if mibBuilder.loadTexts: globalScopeServiceCounterName.setDescription('The name of the global-scope service-counter.')
subscriberScopeServiceCounterTable = MibTable((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 2), )
if mibBuilder.loadTexts: subscriberScopeServiceCounterTable.setStatus('current')
if mibBuilder.loadTexts: subscriberScopeServiceCounterTable.setDescription('The Subscriber-Scope Service-counter table consists of data regarding each service-counter definition configured into the system, used by the subscriber entries.')
subscriberScopeServiceCounterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 2, 1), ).setIndexNames((0, "PCUBE-SE-MIB", "pmoduleIndex"), (0, "CISCO-SCAS-BB-MIB", "subscriberScopeServiceCounterIndex"))
if mibBuilder.loadTexts: subscriberScopeServiceCounterEntry.setStatus('current')
if mibBuilder.loadTexts: subscriberScopeServiceCounterEntry.setDescription('A subscriberScopeServiceCounterEntry entry.')
subscriberScopeServiceCounterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: subscriberScopeServiceCounterIndex.setStatus('current')
if mibBuilder.loadTexts: subscriberScopeServiceCounterIndex.setDescription('The subscriber-scope service-counter index.')
subscriberScopeServiceCounterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: subscriberScopeServiceCounterStatus.setStatus('current')
if mibBuilder.loadTexts: subscriberScopeServiceCounterStatus.setDescription('The subscriber-scope service-counter status.')
subscriberScopeServiceCounterName = MibTableColumn((1, 3, 6, 1, 4, 1, 5655, 4, 2, 5, 2, 1, 3), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: subscriberScopeServiceCounterName.setStatus('current')
if mibBuilder.loadTexts: subscriberScopeServiceCounterName.setDescription('The name of the subscriber-scope service-counter.')
pcubeEngageCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5655, 2, 4, 3, 2, 1)).setObjects(("CISCO-SCAS-BB-MIB", "pcubeLinkGroup"), ("CISCO-SCAS-BB-MIB", "pcubePackageGroup"), ("CISCO-SCAS-BB-MIB", "pcubeSubscriberGroup"), ("CISCO-SCAS-BB-MIB", "pcubeServiceCounterGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
pcubeEngageCompliance = pcubeEngageCompliance.setStatus('current')
if mibBuilder.loadTexts: pcubeEngageCompliance.setDescription('A compliance statement defined in this MIB module, for SCABB SNMP agents.')
pcubeLinkGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5655, 2, 4, 3, 1, 2)).setObjects(("CISCO-SCAS-BB-MIB", "linkServiceUsageUpVolume"), ("CISCO-SCAS-BB-MIB", "linkServiceUsageDownVolume"), ("CISCO-SCAS-BB-MIB", "linkServiceUsageNumSessions"), ("CISCO-SCAS-BB-MIB", "linkServiceUsageDuration"), ("CISCO-SCAS-BB-MIB", "linkServiceUsageConcurrentSessions"), ("CISCO-SCAS-BB-MIB", "linkServiceUsageActiveSubscribers"), ("CISCO-SCAS-BB-MIB", "linkServiceUpDroppedPackets"), ("CISCO-SCAS-BB-MIB", "linkServiceDownDroppedPackets"), ("CISCO-SCAS-BB-MIB", "linkServiceUpDroppedBytes"), ("CISCO-SCAS-BB-MIB", "linkServiceDownDroppedBytes"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
pcubeLinkGroup = pcubeLinkGroup.setStatus('current')
if mibBuilder.loadTexts: pcubeLinkGroup.setDescription('Link related inforamtion.')
pcubePackageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5655, 2, 4, 3, 1, 3)).setObjects(("CISCO-SCAS-BB-MIB", "packageCounterStatus"), ("CISCO-SCAS-BB-MIB", "packageCounterName"), ("CISCO-SCAS-BB-MIB", "packageCounterActiveSubscribers"), ("CISCO-SCAS-BB-MIB", "packageServiceUsageUpVolume"), ("CISCO-SCAS-BB-MIB", "packageServiceUsageDownVolume"), ("CISCO-SCAS-BB-MIB", "packageServiceUsageNumSessions"), ("CISCO-SCAS-BB-MIB", "packageServiceUsageDuration"), ("CISCO-SCAS-BB-MIB", "packageServiceUsageConcurrentSessions"), ("CISCO-SCAS-BB-MIB", "packageServiceUsageActiveSubscribers"), ("CISCO-SCAS-BB-MIB", "packageServiceUpDroppedPackets"), ("CISCO-SCAS-BB-MIB", "packageServiceDownDroppedPackets"), ("CISCO-SCAS-BB-MIB", "packageServiceUpDroppedBytes"), ("CISCO-SCAS-BB-MIB", "packageServiceDownDroppedBytes"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
pcubePackageGroup = pcubePackageGroup.setStatus('current')
if mibBuilder.loadTexts: pcubePackageGroup.setDescription('Package related information.')
pcubeSubscriberGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5655, 2, 4, 3, 1, 4)).setObjects(("CISCO-SCAS-BB-MIB", "subscriberPackageIndex"), ("CISCO-SCAS-BB-MIB", "subscriberServiceUsageUpVolume"), ("CISCO-SCAS-BB-MIB", "subscriberServiceUsageDownVolume"), ("CISCO-SCAS-BB-MIB", "subscriberServiceUsageNumSessions"), ("CISCO-SCAS-BB-MIB", "subscriberServiceUsageDuration"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
pcubeSubscriberGroup = pcubeSubscriberGroup.setStatus('current')
if mibBuilder.loadTexts: pcubeSubscriberGroup.setDescription('Subscriber related information.')
pcubeServiceCounterGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5655, 2, 4, 3, 1, 5)).setObjects(("CISCO-SCAS-BB-MIB", "globalScopeServiceCounterStatus"), ("CISCO-SCAS-BB-MIB", "globalScopeServiceCounterName"), ("CISCO-SCAS-BB-MIB", "subscriberScopeServiceCounterStatus"), ("CISCO-SCAS-BB-MIB", "subscriberScopeServiceCounterName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
pcubeServiceCounterGroup = pcubeServiceCounterGroup.setStatus('current')
if mibBuilder.loadTexts: pcubeServiceCounterGroup.setDescription('Service related information.')
mibBuilder.exportSymbols("CISCO-SCAS-BB-MIB", subscriberServiceUsageEntry=subscriberServiceUsageEntry, globalScopeServiceCounterStatus=globalScopeServiceCounterStatus, linkServiceUsageConcurrentSessions=linkServiceUsageConcurrentSessions, packageCounterEntry=packageCounterEntry, packageServiceUpDroppedPackets=packageServiceUpDroppedPackets, linkGrp=linkGrp, linkServiceUsageTable=linkServiceUsageTable, packageServiceUsageUpVolume=packageServiceUsageUpVolume, packageCounterName=packageCounterName, subscriberServiceUsageTable=subscriberServiceUsageTable, serviceCounterGrp=serviceCounterGrp, subscriberServiceUsageUpVolume=subscriberServiceUsageUpVolume, subscriberGrp=subscriberGrp, packageServiceUpDroppedBytes=packageServiceUpDroppedBytes, pcubeEngageCompliance=pcubeEngageCompliance, globalScopeServiceCounterTable=globalScopeServiceCounterTable, packageServiceUsageDuration=packageServiceUsageDuration, subscriberServiceUsageDuration=subscriberServiceUsageDuration, subscriberScopeServiceCounterIndex=subscriberScopeServiceCounterIndex, pcubeLinkGroup=pcubeLinkGroup, subscriberScopeServiceCounterEntry=subscriberScopeServiceCounterEntry, subscriberScopeServiceCounterTable=subscriberScopeServiceCounterTable, packageServiceUsageActiveSubscribers=packageServiceUsageActiveSubscribers, pcubeEngageGroups=pcubeEngageGroups, serviceGrp=serviceGrp, linkServiceUsageDownVolume=linkServiceUsageDownVolume, packageCounterIndex=packageCounterIndex, linkServiceUsageNumSessions=linkServiceUsageNumSessions, packageServiceUsageTable=packageServiceUsageTable, pcubeEngageObjs=pcubeEngageObjs, PYSNMP_MODULE_ID=pcubeEngageMIB, subscribersEntry=subscribersEntry, packageServiceDownDroppedPackets=packageServiceDownDroppedPackets, linkServiceUsageUpVolume=linkServiceUsageUpVolume, packageCounterTable=packageCounterTable, globalScopeServiceCounterEntry=globalScopeServiceCounterEntry, subscriberScopeServiceCounterName=subscriberScopeServiceCounterName, linkServiceUpDroppedBytes=linkServiceUpDroppedBytes, pcubeEngageConformance=pcubeEngageConformance, packageCounterStatus=packageCounterStatus, globalScopeServiceCounterName=globalScopeServiceCounterName, globalScopeServiceCounterIndex=globalScopeServiceCounterIndex, linkServiceUsageEntry=linkServiceUsageEntry, pcubeEngageCompliances=pcubeEngageCompliances, linkServiceDownDroppedPackets=linkServiceDownDroppedPackets, packageServiceUsageDownVolume=packageServiceUsageDownVolume, linkServiceUpDroppedPackets=linkServiceUpDroppedPackets, packageServiceUsageEntry=packageServiceUsageEntry, packageServiceDownDroppedBytes=packageServiceDownDroppedBytes, packageServiceUsageNumSessions=packageServiceUsageNumSessions, subscriberScopeServiceCounterStatus=subscriberScopeServiceCounterStatus, linkServiceDownDroppedBytes=linkServiceDownDroppedBytes, packageGrp=packageGrp, linkServiceUsageDuration=linkServiceUsageDuration, pcubePackageGroup=pcubePackageGroup, pcubeSubscriberGroup=pcubeSubscriberGroup, subscribersTable=subscribersTable, subscriberPackageIndex=subscriberPackageIndex, subscriberServiceUsageDownVolume=subscriberServiceUsageDownVolume, serviceTable=serviceTable, packageCounterActiveSubscribers=packageCounterActiveSubscribers, subscriberServiceUsageNumSessions=subscriberServiceUsageNumSessions, pcubeServiceCounterGroup=pcubeServiceCounterGroup, packageServiceUsageConcurrentSessions=packageServiceUsageConcurrentSessions, pcubeEngageMIB=pcubeEngageMIB, linkServiceUsageActiveSubscribers=linkServiceUsageActiveSubscribers)
| 140.8
| 3,485
| 0.797291
|
aece5f6d2aa689ab152a6e6f8b3b694e300232cd
| 1,644
|
py
|
Python
|
tests/schema/test_blueprint.py
|
KarthickNamakkalKrishnan/eloquent
|
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
|
[
"MIT"
] | 47
|
2015-03-19T02:11:36.000Z
|
2022-03-29T07:16:42.000Z
|
tests/schema/test_blueprint.py
|
KarthickNamakkalKrishnan/eloquent
|
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
|
[
"MIT"
] | 20
|
2015-03-16T02:56:51.000Z
|
2015-05-24T17:51:29.000Z
|
tests/schema/test_blueprint.py
|
sdispater/eloquent
|
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
|
[
"MIT"
] | 4
|
2018-08-29T13:42:50.000Z
|
2021-03-14T11:28:31.000Z
|
# -*- coding: utf-8 -*-
from flexmock import flexmock, flexmock_teardown
from eloquent.schema import Blueprint
from eloquent.schema.grammars import SchemaGrammar
from eloquent.connections import Connection
from .. import EloquentTestCase
class SchemaBuilderTestCase(EloquentTestCase):
def tearDown(self):
flexmock_teardown()
def test_to_sql_runs_commands_from_blueprint(self):
conn = flexmock(Connection(None))
conn.should_receive('statement').once().with_args('foo')
conn.should_receive('statement').once().with_args('bar')
grammar = flexmock(SchemaGrammar())
blueprint = flexmock(Blueprint('table'))
blueprint.should_receive('to_sql').once().with_args(conn, grammar).and_return(['foo', 'bar'])
blueprint.build(conn, grammar)
def test_index_default_names(self):
blueprint = Blueprint('users')
blueprint.unique(['foo', 'bar'])
commands = blueprint.get_commands()
self.assertEqual('users_foo_bar_unique', commands[0].index)
blueprint = Blueprint('users')
blueprint.index('foo')
commands = blueprint.get_commands()
self.assertEqual('users_foo_index', commands[0].index)
def test_drop_index_default_names(self):
blueprint = Blueprint('users')
blueprint.drop_unique(['foo', 'bar'])
commands = blueprint.get_commands()
self.assertEqual('users_foo_bar_unique', commands[0].index)
blueprint = Blueprint('users')
blueprint.drop_index(['foo'])
commands = blueprint.get_commands()
self.assertEqual('users_foo_index', commands[0].index)
| 35.73913
| 101
| 0.68674
|
82ba9f6e8b88723d509073cab21d8b85488659fa
| 3,621
|
py
|
Python
|
cscs-checks/apps/python/numpy_check.py
|
hpc-unibe-ch/reframe
|
07f97e25cf4e7319782c37dd1923f7e70a368b99
|
[
"BSD-3-Clause"
] | null | null | null |
cscs-checks/apps/python/numpy_check.py
|
hpc-unibe-ch/reframe
|
07f97e25cf4e7319782c37dd1923f7e70a368b99
|
[
"BSD-3-Clause"
] | null | null | null |
cscs-checks/apps/python/numpy_check.py
|
hpc-unibe-ch/reframe
|
07f97e25cf4e7319782c37dd1923f7e70a368b99
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
class NumpyBaseTest(rfm.RunOnlyRegressionTest):
def __init__(self):
self.descr = 'Test a few typical numpy operations'
self.valid_prog_environs = ['PrgEnv-gnu']
self.modules = ['numpy/1.17.2-CrayGNU-19.10']
self.reference = {
'daint:gpu': {
'dot': (0.4, None, 0.05, 'seconds'),
'svd': (0.37, None, 0.05, 'seconds'),
'cholesky': (0.12, None, 0.05, 'seconds'),
'eigendec': (3.5, None, 0.05, 'seconds'),
'inv': (0.21, None, 0.05, 'seconds'),
},
'daint:mc': {
'dot': (0.3, None, 0.05, 'seconds'),
'svd': (0.35, None, 0.05, 'seconds'),
'cholesky': (0.1, None, 0.05, 'seconds'),
'eigendec': (4.14, None, 0.05, 'seconds'),
'inv': (0.16, None, 0.05, 'seconds'),
},
'dom:gpu': {
'dot': (0.4, None, 0.05, 'seconds'),
'svd': (0.37, None, 0.05, 'seconds'),
'cholesky': (0.12, None, 0.05, 'seconds'),
'eigendec': (3.5, None, 0.05, 'seconds'),
'inv': (0.21, None, 0.05, 'seconds'),
},
'dom:mc': {
'dot': (0.3, None, 0.05, 'seconds'),
'svd': (0.35, None, 0.05, 'seconds'),
'cholesky': (0.1, None, 0.05, 'seconds'),
'eigendec': (4.14, None, 0.05, 'seconds'),
'inv': (0.16, None, 0.05, 'seconds'),
},
}
self.perf_patterns = {
'dot': sn.extractsingle(
r'^Dotted two 4096x4096 matrices in\s+(?P<dot>\S+)\s+s',
self.stdout, 'dot', float),
'svd': sn.extractsingle(
r'^SVD of a 2048x1024 matrix in\s+(?P<svd>\S+)\s+s',
self.stdout, 'svd', float),
'cholesky': sn.extractsingle(
r'^Cholesky decomposition of a 2048x2048 matrix in'
r'\s+(?P<cholesky>\S+)\s+s',
self.stdout, 'cholesky', float),
'eigendec': sn.extractsingle(
r'^Eigendecomposition of a 2048x2048 matrix in'
r'\s+(?P<eigendec>\S+)\s+s',
self.stdout, 'eigendec', float),
'inv': sn.extractsingle(
r'^Inversion of a 2048x2048 matrix in\s+(?P<inv>\S+)\s+s',
self.stdout, 'inv', float)
}
self.sanity_patterns = sn.assert_found(r'Numpy version:\s+\S+',
self.stdout)
self.variables = {
'OMP_NUM_THREADS': '$SLURM_CPUS_PER_TASK',
}
self.executable = 'python'
self.executable_opts = ['np_ops.py']
self.num_tasks_per_node = 1
self.use_multithreading = False
self.tags = {'production'}
self.maintainers = ['RS', 'TR']
@rfm.required_version('>=2.16')
@rfm.simple_test
class NumpyHaswellTest(NumpyBaseTest):
def __init__(self):
super().__init__()
self.valid_systems = ['daint:gpu', 'dom:gpu']
self.num_cpus_per_task = 12
@rfm.required_version('>=2.16')
@rfm.simple_test
class NumpyBroadwellTest(NumpyBaseTest):
def __init__(self):
super().__init__()
self.valid_systems = ['daint:mc', 'dom:mc']
self.num_cpus_per_task = 36
| 38.935484
| 76
| 0.499033
|
e02d832e64986b5d3c5d22e12f5ac0a8df1dd039
| 3,908
|
py
|
Python
|
rpkiclientweb/config.py
|
ties/rpki-client-web
|
afce9bb2e8deeaab8cb6c4ff3fd26a1cde8563d9
|
[
"MIT"
] | 1
|
2021-11-23T12:43:43.000Z
|
2021-11-23T12:43:43.000Z
|
rpkiclientweb/config.py
|
ties/rpki-client-web
|
afce9bb2e8deeaab8cb6c4ff3fd26a1cde8563d9
|
[
"MIT"
] | 18
|
2021-04-08T19:21:04.000Z
|
2022-03-11T14:41:30.000Z
|
rpkiclientweb/config.py
|
ties/rpki-client-web
|
afce9bb2e8deeaab8cb6c4ff3fd26a1cde8563d9
|
[
"MIT"
] | 1
|
2021-04-08T18:52:04.000Z
|
2021-04-08T18:52:04.000Z
|
"""
Config file support.
TODO: Consider using https://pypi.org/project/voluptuous/ or
https://docs.python-cerberus.org/en/stable/
"""
import json
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional
from .util import validate
LOG = logging.getLogger(__name__)
@dataclass
class Configuration:
"""Configuration object."""
jitter: int
"""verbosity."""
verbosity: int
""" Cache directory for rpki-client. """
cache_dir: Path
""" Output directory. """
output_dir: Path
""" Interval between rpki-client runs. """
interval: int
""" Timeout before rpki-client is killed. """
timeout: int
""" host to listen on. """
host: str
""" port to listen on. """
port: int
""" Path to rpki-client. """
rpki_client: Path
"""deadline: DEADLINE env var is passed with Unix timestamp of [deadline] after run starts"""
deadline: Optional[int] = None
"""Optional path to rsync binary or wrapper. """
rsync_command: Optional[Path] = None
""" Additional rpki-client options. """
additional_opts: List[str] = field(default_factory=list)
""" Paths of Trust Anchor Locator files. """
trust_anchor_locators: List[Path] = field(default_factory=list)
def __init__(
self, conf: Dict, jitter: Optional[int] = None, verbosity: Optional[int] = None
) -> None:
LOG.info("Configuration: %s", conf)
if jitter is not None:
self.jitter = conf.get("jitter", 600) if jitter == -1 else jitter
else:
self.jitter = conf.get("jitter", 600)
self.verbosity = int(conf.get("verbosity", 1) if not verbosity else verbosity)
self.cache_dir = Path(conf["cache_dir"]).resolve()
validate(
self.cache_dir.is_dir(),
"Cache directory '{}' is not a directory",
str(self.cache_dir),
)
self.output_dir = Path(conf["output_dir"]).resolve()
validate(
self.output_dir.is_dir(),
"Output directory '{}' is not a directory",
str(self.output_dir),
)
self.interval = conf.get("interval", None)
validate(self.interval is not None, "interval needs to be set")
validate(self.interval > 0, "Interval needs to be a positive integer")
self.deadline = conf.get("deadline", -1)
validate(
self.deadline <= self.interval,
f"deadline needs to be below interval ({self.interval}) or use missing or -1 to disable",
)
self.timeout = conf.get("timeout", None)
validate(self.timeout is not None, "timeout needs to be set")
validate(self.timeout <= self.interval, "timeout needs to be below interval")
self.host = conf.get("host", "localhost")
self.port = conf.get("port", 8888)
validate(self.port > 0, "Port should be > 0")
self.rpki_client = Path(conf["rpki_client"]).resolve()
validate(
self.rpki_client.is_file(),
"rpki-client binary should be a file - {} is not.",
str(self.rpki_client),
)
if conf.get("rsync_command", None):
self.rsync_command = Path(conf["rsync_command"]).resolve()
validate(
self.rsync_command.is_file(),
"rsync command ({}) should be a file",
str(self.rsync_command),
)
self.additional_opts = conf.get("additional_opts", [])
self.trust_anchor_locators = [
Path(ta).resolve() for ta in conf.get("trust_anchor_locators", [])
]
validate(
len(self.trust_anchor_locators) > 0, "trust_anchor_locators are required."
)
for ta in self.trust_anchor_locators:
validate(ta.is_file(), "trust anchor locator ({}) should be a file", ta)
| 31.264
| 101
| 0.603122
|
801b87236520aafd003a2862a0a26a9a49c372d5
| 19,066
|
py
|
Python
|
server/auvsi_suas/models/moving_obstacle_test.py
|
dcat52/interop
|
b016b2c25e468e21649bdb7475d828198b5e6958
|
[
"Apache-2.0"
] | null | null | null |
server/auvsi_suas/models/moving_obstacle_test.py
|
dcat52/interop
|
b016b2c25e468e21649bdb7475d828198b5e6958
|
[
"Apache-2.0"
] | null | null | null |
server/auvsi_suas/models/moving_obstacle_test.py
|
dcat52/interop
|
b016b2c25e468e21649bdb7475d828198b5e6958
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the moving_obstacle module."""
import datetime
import time
from auvsi_suas.models import units
from auvsi_suas.models.aerial_position import AerialPosition
from auvsi_suas.models.gps_position import GpsPosition
from auvsi_suas.models.moving_obstacle import MovingObstacle
from auvsi_suas.models.time_period import TimePeriod
from auvsi_suas.models.uas_telemetry import UasTelemetry
from auvsi_suas.models.waypoint import Waypoint
from auvsi_suas.patches.simplekml_patch import Kml
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from auvsi_suas.models import distance
TESTDATA_COMPETITION_DIST = [(-76.428709, 38.145306, -76.426375, 38.146146,
0.22446), (-76.428537, 38.145399, -76.427818,
38.144686, 0.10045),
(-76.434261, 38.142471, -76.418876, 38.147838,
1.46914)]
# (lat, lon, rad, alt)
TESTDATA_MOVOBST_CONTAINSPOS_OBJ = (-76, 38, 100, 200)
# (lat, lon, alt)
TESTDATA_MOVOBST_CONTAINSPOS_INSIDE = [
(-76, 38, 100),
(-76, 38, 300),
(-76.0002, 38, 200),
(-76, 38.0003, 200)
] # yapf: disable
TESTDATA_MOVOBST_CONTAINSPOS_OUTSIDE = [
(-76, 38, 99),
(-76, 38, 301),
(-76.0003, 38, 200),
(-76, 38.004, 200)
] # yapf: disable
TESTDATA_MOVOBST_PATHS = [
# Test 2 points
[(38.142233, -76.434082, 300),
(38.141878, -76.425198, 700)],
# Test 3 points
[(38.142233, -76.434082, 300),
(38.141878, -76.425198, 700),
(38.144599, -76.428186, 100)],
# Test 3 points with a consecutive duplicate
[(38.142233, -76.434082, 300),
(38.141878, -76.425198, 700),
(38.141878, -76.425198, 700),
(38.144599, -76.428186, 100)],
# Test 4 points
[(38.145574, -76.428492, 100),
(38.149164, -76.427113, 750),
(38.148662, -76.431517, 300),
(38.146143, -76.426727, 500)],
# Test 5 points
[(38.145405, -76.428310, 100),
(38.146582, -76.424099, 200),
(38.144662, -76.427634, 300),
(38.147729, -76.419185, 200),
(38.147573, -76.420832, 100),
(38.148522, -76.419507, 750)]
] # yapf: disable
TESTDATA_MOVOBST_EVALCOLLISION = (
# Obst radius and speed
100, 200,
# Positions (lat, lon, alt)
[(38, -76, 100),
(38.1, -76.1, 200)],
# Time, Inside pos, outside pos
[(0.0,
[(38, -76, 100),
(38, -76, 0),
(38, -76, 200),
(38.0001, -76, 100),
(38, -76.0001, 100)],
[(38.1, -76.1, 200),
(38.1, -76.1, 100),
(38.1, -76.1, 300),
(38.002, -76.002, 100),
(38, -76, 201)]),
(137.526986,
[(38.1, -76.1, 200),
(38.1, -76.1, 225),
(38.1, -76.1, 175)],
[(38.1, -76.1, 350),
(38, -76, 100),
(38.1, -76.1, 50)])
]
) # yapf: disable
TESTDATA_MOVOBST_INTERP = (
# Obst radius and speed
30, 41.5,
# Obstacle positions (lat, lon, alt)
[(38.14524210878, -76.427522, 100),
(38.14504989122, -76.427522, 100)],
# Time, Inside pos, outside pos
[(True, [(38.14524210878, -76.427522, 100),
(38.14524210878, -76.427522, 20)]),
(True, [(38.145148000, -76.427645000, 90),
(38.145144000, -76.427400000, 90)]),
(False, [(38.145148000, -76.427645000, 140),
(38.145144000, -76.427400000, 140)]),
(False, [(38.145148000, -76.427645000, 100),
(38.14534021755, -76.427645, 100)])]
) # yapf: disable
class TestMovingObstacle(TestCase):
"""Tests the MovingObstacle model."""
def setUp(self):
"""Create the obstacles for testing."""
# Obstacle with no waypoints
obst_no_wpt = MovingObstacle()
obst_no_wpt.speed_avg = 1
obst_no_wpt.sphere_radius = 1
obst_no_wpt.save()
self.obst_no_wpt = obst_no_wpt
# Obstacle with single waypoint
self.single_wpt_lat = 40
self.single_wpt_lon = 76
self.single_wpt_alt = 100
obst_single_wpt = MovingObstacle()
obst_single_wpt.speed_avg = 1
obst_single_wpt.sphere_radius = 1
obst_single_wpt.save()
single_gpos = GpsPosition()
single_gpos.latitude = self.single_wpt_lat
single_gpos.longitude = self.single_wpt_lon
single_gpos.save()
single_apos = AerialPosition()
single_apos.gps_position = single_gpos
single_apos.altitude_msl = self.single_wpt_alt
single_apos.save()
single_wpt = Waypoint()
single_wpt.position = single_apos
single_wpt.order = 1
single_wpt.save()
obst_single_wpt.waypoints.add(single_wpt)
self.obst_single_wpt = obst_single_wpt
# Obstacles with predefined path
self.obstacles = []
for path in TESTDATA_MOVOBST_PATHS:
cur_obst = MovingObstacle()
cur_obst.speed_avg = 68
cur_obst.sphere_radius = 10
cur_obst.save()
for pt_id in range(len(path)):
(lat, lon, alt) = path[pt_id]
cur_gpos = GpsPosition()
cur_gpos.latitude = lat
cur_gpos.longitude = lon
cur_gpos.save()
cur_apos = AerialPosition()
cur_apos.gps_position = cur_gpos
cur_apos.altitude_msl = alt
cur_apos.save()
cur_wpt = Waypoint()
cur_wpt.position = cur_apos
cur_wpt.order = pt_id
cur_wpt.save()
cur_obst.waypoints.add(cur_wpt)
cur_obst.save()
self.obstacles.append(cur_obst)
def test_unicode(self):
"""Tests the unicode method executes."""
obst = MovingObstacle()
obst.speed_avg = 10
obst.sphere_radius = 100
obst.save()
for _ in range(3):
pos = GpsPosition()
pos.latitude = 10
pos.longitude = 100
pos.save()
apos = AerialPosition()
apos.altitude_msl = 1000
apos.gps_position = pos
apos.save()
wpt = Waypoint()
wpt.position = apos
wpt.order = 10
wpt.save()
obst.waypoints.add(wpt)
self.assertTrue(obst.__unicode__())
def test_get_waypoint_travel_time_invalid_inputs(self):
"""Tests proper invalid input handling."""
obstacle = MovingObstacle()
obstacle.speed_avg = 1
self.assertIsNone(obstacle.get_waypoint_travel_time(None, 1, 1))
self.assertIsNone(obstacle.get_waypoint_travel_time([], 1, 1))
self.assertIsNone(obstacle.get_waypoint_travel_time([None], 1, 1))
self.assertIsNone(
obstacle.get_waypoint_travel_time([None, None], None, 1))
self.assertIsNone(
obstacle.get_waypoint_travel_time([None, None], 1, None))
self.assertIsNone(
obstacle.get_waypoint_travel_time([None, None], -1, 0))
self.assertIsNone(
obstacle.get_waypoint_travel_time([None, None], 0, -1))
self.assertIsNone(
obstacle.get_waypoint_travel_time([None, None], 2, 0))
self.assertIsNone(
obstacle.get_waypoint_travel_time([None, None], 0, 2))
obstacle.speed_avg = 0
self.assertIsNone(
obstacle.get_waypoint_travel_time([None, None], 0, 1))
def eval_travel_time(self, time_actual, time_received):
"""Evaluates whether the travel times are close enough."""
EVAL_THRESH = time_actual * 0.1
return abs(time_actual - time_received) < EVAL_THRESH
def test_get_waypoint_travel_time(self):
"""Tests travel time calc."""
test_spds = [1, 10, 100, 500]
for (lon2, lat2, lon1, lat1, dist_km) in TESTDATA_COMPETITION_DIST:
dist_ft = units.kilometers_to_feet(dist_km)
for speed in test_spds:
speed_fps = units.knots_to_feet_per_second(speed)
time = dist_ft / speed_fps
gpos1 = GpsPosition()
gpos1.latitude = lat1
gpos1.longitude = lon1
gpos1.save()
apos1 = AerialPosition()
apos1.gps_position = gpos1
apos1.altitude_msl = 0
apos1.save()
wpt1 = Waypoint()
wpt1.position = apos1
gpos2 = GpsPosition()
gpos2.latitude = lat2
gpos2.longitude = lon2
gpos2.save()
apos2 = AerialPosition()
apos2.gps_position = gpos2
apos2.altitude_msl = 0
apos2.save()
wpt2 = Waypoint()
wpt2.position = apos2
waypoints = [wpt1, wpt2]
obstacle = MovingObstacle()
obstacle.speed_avg = speed
self.assertTrue(
self.eval_travel_time(
obstacle.get_waypoint_travel_time(waypoints, 0, 1),
time))
def test_get_position_no_waypoints(self):
"""Tests position calc on no-"""
self.assertEqual(self.obst_no_wpt.get_position(), (0, 0, 0))
def test_get_position_one_waypoint(self):
"""Tests position calc on single waypoints."""
(lat, lon, alt) = self.obst_single_wpt.get_position()
self.assertEqual(lat, self.single_wpt_lat)
self.assertEqual(lon, self.single_wpt_lon)
self.assertEqual(alt, self.single_wpt_alt)
def test_get_position_changes(self):
"""Position of obstacle changes over time."""
# Pick an obstacle with more than one point
obstacle = self.obstacles[0]
original = obstacle.get_position()
time.sleep(0.1)
new = obstacle.get_position()
self.assertNotEqual(original, new)
def test_get_position_waypoints_plot(self):
"""Tests position calculation by saving plots of calculation.
Saves plots to test_output/auvsi_suas-MovingObstacle-getPosition-x.jpg.
On each run it first deletes the existing folder. This requires manual
inspection to validate correctness.
"""
if not settings.TEST_ENABLE_PLOTTING:
return
# Create directory for plot output
if not os.path.exists('data'):
os.mkdir('data')
if os.path.exists('data/test_output'):
shutil.rmtree('data/test_output')
os.mkdir('data/test_output')
# Create plot for each path
for obst_id in range(len(self.obstacles)):
cur_obst = self.obstacles[obst_id]
# Get waypoint positions as numpy array
waypoints = cur_obst.waypoints.order_by('order')
waypoint_travel_times = cur_obst.get_inter_waypoint_travel_times(
waypoints)
waypoint_times = cur_obst.get_waypoint_times(waypoint_travel_times)
total_time = waypoint_times[len(waypoint_times) - 1]
num_waypoints = len(waypoints)
wpt_latitudes = np.zeros(num_waypoints + 1)
wpt_longitudes = np.zeros(num_waypoints + 1)
wpt_altitudes = np.zeros(num_waypoints + 1)
for waypoint_id in range(num_waypoints + 1):
cur_id = waypoint_id % num_waypoints
# yapf: disable
wpt_latitudes[waypoint_id] = waypoints[cur_id].position.latitude
wpt_longitudes[waypoint_id] = waypoints[cur_id].position.longitude
wpt_altitudes[waypoint_id] = waypoints[cur_id].position.altitude_msl
# yapf: enable
# Create time series to represent samples at 10 Hz for 1.5 trips
time_pos = np.arange(0, 1.5 * total_time, 0.10)
# Sample position for the time series
latitudes = np.zeros(len(time_pos))
longitudes = np.zeros(len(time_pos))
altitudes = np.zeros(len(time_pos))
epoch = timezone.now().replace(
year=1970,
month=1,
day=1,
hour=0,
minute=0,
second=0,
microsecond=0)
for time_id in range(len(time_pos)):
cur_time_offset = time_pos[time_id]
cur_samp_time = epoch + datetime.timedelta(
seconds=cur_time_offset)
(lat, lon, alt) = cur_obst.get_position(cur_samp_time)
latitudes[time_id] = lat
longitudes[time_id] = lon
altitudes[time_id] = alt
# Create plot
plt.figure()
plt.subplot(311)
plt.plot(time_pos, latitudes, 'b', waypoint_times, wpt_latitudes,
'rx')
plt.subplot(312)
plt.plot(time_pos, longitudes, 'b', waypoint_times, wpt_longitudes,
'rx')
plt.subplot(313)
plt.plot(time_pos, altitudes, 'b', waypoint_times, wpt_altitudes,
'rx')
plt.savefig(
('data/test_output/'
'auvsi_suas-MovingObstacle-getPosition-%d.jpg' % obst_id))
def test_contains_pos(self):
"""Tests the inside obstacle method."""
# Form the test obstacle
obst = MovingObstacle()
obst.sphere_radius = TESTDATA_MOVOBST_CONTAINSPOS_OBJ[2]
# Run test points against obstacle
test_data = [(TESTDATA_MOVOBST_CONTAINSPOS_INSIDE, True),
(TESTDATA_MOVOBST_CONTAINSPOS_OUTSIDE, False)]
for (cur_data, cur_contains) in test_data:
for (lat, lon, alt) in cur_data:
gpos = GpsPosition()
gpos.latitude = lat
gpos.longitude = lon
gpos.save()
apos = AerialPosition()
apos.gps_position = gpos
apos.altitude_msl = alt
self.assertEqual(
obst.contains_pos(TESTDATA_MOVOBST_CONTAINSPOS_OBJ[0],
TESTDATA_MOVOBST_CONTAINSPOS_OBJ[1],
TESTDATA_MOVOBST_CONTAINSPOS_OBJ[3],
apos), cur_contains)
def test_evaluate_collision_with_uas(self):
"""Tests the collision with UAS method."""
# Get test data
user = User.objects.create_user('testuser', 'testemail@x.com',
'testpass')
user.save()
testdata = TESTDATA_MOVOBST_EVALCOLLISION
(obst_rad, obst_speed, obst_pos, log_details) = testdata
# Create the obstacle
obst = MovingObstacle()
obst.speed_avg = obst_speed
obst.sphere_radius = obst_rad
obst.save()
for pos_id in xrange(len(obst_pos)):
(lat, lon, alt) = obst_pos[pos_id]
gpos = GpsPosition()
gpos.latitude = lat
gpos.longitude = lon
gpos.save()
apos = AerialPosition()
apos.gps_position = gpos
apos.altitude_msl = alt
apos.save()
wpt = Waypoint()
wpt.order = pos_id
wpt.position = apos
wpt.save()
obst.waypoints.add(wpt)
obst.save()
# Create sets of logs
epoch = timezone.now().replace(
year=1970,
month=1,
day=1,
hour=0,
minute=0,
second=0,
microsecond=0)
inside_logs = []
outside_logs = []
for (time_sec, inside_pos, outside_pos) in log_details:
log_time = epoch + datetime.timedelta(seconds=time_sec)
logs_pos = [(inside_pos, inside_logs), (outside_pos, outside_logs)]
for (positions, log_list) in logs_pos:
for (lat, lon, alt) in positions:
log = self.create_log(lat, lon, alt, user, log_time)
log_list.append(log)
# Assert the obstacle correctly computes collisions
log_collisions = [(True, inside_logs), (False, outside_logs)]
for (inside, logs) in log_collisions:
self.assertEqual(obst.evaluate_collision_with_uas(logs), inside)
for log in logs:
self.assertEqual(
obst.evaluate_collision_with_uas([log]), inside)
def test_json(self):
"""Tests the JSON serialization method."""
for cur_obst in self.obstacles:
json_data = cur_obst.json()
self.assertTrue('latitude' in json_data)
self.assertTrue('longitude' in json_data)
self.assertTrue('altitude_msl' in json_data)
self.assertTrue('sphere_radius' in json_data)
self.assertEqual(json_data['sphere_radius'],
cur_obst.sphere_radius)
obst = self.obst_single_wpt
json_data = obst.json()
# yapf: disable
self.assertEqual(json_data['latitude'],
obst.waypoints.all()[0].position.gps_position.latitude)
self.assertEqual(json_data['longitude'],
obst.waypoints.all()[0].position.gps_position.longitude)
self.assertEqual(json_data['altitude_msl'],
obst.waypoints.all()[0].position.altitude_msl)
# yapf: enable
def test_kml(self):
"""
Tests the generation of kml data
The correct number of elements are generated
The meta-data tag is present
"""
for cur_obst in self.obstacles:
kml = Kml()
kml_mission = kml.newfolder(name='SubFolder')
cur_obst.kml([
TimePeriod(
timezone.now(),
timezone.now() + datetime.timedelta(seconds=10))
], kml_mission, kml.document)
result_kml = kml.kml()
self.assertEqual(101, result_kml.count('<gx:coord>'))
def create_log(self, lat, lon, alt, user, log_time=None):
pos = GpsPosition(latitude=lat, longitude=lon)
pos.save()
apos = AerialPosition(gps_position=pos, altitude_msl=alt)
apos.save()
log = UasTelemetry(
user=user,
uas_position=apos,
uas_heading=100, )
log.save()
if log_time:
log.timestamp = log_time
log.save()
return log
def test_json_time_changes(self):
"""json, called at different times, causes different locations"""
for o in self.obstacles:
d1 = o.json()
d2 = o.json()
self.assertNotEqual(d1, d2)
def test_json_time_freeze(self):
"""json, called at the same time, causes same locations"""
time = timezone.now()
for o in self.obstacles:
d1 = o.json(time=time)
d2 = o.json(time=time)
self.assertEqual(d1, d2)
| 37.45776
| 84
| 0.568342
|
1fc0cf77ca3c3e4f0cf9eedfd7048e1d43daac67
| 2,977
|
py
|
Python
|
test/azure/version-tolerant/Expected/AcceptanceTests/LroVersionTolerant/lroversiontolerant/aio/_configuration.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/azure/version-tolerant/Expected/AcceptanceTests/LroVersionTolerant/lroversiontolerant/aio/_configuration.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/azure/version-tolerant/Expected/AcceptanceTests/LroVersionTolerant/lroversiontolerant/aio/_configuration.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 1
|
2022-03-28T08:58:03.000Z
|
2022-03-28T08:58:03.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AutoRestLongRunningOperationTestServiceConfiguration(Configuration):
"""Configuration for AutoRestLongRunningOperationTestService.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
"""
def __init__(self, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
super(AutoRestLongRunningOperationTestServiceConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.credential = credential
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "autorestlongrunningoperationtestservice/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
| 53.160714
| 107
| 0.716829
|
f246f083d94fc7c56e9c71e917651907f008a1cb
| 3,092
|
py
|
Python
|
examples/06-distributed-post/02-distributed_workflows_on_remote.py
|
jfthuong/pydpf-core
|
bf2895ebc546e0004f759289bfc9a23196559ac3
|
[
"MIT"
] | null | null | null |
examples/06-distributed-post/02-distributed_workflows_on_remote.py
|
jfthuong/pydpf-core
|
bf2895ebc546e0004f759289bfc9a23196559ac3
|
[
"MIT"
] | null | null | null |
examples/06-distributed-post/02-distributed_workflows_on_remote.py
|
jfthuong/pydpf-core
|
bf2895ebc546e0004f759289bfc9a23196559ac3
|
[
"MIT"
] | null | null | null |
"""
.. _ref_distributed_workflows_on_remote:
Connect workflows on different processes implicitly
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This example shows how distributed files can be read and post processed
on distributed processes. After remote post processing,
results a merged on the local process. In this example, different workflows are
directly created on different servers. Those workflows are then connected
together without having to care that they are on remote processes.
"""
###############################################################################
# Import dpf module and its examples files
from ansys.dpf import core as dpf
from ansys.dpf.core import examples
from ansys.dpf.core import operators as ops
###############################################################################
# Configure the servers
# ~~~~~~~~~~~~~~~~~~~~~~
# To make this example easier, we will start local servers here,
# but we could get connected to any existing servers on the network.
remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
###############################################################################
# Create template workflows on remote servers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# For the purpose of this example, we will create 2 workflows computing
# elemental nodal stresses on different servers. The second workflow will
# multiply by 2.0 the stresses. A last workflow will merge the outputs
files = examples.download_distributed_files()
###############################################################################
# first workflow S
workflow1 = dpf.Workflow(server=remote_servers[0])
model = dpf.Model(files[0], server=remote_servers[0])
stress1 = model.results.stress()
workflow1.add_operator(stress1)
workflow1.set_output_name("out1", stress1.outputs.fields_container)
###############################################################################
# second workflow S*2.0
workflow2 = dpf.Workflow(server=remote_servers[1])
model = dpf.Model(files[1], server=remote_servers[1])
stress2 = model.results.stress()
mul = stress2 * 2.0
workflow2.add_operator(mul)
workflow2.set_output_name("out2", mul.outputs.fields_container)
###############################################################################
# third workflow merge
local_workflow = dpf.Workflow()
merge = ops.utility.merge_fields_containers()
nodal = ops.averaging.to_nodal_fc(merge)
local_workflow.add_operators([merge, nodal])
local_workflow.set_input_name("in1", merge, 0)
local_workflow.set_input_name("in2", merge, 1)
local_workflow.set_output_name("merged", nodal.outputs.fields_container)
###############################################################################
# Connect the workflows together and get the output
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
local_workflow.connect_with(workflow1, ("out1", "in1"))
local_workflow.connect_with(workflow2, ("out2", "in2"))
fc = local_workflow.get_output("merged", dpf.types.fields_container)
fc[0].meshed_region.plot(fc[0])
| 42.944444
| 99
| 0.60414
|
6ab114307664b3c5f0c84619553abc6e07111401
| 4,265
|
py
|
Python
|
homeassistant/components/plex/server.py
|
bendavid/home-assistant
|
d96cd4c4ea1999f87db5b660ec225ad26cb3d471
|
[
"Apache-2.0"
] | 3
|
2019-10-15T16:55:31.000Z
|
2020-02-18T21:10:31.000Z
|
homeassistant/components/plex/server.py
|
bendavid/home-assistant
|
d96cd4c4ea1999f87db5b660ec225ad26cb3d471
|
[
"Apache-2.0"
] | 3
|
2021-03-19T04:20:52.000Z
|
2021-09-08T01:22:32.000Z
|
homeassistant/components/plex/server.py
|
bendavid/home-assistant
|
d96cd4c4ea1999f87db5b660ec225ad26cb3d471
|
[
"Apache-2.0"
] | 4
|
2019-10-15T21:03:53.000Z
|
2020-05-27T19:53:20.000Z
|
"""Shared class to maintain Plex server instances."""
import plexapi.myplex
import plexapi.playqueue
import plexapi.server
from requests import Session
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
from .const import (
CONF_SERVER,
CONF_SHOW_ALL_CONTROLS,
CONF_USE_EPISODE_ART,
DEFAULT_VERIFY_SSL,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
# Set default headers sent by plexapi
plexapi.X_PLEX_DEVICE_NAME = X_PLEX_DEVICE_NAME
plexapi.X_PLEX_PLATFORM = X_PLEX_PLATFORM
plexapi.X_PLEX_PRODUCT = X_PLEX_PRODUCT
plexapi.X_PLEX_VERSION = X_PLEX_VERSION
plexapi.myplex.BASE_HEADERS = plexapi.reset_base_headers()
plexapi.server.BASE_HEADERS = plexapi.reset_base_headers()
class PlexServer:
"""Manages a single Plex server connection."""
def __init__(self, server_config, options=None):
"""Initialize a Plex server instance."""
self._plex_server = None
self._url = server_config.get(CONF_URL)
self._token = server_config.get(CONF_TOKEN)
self._server_name = server_config.get(CONF_SERVER)
self._verify_ssl = server_config.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
self.options = options
def connect(self):
"""Connect to a Plex server directly, obtaining direct URL if necessary."""
def _set_missing_url():
account = plexapi.myplex.MyPlexAccount(token=self._token)
available_servers = [
(x.name, x.clientIdentifier)
for x in account.resources()
if "server" in x.provides
]
if not available_servers:
raise NoServersFound
if not self._server_name and len(available_servers) > 1:
raise ServerNotSpecified(available_servers)
server_choice = (
self._server_name if self._server_name else available_servers[0][0]
)
connections = account.resource(server_choice).connections
local_url = [x.httpuri for x in connections if x.local]
remote_url = [x.uri for x in connections if not x.local]
self._url = local_url[0] if local_url else remote_url[0]
def _connect_with_url():
session = None
if self._url.startswith("https") and not self._verify_ssl:
session = Session()
session.verify = False
self._plex_server = plexapi.server.PlexServer(
self._url, self._token, session
)
if self._token and not self._url:
_set_missing_url()
_connect_with_url()
def clients(self):
"""Pass through clients call to plexapi."""
return self._plex_server.clients()
def sessions(self):
"""Pass through sessions call to plexapi."""
return self._plex_server.sessions()
@property
def friendly_name(self):
"""Return name of connected Plex server."""
return self._plex_server.friendlyName
@property
def machine_identifier(self):
"""Return unique identifier of connected Plex server."""
return self._plex_server.machineIdentifier
@property
def url_in_use(self):
"""Return URL used for connected Plex server."""
return self._plex_server._baseurl # pylint: disable=W0212
@property
def use_episode_art(self):
"""Return use_episode_art option."""
return self.options[MP_DOMAIN][CONF_USE_EPISODE_ART]
@property
def show_all_controls(self):
"""Return show_all_controls option."""
return self.options[MP_DOMAIN][CONF_SHOW_ALL_CONTROLS]
@property
def library(self):
"""Return library attribute from server object."""
return self._plex_server.library
def playlist(self, title):
"""Return playlist from server object."""
return self._plex_server.playlist(title)
def create_playqueue(self, media, **kwargs):
"""Create playqueue on Plex server."""
return plexapi.playqueue.PlayQueue.create(self._plex_server, media, **kwargs)
| 33.849206
| 85
| 0.669402
|
bb62a89284b11d3b790e0cc8c9f335ffc2c4a851
| 843
|
py
|
Python
|
daskms/tests/test_expression.py
|
ratt-ru/dask-ms
|
becd3572f86a0ad78b55540f25fce6e129976a29
|
[
"BSD-3-Clause"
] | 7
|
2019-08-23T03:44:53.000Z
|
2021-05-06T00:51:18.000Z
|
daskms/tests/test_expression.py
|
ska-sa/dask-ms
|
ce33e7aad36eeb7c2c79093622b9776186856304
|
[
"BSD-3-Clause"
] | 76
|
2019-08-20T14:34:05.000Z
|
2022-02-10T13:21:29.000Z
|
daskms/tests/test_expression.py
|
ratt-ru/dask-ms
|
becd3572f86a0ad78b55540f25fce6e129976a29
|
[
"BSD-3-Clause"
] | 4
|
2019-10-15T13:35:19.000Z
|
2021-03-23T14:52:23.000Z
|
from daskms import xds_from_ms
from daskms.expressions import data_column_expr
from numpy.testing import assert_array_equal
def test_expressions(ms):
datasets = xds_from_ms(ms)
for i, ds in enumerate(datasets):
dims = ds.DATA.dims
datasets[i] = ds.assign(DIR1_DATA=(dims, ds.DATA.data),
DIR2_DATA=(dims, ds.DATA.data),
DIR3_DATA=(dims, ds.DATA.data))
results = [
ds.DATA.data / (
-ds.DIR1_DATA.data +
ds.DIR2_DATA.data +
ds.DIR3_DATA.data) * 4
for ds in datasets
]
string = "DATA / (-DIR1_DATA + DIR2_DATA + DIR3_DATA)*4"
expressions = data_column_expr(string, datasets)
for i, (ds, expr) in enumerate(zip(datasets, expressions)):
assert_array_equal(results[i], expr)
| 29.068966
| 63
| 0.601423
|
c85fcef7dc41ead4a4e26091083021ee8b118fc0
| 6,948
|
py
|
Python
|
tests/components/mqtt/test_device_tracker.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
tests/components/mqtt/test_device_tracker.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/mqtt/test_device_tracker.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""The tests for the MQTT device tracker platform."""
from unittest.mock import patch
import pytest
from homeassistant.components.device_tracker.const import DOMAIN, SOURCE_TYPE_BLUETOOTH
from homeassistant.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from homeassistant.setup import async_setup_component
from tests.common import async_fire_mqtt_message
@pytest.fixture(autouse=True)
def setup_comp(hass, mqtt_mock):
"""Set up mqtt component."""
pass
async def test_ensure_device_tracker_platform_validation(hass):
"""Test if platform validation was done."""
async def mock_setup_scanner(hass, config, see, discovery_info=None):
"""Check that Qos was added by validation."""
assert "qos" in config
with patch(
"homeassistant.components.mqtt.device_tracker.async_setup_scanner",
autospec=True,
side_effect=mock_setup_scanner,
) as mock_sp:
dev_id = "paulus"
topic = "/location/paulus"
assert await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: topic}}}
)
assert mock_sp.call_count == 1
async def test_new_message(hass, mock_device_tracker_conf):
"""Test new message."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
topic = "/location/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: topic}}}
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == location
async def test_single_level_wildcard_topic(hass, mock_device_tracker_conf):
"""Test single level wildcard topic."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
subscription = "/location/+/paulus"
topic = "/location/room/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: subscription}}},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == location
async def test_multi_level_wildcard_topic(hass, mock_device_tracker_conf):
"""Test multi level wildcard topic."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
subscription = "/location/#"
topic = "/location/room/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: subscription}}},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == location
async def test_single_level_wildcard_topic_not_matching(hass, mock_device_tracker_conf):
"""Test not matching single level wildcard topic."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
subscription = "/location/+/paulus"
topic = "/location/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: subscription}}},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id) is None
async def test_multi_level_wildcard_topic_not_matching(hass, mock_device_tracker_conf):
"""Test not matching multi level wildcard topic."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
subscription = "/location/#"
topic = "/somewhere/room/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: subscription}}},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id) is None
async def test_matching_custom_payload_for_home_and_not_home(
hass, mock_device_tracker_conf
):
"""Test custom payload_home sets state to home and custom payload_not_home sets state to not_home."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
topic = "/location/paulus"
payload_home = "present"
payload_not_home = "not present"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_PLATFORM: "mqtt",
"devices": {dev_id: topic},
"payload_home": payload_home,
"payload_not_home": payload_not_home,
}
},
)
async_fire_mqtt_message(hass, topic, payload_home)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_HOME
async_fire_mqtt_message(hass, topic, payload_not_home)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_NOT_HOME
async def test_not_matching_custom_payload_for_home_and_not_home(
hass, mock_device_tracker_conf
):
"""Test not matching payload does not set state to home or not_home."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
topic = "/location/paulus"
payload_home = "present"
payload_not_home = "not present"
payload_not_matching = "test"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_PLATFORM: "mqtt",
"devices": {dev_id: topic},
"payload_home": payload_home,
"payload_not_home": payload_not_home,
}
},
)
async_fire_mqtt_message(hass, topic, payload_not_matching)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state != STATE_HOME
assert hass.states.get(entity_id).state != STATE_NOT_HOME
async def test_matching_source_type(hass, mock_device_tracker_conf):
"""Test setting source type."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
topic = "/location/paulus"
source_type = SOURCE_TYPE_BLUETOOTH
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_PLATFORM: "mqtt",
"devices": {dev_id: topic},
"source_type": source_type,
}
},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id).attributes["source_type"] == SOURCE_TYPE_BLUETOOTH
| 31.87156
| 105
| 0.664508
|
f0f4b8adf244e09b4186e4ecb62fe81a5d833c3a
| 3,583
|
py
|
Python
|
solution/retrieval/elastic_engine/api.py
|
taeukkkim/temp
|
91c90fe5da4678424d8aacacbf15773dc624021d
|
[
"MIT"
] | 5
|
2021-11-10T09:44:42.000Z
|
2022-03-20T06:14:42.000Z
|
solution/retrieval/elastic_engine/api.py
|
taeukkkim/temp
|
91c90fe5da4678424d8aacacbf15773dc624021d
|
[
"MIT"
] | null | null | null |
solution/retrieval/elastic_engine/api.py
|
taeukkkim/temp
|
91c90fe5da4678424d8aacacbf15773dc624021d
|
[
"MIT"
] | 7
|
2021-11-10T23:54:03.000Z
|
2022-01-03T02:55:50.000Z
|
from typing import Any, Dict
from datasets import Dataset
from elasticsearch import Elasticsearch, helpers
from solution.args import DataArguments
from .base import ElasticSearchBase
class ESRetrieval(ElasticSearchBase):
def __init__(self, args: DataArguments):
es = Elasticsearch(args.es_host_address,
timeout=args.es_timeout,
max_retries=args.es_max_retries,
retry_on_timeout=args.es_retry_on_timeout)
super().__init__(args, es)
def retrieve(self, query_or_dataset, topk=1, eval_mode=True) -> Any:
""" Retrieve top-k documents using elastic search given dataset """
results = self.get_relevant_doc(query_or_dataset, topk)
doc_scores, doc_indices, doc_contexts = results
if isinstance(query_or_dataset, str):
doc_scores = doc_scores[0]
doc_indices = doc_indices[0]
print("[Search query]\n", query_or_dataset, "\n")
for i in range(topk):
print("Top-%d passage with score %.4f" %
(i + 1, doc_scores[i]))
print(self.get(doc_indices[i]), end="\n\n")
return (doc_scores, [self.get(doc_indices[i]) for i in range(topk)])
elif isinstance(query_or_dataset, Dataset):
cqas = self.get_dataframe_result(query_or_dataset,
doc_scores,
doc_indices,
doc_contexts,)
return self.dataframe_to_dataset(cqas, eval_mode)
elif isinstance(query_or_dataset, list):
return (doc_scores, doc_contexts)
def get(self, id) -> str:
""" Get documents using id """
doc = self.engine.get(index=self.index_name, id=id)
return doc["_source"]["document_text"]
@property
def count(self) -> int:
""" Return number of documents """
return self.engine.count(index=self.index_name)["count"]
def analyze(self, query) -> Any:
""" Analyze query text usign analyzer tokenizer """
body = {"analyzer": "my_analyzer", "text": query}
return self.engine.indices.analyze(index=self.index_name, body=body)
def make_query(self, query, topk) -> Dict:
""" Given query and top-k parameter, make query dictionary used for retrieval """
return {"query": {"match": {"document_text": query}}, "size": topk}
def get_relevant_doc(self, query_or_dataset, topk) -> Any:
""" Get relevant document using elastic search api """
if isinstance(query_or_dataset, Dataset):
query = query_or_dataset["question"]
elif isinstance(query_or_dataset, str):
query = [query_or_dataset]
elif isinstance(query_or_dataset, list):
query = query_or_dataset
else:
raise NotImplementedError
body = []
for i in range(len(query)*2):
if i % 2 == 0:
body.append({"index": self.index_name})
else:
body.append(self.make_query(query[i//2], topk))
response = self.engine.msearch(body=body)["responses"]
doc_scores = [[hit["_score"] for hit in res["hits"]["hits"]] for res in response]
doc_indices = [[hit["_id"] for hit in res["hits"]["hits"]] for res in response]
doc_contexts = [[hit["_source"]["document_text"] for hit in res["hits"]["hits"]] for res in response]
return doc_scores, doc_indices, doc_contexts
| 38.526882
| 109
| 0.595032
|
3e062a5eab3c67985f59c269eba1c07901768131
| 2,397
|
py
|
Python
|
synapse/federation/replication.py
|
mweinelt/synapse
|
42a9ea37e4c6ff9d91b530c40d366446b9fc2234
|
[
"Apache-2.0"
] | null | null | null |
synapse/federation/replication.py
|
mweinelt/synapse
|
42a9ea37e4c6ff9d91b530c40d366446b9fc2234
|
[
"Apache-2.0"
] | null | null | null |
synapse/federation/replication.py
|
mweinelt/synapse
|
42a9ea37e4c6ff9d91b530c40d366446b9fc2234
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This layer is responsible for replicating with remote home servers using
a given transport.
"""
from .federation_client import FederationClient
from .federation_server import FederationServer
from .transaction_queue import TransactionQueue
from .persistence import TransactionActions
import logging
logger = logging.getLogger(__name__)
class ReplicationLayer(FederationClient, FederationServer):
"""This layer is responsible for replicating with remote home servers over
the given transport. I.e., does the sending and receiving of PDUs to
remote home servers.
The layer communicates with the rest of the server via a registered
ReplicationHandler.
In more detail, the layer:
* Receives incoming data and processes it into transactions and pdus.
* Fetches any PDUs it thinks it might have missed.
* Keeps the current state for contexts up to date by applying the
suitable conflict resolution.
* Sends outgoing pdus wrapped in transactions.
* Fills out the references to previous pdus/transactions appropriately
for outgoing data.
"""
def __init__(self, hs, transport_layer):
self.server_name = hs.hostname
self.keyring = hs.get_keyring()
self.transport_layer = transport_layer
self.federation_client = self
self.store = hs.get_datastore()
self.handler = None
self.edu_handlers = {}
self.query_handlers = {}
self._clock = hs.get_clock()
self.transaction_actions = TransactionActions(self.store)
self._transaction_queue = TransactionQueue(hs, transport_layer)
self._order = 0
self.hs = hs
def __str__(self):
return "<ReplicationLayer(%s)>" % self.server_name
| 31.12987
| 78
| 0.717146
|
cb141103d4a454664bac8e8b370d8a5816046867
| 4,269
|
py
|
Python
|
meta_dataset/data/providers.py
|
zhoulinjun1994/meta-dataset
|
ff0eb7d242da85ff911a5ba6ab8ada30f2fa2582
|
[
"Apache-2.0"
] | null | null | null |
meta_dataset/data/providers.py
|
zhoulinjun1994/meta-dataset
|
ff0eb7d242da85ff911a5ba6ab8ada30f2fa2582
|
[
"Apache-2.0"
] | null | null | null |
meta_dataset/data/providers.py
|
zhoulinjun1994/meta-dataset
|
ff0eb7d242da85ff911a5ba6ab8ada30f2fa2582
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Interfaces for data returned by the pipelines.
TODO(lamblinp): Integrate better with pipeline.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
def compute_shot(way, labels):
"""Computes the `shot` of the episode containing labels.
Args:
way: An int constant tensor. The number of classes in the episode.
labels: A Tensor of labels of shape [batch_size].
Returns:
shots: An int 1D tensor: The number of support examples per class.
"""
class_ids = tf.reshape(tf.range(way), [way, 1])
class_labels = tf.reshape(labels, [1, -1])
is_equal = tf.equal(class_labels, class_ids)
return tf.reduce_sum(tf.cast(is_equal, tf.int32), axis=1)
def compute_unique_class_ids(class_ids):
"""Computes the unique class IDs of the episode containing `class_ids`.
Args:
class_ids: A 1D tensor representing class IDs, one per example in an
episode.
Returns:
A 1D tensor of the unique class IDs whose size is equal to the way of an
episode.
"""
return tf.unique(class_ids)[0]
class EpisodeDataset(
collections.namedtuple(
'EpisodeDataset', 'train_images, test_images, '
'train_labels, test_labels, train_class_ids, test_class_ids')):
"""Wraps an episode's data and facilitates creation of feed dict.
Args:
train_images: A Tensor of images for training.
test_images: A Tensor of images for testing.
train_labels: A 1D Tensor, the matching training labels (numbers between 0
and K-1, with K the number of classes involved in the episode).
test_labels: A 1D Tensor, the matching testing labels (numbers between 0
and K-1, with K the number of classes involved in the episode).
train_class_ids: A 1D Tensor, the matching training class ids (numbers
between 0 and N-1, with N the number of classes in the full dataset).
test_class_ids: A 1D Tensor, the matching testing class ids (numbers
between 0 and N-1, with N the number of classes in the full dataset).
"""
@property
def unique_class_ids(self):
return compute_unique_class_ids(
tf.concat((self.train_class_ids, self.test_class_ids), -1))
@property
def train_shots(self):
return compute_shot(self.way, self.train_labels)
@property
def test_shots(self):
return compute_shot(self.way, self.test_labels)
@property
def way(self):
return tf.size(self.unique_class_ids)
@property
def labels(self):
"""Return query labels to provide an episodic/batch-agnostic API."""
return self.test_labels
@property
def onehot_labels(self):
"""Return one-hot query labels to provide an episodic/batch-agnostic API."""
return self.onehot_test_labels
@property
def onehot_train_labels(self):
return tf.one_hot(self.train_labels, self.way)
@property
def onehot_test_labels(self):
return tf.one_hot(self.test_labels, self.way)
class Batch(collections.namedtuple('Batch', 'images, labels')):
"""Wraps an batch's data and facilitates creation of feed dict.
Args:
images: a Tensor of images of shape [self.batch_size] + image shape.
labels: a Tensor of labels of shape [self.batch_size].
"""
@property
def way(self):
"""Compute the way of the episode.
Returns:
way: An int constant tensor. The number of classes in the episode.
"""
episode_classes, _ = tf.unique(self.labels)
return tf.size(episode_classes)
@property
def onehot_labels(self):
return tf.to_int32(tf.one_hot(self.labels, self.way))
| 31.389706
| 80
| 0.720075
|
f09b190c089f2a528104eb0659164ef85492a533
| 1,011
|
py
|
Python
|
tests/test_datasets_sba_city_county.py
|
squatter1/skills-ml
|
0c856328b73740aa343ccdbe6c7ca8fcfb797b69
|
[
"MIT"
] | null | null | null |
tests/test_datasets_sba_city_county.py
|
squatter1/skills-ml
|
0c856328b73740aa343ccdbe6c7ca8fcfb797b69
|
[
"MIT"
] | null | null | null |
tests/test_datasets_sba_city_county.py
|
squatter1/skills-ml
|
0c856328b73740aa343ccdbe6c7ca8fcfb797b69
|
[
"MIT"
] | null | null | null |
import httpretty
import json
from mock import patch
from datasets.sba_city_county import county_lookup, URL
COUNTY_RESPONSE = json.dumps([
{
"county_name": "St. Clair",
"description": None,
"feat_class": "Populated Place",
"feature_id": "4609",
"fips_class": "C1",
"fips_county_cd": "163",
"full_county_name": "St. Clair County",
"link_title": None,
"url": "http://www.belleville.net/",
"name": "Belleville",
"primary_latitude": "38.52",
"primary_longitude": "-89.98",
"state_abbreviation": "IL",
"state_name": "Illinois"
}
])
@httpretty.activate
@patch('datasets.sba_city_county.STATE_CODES', ['IL'])
def test_county_lookup():
httpretty.register_uri(
httpretty.GET,
URL.format('IL'),
body=COUNTY_RESPONSE,
content_type='application/json'
)
lookup = county_lookup.__wrapped__()
assert lookup['IL'] == {'Belleville': ('163', 'St. Clair')}
| 25.923077
| 63
| 0.602374
|
1204b27c8bad1e136c9e9f39b147dfb7dd80ba91
| 2,720
|
py
|
Python
|
convert_to_tflite.py
|
idchlife/tf2-mobile-pose-estimation
|
fe1ba81da2112a7a79cfeb9e84c1fa3399509add
|
[
"Apache-2.0"
] | null | null | null |
convert_to_tflite.py
|
idchlife/tf2-mobile-pose-estimation
|
fe1ba81da2112a7a79cfeb9e84c1fa3399509add
|
[
"Apache-2.0"
] | null | null | null |
convert_to_tflite.py
|
idchlife/tf2-mobile-pose-estimation
|
fe1ba81da2112a7a79cfeb9e84c1fa3399509add
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Doyoung Gwak (tucan.dev@gmail.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================
#-*- coding: utf-8 -*-
import os.path
from path_manager import PROJ_HOME
from hourglass_model import HourglassModelBuilder
import tensorflow as tf
# ------------------------------------------------------
# ----------------- YOU MUST CHANGE --------------------
trained_model_file_name = "hg_1e9_20190403204228.hdf5"
# ------------------------------------------------------
# ------------------------------------------------------
def convert_model(model, model_file_path):
print('converting...')
# file path
file_name = os.path.splitext(os.path.basename(model_file_path))[0]
tflite_model_path = os.path.join(model_path, "tflite")
if not os.path.exists(tflite_model_path):
os.mkdir(tflite_model_path)
print("Create TFLite model directory:", tflite_model_path)
tflite_model_file_path = os.path.join(tflite_model_path, file_name + '.tflite')
print("TFLite model path:", tflite_model_file_path)
# Get the concrete function from the Keras model.
run_model = tf.function(lambda x: model(x))
# Save the concrete function.
concrete_func = run_model.get_concrete_function(
tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
# Create converter with concrete function.
converter = tf.lite.TFLiteConverter.from_concrete_function(concrete_func)
#converter.post_training_quantize = True
# Convert!
tflite_model = converter.convert()
# Save tflite file
file = open(tflite_model_file_path, 'wb')
file.write(tflite_model)
print('end of converting')
output_path = os.path.join(PROJ_HOME, "outputs")
model_path = os.path.join(output_path, "models")
model_file_path = os.path.join(model_path, trained_model_file_name)
print("Model path:", model_path)
if os.path.isfile(model_file_path):
print(model_file_path)
# model = load_model(model_file_path)
model_builder = HourglassModelBuilder()
model_builder.build_model()
model = model_builder.model
model.load_weights(model_file_path)
convert_model(model, model_file_path)
else:
print('no model found')
| 31.627907
| 83
| 0.678309
|
323c553bd7fc14ff015cbee1d239aa4fa4b11d71
| 2,080
|
py
|
Python
|
frsclient/service/v2/api_collection_v2.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
frsclient/service/v2/api_collection_v2.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
frsclient/service/v2/api_collection_v2.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from frsclient.service.v2.compare_service import CompareServiceV2
from frsclient.service.v2.detect_service import DetectServiceV2
from frsclient.service.v2.face_service import FaceServiceV2
from frsclient.service.v2.face_set_service import FaceSetServiceV2
from frsclient.service.v2.live_detect_service import LiveDetectServiceV2
from frsclient.service.v2.search_service import SearchServiceV2
class ApiCollectionV2(object):
"""
v2 api collection
"""
def __init__(self, service, project_id):
self._compare_service = CompareServiceV2(service, project_id)
self._detect_service = DetectServiceV2(service, project_id)
self._face_service = FaceServiceV2(service, project_id)
self._face_set_service = FaceSetServiceV2(service, project_id)
self._live_detect_service = LiveDetectServiceV2(service, project_id)
self._search_service = SearchServiceV2(service, project_id)
def get_detect_service(self):
"""Instantiates an object of 'DetectService' class.
:rtype: DetectServiceV2
"""
return self._detect_service
def get_compare_service(self):
"""Instantiates an object of 'CompareService' class.
:rtype: CompareServiceV2
"""
return self._compare_service
def get_live_detect_service(self):
"""Instantiates an object of 'LiveDetectService' class.
:rtype: LiveDetectServiceV2
"""
return self._live_detect_service
def get_search_service(self):
"""Instantiates an object of 'SearchService' class.
:rtype: SearchServiceV2
"""
return self._search_service
def get_face_set_service(self):
"""Instantiates an object of 'FaceSetService' class.
:rtype: FaceSetServiceV2
"""
return self._face_set_service
def get_face_service(self):
"""Instantiates an object of 'FaceService' class.
:rtype: FaceServiceV2
"""
return self._face_service
| 35.862069
| 77
| 0.687981
|
0dfaa417107c9ad282edb141fb90f6dea690fbe7
| 577
|
py
|
Python
|
tests/test_pyfiles.py
|
frodrigo/pyfiles
|
caae9bedc4e04458eba8b7da8210ba573206ccb2
|
[
"MIT"
] | null | null | null |
tests/test_pyfiles.py
|
frodrigo/pyfiles
|
caae9bedc4e04458eba8b7da8210ba573206ccb2
|
[
"MIT"
] | null | null | null |
tests/test_pyfiles.py
|
frodrigo/pyfiles
|
caae9bedc4e04458eba8b7da8210ba573206ccb2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pyfiles` package."""
import pytest
from pyfiles import pyfiles
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
| 22.192308
| 78
| 0.694974
|
dd17e5041c74f75c67d919f208a9ad63250777e5
| 698
|
py
|
Python
|
Model/SqlAlchemy/Company/GrowingIO.py
|
825477418/XX
|
bf46e34749394002eec0fdc65e34c339ce022cab
|
[
"MIT"
] | null | null | null |
Model/SqlAlchemy/Company/GrowingIO.py
|
825477418/XX
|
bf46e34749394002eec0fdc65e34c339ce022cab
|
[
"MIT"
] | 1
|
2020-06-03T13:54:29.000Z
|
2020-06-03T13:54:29.000Z
|
Model/SqlAlchemy/Company/GrowingIO.py
|
825477418/XX
|
bf46e34749394002eec0fdc65e34c339ce022cab
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from XX.Model.SqlAlchemy.BaseModel import *
from sqlalchemy import Column, String
from sqlalchemy.dialects.mysql import INTEGER
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Growing(Base, BaseModel):
__tablename__ = 'growing'
id = Column(INTEGER(11), primary_key=True)
kw = Column(String(30))
name = Column(String(180), unique=True)
def __init__(self, *arg, **kw):
self.id = kw.get("id", None)
self.kw = kw.get("kw", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
if __name__ == '__main__':
createInitFunction(Growing)
| 25.851852
| 55
| 0.683381
|
385b0616f0bae6b14842fee96c7d44e841c502e4
| 3,396
|
py
|
Python
|
data/preprocess_original_balanced_except_eval_random_drop_v2.py
|
Nstats/pytorch_senti_analysis_ch
|
bb01cc508c37638670b26259a6ee35c4e857f2b6
|
[
"Apache-2.0"
] | 1
|
2019-09-29T02:26:14.000Z
|
2019-09-29T02:26:14.000Z
|
data/preprocess_original_balanced_except_eval_random_drop_v2.py
|
Nstats/pytorch_senti_analysis_ch
|
bb01cc508c37638670b26259a6ee35c4e857f2b6
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:24:55.000Z
|
2021-06-02T00:24:55.000Z
|
data/preprocess_original_balanced_except_eval_random_drop_v2.py
|
Nstats/pytorch_senti_analysis_ch
|
bb01cc508c37638670b26259a6ee35c4e857f2b6
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import os
import numpy as np
import random
k = 3
def balance_data(df):
'''
:param
df: a dataframe(['id', 'titla','content', 'label']) with unbalanced data
:return:
df_out: a dataframe(['id', 'titla','content', 'label']) with balanced data
'''
df_0 = df[df['label'] == 0]
df_1 = df[df['label'] == 1]
df_2 = df[df['label'] == 2]
print('len(df_0)=', len(df_0), 'len(df_1)=', len(df_1), 'len(df_2)=', len(df_2))
maxNum = max(len(df_0), len(df_1), len(df_2))
if len(df_0) < maxNum:
tmp = df_0.sample(n=len(df_0), replace=True).values
for i in range(len(tmp)):
context_len = len(tmp[i][2])
random_num = min(int(0.3*context_len), 5)
random_index = np.random.randint(0, context_len-random_num-1, random_num)
for j in random_index:
tmp[i][2] = tmp[i][2].replace(tmp[i][2][j], '', 1)
df = df.append(pd.DataFrame(tmp, columns=['id', 'title', 'content', 'label']))
df_out = df
print('Now we have {} training samples.'.format(df_out.shape[0]))
return df_out
train_df_v1 = pd.read_csv("./data/Train_DataSet.csv")
train_label_df_v1 = pd.read_csv("./data/Train_DataSet_Label.csv")
test_df_v1 = pd.read_csv("./data/Test_DataSet.csv")
train_df_v1 = train_df_v1.merge(train_label_df_v1, on='id', how='left')
train_df_v1['label'] = train_df_v1['label'].fillna(-1)
train_df_v1 = train_df_v1[train_df_v1['label'] != -1]
train_df_v1['label'] = train_df_v1['label'].astype(int)
test_df_v1['label'] = 0
test_df_v1['content'] = test_df_v1['content'].fillna('.')
train_df_v1['content'] = train_df_v1['content'].fillna('.')
test_df_v1['title'] = test_df_v1['title'].fillna('.')
train_df_v1['title'] = train_df_v1['title'].fillna('.')
train_df_v2 = pd.read_csv("./data/Second_DataSet.csv")
train_label_df_v2 = pd.read_csv("./data/Second_DataSet_Label.csv")
test_df_v2 = pd.read_csv("./data/Second_TestDataSet.csv")
train_df_v2 = train_df_v2.merge(train_label_df_v2, on='id', how='left')
train_df_v2['label'] = train_df_v2['label'].fillna(-1)
train_df_v2 = train_df_v2[train_df_v2['label'] != -1]
train_df_v2['label'] = train_df_v2['label'].astype(int)
test_df_v2['label'] = 0
test_df_v2['content'] = test_df_v2['content'].fillna('.')
train_df_v2['content'] = train_df_v2['content'].fillna('.')
test_df_v2['title'] = test_df_v2['title'].fillna('.')
train_df_v2['title'] = train_df_v2['title'].fillna('.')
train_df = train_df_v2
test_df = test_df_v2
index = set(range(train_df.shape[0]))
K_fold = []
for i in range(k):
if i == k-1:
tmp = index
else:
tmp = random.sample(index, int(1.0 / k * train_df.shape[0]))
index = index - set(tmp)
print("Number:", len(tmp))
K_fold.append(tmp)
for i in range(k):
print("Fold", i)
if os.path.exists('./data/data_{}'.format(i)):
os.system("rm -rf ./data/data_{}".format(i))
os.system("mkdir ./data/data_{}".format(i))
dev_index = list(K_fold[i])
train_index = []
for j in range(k):
if j != i:
train_index += K_fold[j]
train_df_balanced = balance_data(train_df.iloc[train_index])
train_df_balanced.to_csv("./data/data_{}/train.csv".format(i), index=False)
train_df.iloc[dev_index].to_csv("./data/data_{}/dev.csv".format(i), index=False)
test_df.to_csv("./data/data_{}/test.csv".format(i), index=False)
| 37.733333
| 86
| 0.641343
|
bd1d2e5a79b2c822e600e740fc4216cf8c37a42f
| 13,517
|
py
|
Python
|
_posts/00CodeNote/0.DS/questions/8.0ProgrammingExercises.py
|
ocholuo/jekyll-theme-chirpy
|
55d0748ad9ea08750ae40e3ee0fa1c811063bfa4
|
[
"MIT"
] | 2
|
2020-09-26T03:32:02.000Z
|
2020-10-09T06:01:48.000Z
|
_posts/00CodeNote/0.DS/questions/8.0ProgrammingExercises.py
|
ocholuo/jekyll-theme-chirpy
|
55d0748ad9ea08750ae40e3ee0fa1c811063bfa4
|
[
"MIT"
] | null | null | null |
_posts/00CodeNote/0.DS/questions/8.0ProgrammingExercises.py
|
ocholuo/jekyll-theme-chirpy
|
55d0748ad9ea08750ae40e3ee0fa1c811063bfa4
|
[
"MIT"
] | null | null | null |
# DsLearn
# 8.26. Programming Exercises
# https://runestone.academy/runestone/books/published/pythonds/Graphs/Exercises.html
class Vertex:
def __init__(self, key) -> None:
self.id = key
self.connectedTo = {}
self.color = 'white'
self.distance = 0
self.pred = None
def __str__(self) -> str:
return str(self.id) + ' connectedTo: ' + str([x.id for x in self.connectedTo])
def addNeighbor(self, nbr, weight=0):
self.connectedTo[nbr] = weight
def getConnections(self):
return self.connectedTo.keys()
def getId(self):
return self.id
def getWeight(self, nbr):
return self.connectedTo[nbr]
def setColor(self, color):
self.color = color
return self.color
def getColor(self):
return self.color
def setDistance(self, number):
self.distance = number
return self.distance
def getDistance(self):
return self.distance
def setPred(self, pred):
self.pred = pred
return self.pred
class GraphyAM:
def __init__(self) -> None:
self.vertList = {}
self.numVertices = 0
# #adjacency matrix of size 10x10 initialize with 0
self.am = [[0 for column in range(6)]for row in range(6)]
def __iter__(self):
return iter(self.vertList.values())
def __contains__(self, n):
return n in self.vertList
def getIndex(self, key):
if key not in self.vertList.keys():
print("Vertex {0} not present in Graph.".format(key))
index = -1
else:
newVertex = self.vertList[key]
return newVertex
# {"id":vertex}
def addVertex(self, key):
if key not in self.vertList.keys():
print("Vertex {0} not present in Graph, adding it automatically.".format(key))
newVertex = Vertex(key)
self.vertList[key] = newVertex
self.numVertices += 1
else:
newVertex = self.vertList[key]
return newVertex
def getVertex(self, n):
if n in self.vertList:
return self.vertList[n]
else: return None
def getVertices(self):
# returns the names of all of the vertices in the graph
return self.vertList.keys()
def addEdge(self, f,t, weight=0):
if f not in self.vertList:
newVertex=self.addVertex(f)
if t not in self.vertList:
newVertex=self.addVertex(t)
# action on the Vertex property
fvert=self.getVertex(f)
tvert=self.getVertex(t)
self.vertList[f].addNeighbor(tvert, weight)
# for index
n = 0
indexF = 0
indexT = 0
print(fvert.id, tvert.id)
for key in self.vertList.keys():
if fvert.id == key:
indexF = n
if tvert.id == key:
indexT = n
n+=1
print("indexF", indexF, "indexT", indexT)
self.am[indexT][indexF] = weight
def print_graph(self):
print("\n")
name_str = ''
for key in self.vertList.keys():
name_str += key + ' ,'
name_list = name_str.replace(" ,", "")
print(name_list)
row = 0
print(" " , name_str)
for i in self.am:
# print(row)
if row < self.numVertices:
print(name_list[row], i)
row += 1
else:
print("0", i)
class GraphyAL:
def __init__(self) -> None:
self.vertList = {}
self.numVertices = 0
def __iter__(self):
return iter(self.vertList.values())
def __contains__(self, n):
return n in self.vertList
# {"id":vertex}
def addVertex(self, key):
if key not in self.vertList.keys():
print("Vertex {0} not present in Graph, adding it automatically.".format(key))
newVertex = Vertex(key)
self.vertList[key] = newVertex
self.numVertices += 1
return self.vertList[key]
def getVertex(self, n):
if n in self.vertList.keys():
return n
else: return None
def getVertexitem(self, n):
if n in self.vertList.keys():
return self.vertList[n]
else: return None
def getVertices(self):
# returns the names of all of the vertices in the graph
return self.vertList.keys()
def addEdge(self, f,t, weight=0):
if f not in self.vertList.keys():
fvert = self.addVertex(f)
if t not in self.vertList.keys():
tvert = self.addVertex(t)
fvert = self.vertList[f]
tvert = self.vertList[t]
fvert.addNeighbor(tvert, weight)
def print_list(self):
print(self.vertList.keys())
print("From To Cost")
for id in self.vertList.keys():
vert = self.vertList[id]
connectList = vert.connectedTo
for i in connectList:
print(id, " ", i.id, " ", vert.getWeight(i))
# -------------------------------------- Excercises -------------------------------------------------
# Modify the depth first search function to produce a topological sort.
from pythonds.basic import Stack
def dfs_topo(g, vertextargetid):
vertex_list = g.vertList
vertextarget = vertex_list[vertextargetid]
stack_list = Stack()
stack_list.push(vertextarget)
output = Stack()
while not stack_list.isEmpty():
currentVert = stack_list.pop()
if currentVert.getColor() == 'white':
currentVert.setColor('grey')
childVert = currentVert.getConnections()
for vertexs in childVert:
if vertexs.getColor() == 'white':
stack_list.push(vertexs)
currentVert.setColor('black')
output.push(currentVert)
for i in range(output.size()):
print(output.pop())
# g = GraphyAL()
# g.addEdge('1', '2', 10)
# g.addEdge('2', '3', 7)
# g.addEdge('3', '4', 7)
# g.addEdge('4', '5', 7)
# g.addEdge('5', '6', 13)
# g.print_list()
# print(g.vertList)
# dfs_topo(g, '1')
# -------------------------------------- Excercises -------------------------------------------------
# Modify the depth first search to produce strongly connected components.
from pythonds.basic import Stack
def dfs_scc(g, vertextargetid):
vertex_list = g.vertList
vertextarget = vertex_list[vertextargetid]
stack_list = Stack()
stack_list.push(vertextarget)
output = Stack()
while not stack_list.isEmpty():
currentVert = stack_list.pop()
if currentVert.getColor() == 'white':
currentVert.setColor('grey')
childVert = currentVert.getConnections()
for vertexs in childVert:
if vertexs.getColor() == 'white':
stack_list.push(vertexs)
currentVert.setColor('black')
output.push(currentVert)
for i in range(output.size()):
print(output.pop())
# g = GraphyAL()
# g.addEdge('1', '2', 10)
# g.addEdge('1', '3', 15)
# g.addEdge('2', '3', 7)
# g.addEdge('3', '4', 7)
# g.addEdge('3', '6', 10)
# g.addEdge('4', '5', 7)
# g.addEdge('6', '4', 5)
# g.addEdge('1', '6', 5)
# g.addEdge('5', '6', 13)
# dfs_topo(g, '1')
# -------------------------------------- Excercises -------------------------------------------------
# Python program to find strongly connected components in a given
# directed graph using Tarjan's algorithm (single DFS)
# Complexity : O(V+E)
from pythonds.basic import Stack
def run(g):
articulationPoiny_list = [False] * g.numVertices
visitTime = [-1] * g.numVertices
lowTime = [-1] * g.numVertices
visited_list = []
time = 0
for i in range(g.numVertices):
if visitTime[i] == -1:
dfs_scc(g, i, time, visitTime, lowTime, articulationPoiny_list, visited_list)
def dfs_scc(g, i, time, visitTime, lowTime, articulationPoiny_list, visited_list):
print(i)
vertex_ids = list(g.getVertices())
vertex_id = vertex_ids[i]
vertex_s = g.getVertexitem(vertex_id)
visited_list.append(vertex_s)
visitTime[i] = time
lowTime[i] = time
time += 1
v_child = vertex_s.connectedTo
for v in v_child:
# child is where it come from, seen before
if v not in visited_list:
i += 1
dfs_scc(g, i, time, visitTime, lowTime, articulationPoiny_list, visited_list)
v.scc_parent = vertex_s
# elif:
# # meet back edge
# if v.parent != vertex_s:
# if (vertex_s.parent == None and vertex_s.child == 2) or vertex_s.visitTime <= v.parent.lowTime:
# articulationPoiny_list.append[v]
# else:
# vertex_s.lowTime = min(vertex_s.lowTime, v.lowTime)
# vertex_s.lowTime = min(vertex_s.lowTime, v.lowTime)
# if (vertex_s.parent == None and vertex_s.child == 2) or vertex_s.visitTime <= v.parent.lowTime:
# articulationPoiny_list.append[v]
g = GraphyAL()
g.addEdge('1', '2', 10)
g.addEdge('1', '3', 15)
g.addEdge('2', '3', 7)
g.addEdge('3', '4', 7)
g.addEdge('3', '6', 10)
g.addEdge('4', '5', 7)
g.addEdge('6', '4', 5)
g.addEdge('1', '6', 5)
g.addEdge('5', '6', 13)
# print(g.numVertices)
run(g)
# solution 2
# Python program to find strongly connected components in a given
# directed graph using Tarjan's algorithm (single DFS)
#Complexity : O(V+E)
from collections import defaultdict
#This class represents an directed graph
# using adjacency list representation
class Graph:
def __init__(self,vertices):
#No. of vertices
self.V= vertices
# default dictionary to store graph
self.graph = defaultdict(list)
self.Time = 0
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
'''A recursive function that find finds and prints strongly connected
components using DFS traversal
u --> The vertex to be visited next
disc[] --> Stores discovery times of visited vertices
low[] -- >> earliest visited vertex (the vertex with minimum
discovery time) that can be reached from subtree
rooted with current vertex
st -- >> To store all the connected ancestors (could be part
of SCC)
stackMember[] --> bit/index array for faster check whether
a node is in stack
'''
def SCCUtil(self,u, low, disc, stackMember, st):
print("-----------------u:", u)
# Initialize discovery time and low value
disc[u] = self.Time
low[u] = self.Time
self.Time += 1
stackMember[u] = True
st.append(u)
# Go through all vertices adjacent to this
for v in self.graph[u]:
print("---------v:", v)
# If v is not visited yet, then recur for it
if disc[v] == -1 :
self.SCCUtil(v, low, disc, stackMember, st)
# Check if the subtree rooted with v has a connection to
# one of the ancestors of u
# Case 1 (per above discussion on Disc and Low value)
low[u] = min(low[u], low[v])
# elif stackMember[v] == True:
# '''Update low value of 'u' only if 'v' is still in stack
# (i.e. it's a back edge, not cross edge).
# Case 2 (per above discussion on Disc and Low value) '''
# low[u] = min(low[u], disc[v])
low[u] = min(low[u], disc[v])
print("last check")
# head node found, pop the stack and print an SCC
w = -1 #To store stack extracted vertices
print("---------u:", u, "low[u]:", low[u], "disc[u]:", disc[u])
if low[u] == disc[u]:
while w != u:
w = st.pop()
print(w),
stackMember[w] = False
print("")
#The function to do DFS traversal.
# It uses recursive SCCUtil()
def SCC(self):
# Mark all the vertices as not visited
# and Initialize parent and visited,
# and ap(articulation point) arrays
disc = [-1] * (self.V)
low = [-1] * (self.V)
stackMember = [False] * (self.V)
st =[]
# Call the recursive helper function
# to find articulation points
# in DFS tree rooted with vertex 'i'
for i in range(self.V):
if disc[i] == -1:
self.SCCUtil(i, low, disc, stackMember, st)
# Create a graph given in the above diagram
g1 = Graph(5)
g1.addEdge(1, 0)
g1.addEdge(0, 2)
g1.addEdge(2, 1)
g1.addEdge(0, 3)
g1.addEdge(3, 4)
print("SSC in first graph ")
g1.SCC()
# -------------------------------------- Excercises -------------------------------------------------
# -------------------------------------- Excercises -------------------------------------------------
# -------------------------------------- Excercises -------------------------------------------------
# -------------------------------------- Excercises -------------------------------------------------
| 29.904867
| 125
| 0.531331
|
847085a4d2899f92c85e606bf771352ac117620b
| 372
|
py
|
Python
|
tests/app1/management/commands/dummy.py
|
tkhyn/django-mcmo
|
cef44217ef0dcb16ef9ffb0f6492a0be050d7668
|
[
"MIT"
] | null | null | null |
tests/app1/management/commands/dummy.py
|
tkhyn/django-mcmo
|
cef44217ef0dcb16ef9ffb0f6492a0be050d7668
|
[
"MIT"
] | null | null | null |
tests/app1/management/commands/dummy.py
|
tkhyn/django-mcmo
|
cef44217ef0dcb16ef9ffb0f6492a0be050d7668
|
[
"MIT"
] | null | null | null |
from optparse import make_option
from tests.app0.management.commands.dummy import Command as Command0
class Command(Command0):
option_list = (make_option('--dummy-one', action='store_true',
help="App1 dummy command option"),)
def handle_noargs(self, **options):
super(Command, self).handle_noargs(**options)
| 28.615385
| 69
| 0.653226
|
0d9a5b443618ba31614269bd6001e88c0a02ddf9
| 52,597
|
py
|
Python
|
alloy.py
|
DensoITLab/ispc
|
d4a8afd6e8fe04969a26f69bca7e0c4d6f3ecd99
|
[
"BSD-3-Clause"
] | 1
|
2017-02-18T12:59:45.000Z
|
2017-02-18T12:59:45.000Z
|
alloy.py
|
DensoITLab/ispc
|
d4a8afd6e8fe04969a26f69bca7e0c4d6f3ecd99
|
[
"BSD-3-Clause"
] | null | null | null |
alloy.py
|
DensoITLab/ispc
|
d4a8afd6e8fe04969a26f69bca7e0c4d6f3ecd99
|
[
"BSD-3-Clause"
] | 1
|
2019-10-02T02:38:58.000Z
|
2019-10-02T02:38:58.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2013-2016, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# // Author: Filippov Ilia
def tail_and_save(file_in, file_out, tail = 100):
with open(file_in, 'r') as f_in:
lines = f_in.readlines()[-tail:]
with open(file_out, 'w') as f_out:
f_out.writelines(lines)
def attach_mail_file(msg, filename, name, tail = -1):
if os.path.exists(filename):
if tail > 0:
tail_and_save(filename, filename + '.tail', tail)
fp = open(filename + '.tail', "rb")
else:
fp = open(filename, "rb")
to_attach = MIMEBase("application", "octet-stream")
to_attach.set_payload(fp.read())
encode_base64(to_attach)
to_attach.add_header("Content-Disposition", "attachment", filename=name)
fp.close()
msg.attach(to_attach)
def setting_paths(llvm, ispc, sde):
if llvm != "":
os.environ["LLVM_HOME"]=llvm
if ispc != "":
os.environ["ISPC_HOME"]=ispc
if sde != "":
os.environ["SDE_HOME"]=sde
def get_sde():
sde_exe = ""
PATH_dir = string.split(os.getenv("PATH"), os.pathsep)
if current_OS == "Windows":
sde_n = "sde.exe"
else:
sde_n = "sde"
for counter in PATH_dir:
if os.path.exists(counter + os.sep + sde_n) and sde_exe == "":
sde_exe = counter + os.sep + sde_n
if os.environ.get("SDE_HOME") != None:
if os.path.exists(os.environ.get("SDE_HOME") + os.sep + sde_n):
sde_exe = os.environ.get("SDE_HOME") + os.sep + sde_n
return sde_exe
def check_LLVM(which_LLVM):
answer = []
if which_LLVM[0] == " ":
return answer
p = os.environ["LLVM_HOME"]
for i in range(0,len(which_LLVM)):
if not os.path.exists(p + os.sep + "bin-" + which_LLVM[i] + os.sep + "bin"):
answer.append(which_LLVM[i])
return answer
def try_do_LLVM(text, command, from_validation):
if from_validation == True:
text = text + "\n"
print_debug("Trying to " + text, from_validation, alloy_build)
postfix = ""
if current_OS == "Windows":
postfix = " 1>> " + alloy_build + " 2>&1"
else:
postfix = " >> " + alloy_build + " 2>> " + alloy_build
if os.system(command + postfix) != 0:
print_debug("ERROR.\n", from_validation, alloy_build)
if options.notify != "":
msg = MIMEMultipart()
attach_mail_file(msg, stability_log, "stability.log")
send_mail("ERROR: Non-zero exit status while executing " + command + ". Examine build log for more information.", msg)
error("can't " + text, 1)
print_debug("DONE.\n", from_validation, alloy_build)
def build_LLVM(version_LLVM, revision, folder, tarball, debug, selfbuild, extra, from_validation, force, make, gcc_toolchain_path):
print_debug("Building LLVM. Version: " + version_LLVM + ". ", from_validation, alloy_build)
if revision != "":
print_debug("Revision: " + revision + ".\n", from_validation, alloy_build)
else:
print_debug("\n", from_validation, alloy_build)
# Here we understand what and where do we want to build
current_path = os.getcwd()
llvm_home = os.environ["LLVM_HOME"]
make_sure_dir_exists(llvm_home)
os.chdir(llvm_home)
FOLDER_NAME=version_LLVM
if version_LLVM == "trunk":
SVN_PATH="trunk"
if version_LLVM == "3.9":
SVN_PATH="branches/release_39"
version_LLVM = "3_9"
if version_LLVM == "3.8":
SVN_PATH="tags/RELEASE_381/final"
version_LLVM = "3_8"
if version_LLVM == "3.7":
SVN_PATH="tags/RELEASE_370/final"
version_LLVM = "3_7"
if version_LLVM == "3.6":
SVN_PATH="tags/RELEASE_362/final"
version_LLVM = "3_6"
if version_LLVM == "3.5":
SVN_PATH="tags/RELEASE_351/final"
version_LLVM = "3_5"
if version_LLVM == "3.4":
SVN_PATH="tags/RELEASE_34/dot2-final"
version_LLVM = "3_4"
if version_LLVM == "3.3":
SVN_PATH="tags/RELEASE_33/final"
version_LLVM = "3_3"
if version_LLVM == "3.2":
SVN_PATH="tags/RELEASE_32/final"
version_LLVM = "3_2"
if revision != "":
FOLDER_NAME = FOLDER_NAME + "_" + revision
revision = "-" + revision
if folder == "":
folder = FOLDER_NAME
if debug == True:
folder = folder + "dbg"
LLVM_SRC="llvm-" + folder
LLVM_BUILD="build-" + folder
LLVM_BIN="bin-" + folder
if os.path.exists(LLVM_BIN + os.sep + "bin") and not force:
error("you have folder " + LLVM_BIN + ".\nIf you want to rebuild use --force", 1)
LLVM_BUILD_selfbuild = LLVM_BUILD + "_temp"
LLVM_BIN_selfbuild = LLVM_BIN + "_temp"
common.remove_if_exists(LLVM_SRC)
common.remove_if_exists(LLVM_BUILD)
common.remove_if_exists(LLVM_BIN)
# Starting with MacOS 10.9 Maverics, we depend on XCode being installed, as it contains C and C++ library headers.
# sysroot trick below helps finding C headers. For C++ we just check out libc++ sources.
mac_system_root = ""
if current_OS == "MacOS" and int(current_OS_version.split(".")[0]) >= 13:
search_path = string.split(os.environ["PATH"], os.pathsep)
found_xcrun = False
for path in search_path:
if os.path.exists(os.path.join(path, "xcrun")):
found_xcrun = True
if found_xcrun:
mac_system_root = " --with-default-sysroot=`xcrun --show-sdk-path`"
else:
error("Can't find XCode (xcrun tool) - it's required on MacOS 10.9 and newer", 1)
if selfbuild:
common.remove_if_exists(LLVM_BUILD_selfbuild)
common.remove_if_exists(LLVM_BIN_selfbuild)
print_debug("Using folders: " + LLVM_SRC + " " + LLVM_BUILD + " " + LLVM_BIN + " in " +
llvm_home + "\n", from_validation, alloy_build)
# load llvm
if tarball == "":
try_do_LLVM("load LLVM from http://llvm.org/svn/llvm-project/llvm/" + SVN_PATH + " ",
"svn co " + revision + " http://llvm.org/svn/llvm-project/llvm/" + SVN_PATH + " " + LLVM_SRC,
from_validation)
os.chdir(LLVM_SRC + "/tools")
try_do_LLVM("load clang from http://llvm.org/svn/llvm-project/cfe/" + SVN_PATH + " ",
"svn co " + revision + " http://llvm.org/svn/llvm-project/cfe/" + SVN_PATH + " clang",
from_validation)
os.chdir("..")
if current_OS == "MacOS" and int(current_OS_version.split(".")[0]) >= 13:
# Starting with MacOS 10.9 Maverics, the system doesn't contain headers for standard C++ library and
# the default library is libc++, bit libstdc++. The headers are part of XCode now. But we are checking out
# headers as part of LLVM source tree, so they will be installed in clang location and clang will be able
# to find them. Though they may not match to the library installed in the system, but seems that this should
# not happen.
# Note, that we can also build a libc++ library, but it must be on system default location or should be passed
# to the linker explicitly (either through command line or environment variables). So we are not doing it
# currently to make the build process easier.
os.chdir("projects")
try_do_LLVM("load libcxx http://llvm.org/svn/llvm-project/libcxx/" + SVN_PATH + " ",
"svn co " + revision + " http://llvm.org/svn/llvm-project/libcxx/" + SVN_PATH + " libcxx",
from_validation)
os.chdir("..")
if extra == True:
os.chdir("tools/clang/tools")
try_do_LLVM("load extra clang extra tools ",
"svn co " + revision + " http://llvm.org/svn/llvm-project/clang-tools-extra/" + SVN_PATH + " extra",
from_validation)
os.chdir("../../../projects")
try_do_LLVM("load extra clang compiler-rt ",
"svn co " + revision + " http://llvm.org/svn/llvm-project/compiler-rt/" + SVN_PATH + " compiler-rt",
from_validation)
os.chdir("..")
else:
tar = tarball.split(" ")
os.makedirs(LLVM_SRC)
os.chdir(LLVM_SRC)
try_do_LLVM("untar LLVM from " + tar[0] + " ",
"tar -xvzf " + tar[0] + " --strip-components 1", from_validation)
os.chdir("./tools")
os.makedirs("clang")
os.chdir("./clang")
try_do_LLVM("untar clang from " + tar[1] + " ",
"tar -xvzf " + tar[1] + " --strip-components 1", from_validation)
os.chdir("../../")
# paching llvm
patches = glob.glob(os.environ["ISPC_HOME"] + os.sep + "llvm_patches" + os.sep + "*.*")
for patch in patches:
if version_LLVM in os.path.basename(patch):
if current_OS != "Windows":
try_do_LLVM("patch LLVM with patch " + patch + " ", "patch -p0 < " + patch, from_validation)
else:
try_do_LLVM("patch LLVM with patch " + patch + " ", "C:\\gnuwin32\\bin\\patch.exe -p0 < " + patch, from_validation)
os.chdir("../")
# configuring llvm, build first part of selfbuild
os.makedirs(LLVM_BUILD)
os.makedirs(LLVM_BIN)
selfbuild_compiler = ""
LLVM_configure_capable = ["3_2", "3_3", "3_4", "3_5", "3_6", "3_7"]
if selfbuild:
print_debug("Making selfbuild and use folders " + LLVM_BUILD_selfbuild + " and " +
LLVM_BIN_selfbuild + "\n", from_validation, alloy_build)
os.makedirs(LLVM_BUILD_selfbuild)
os.makedirs(LLVM_BIN_selfbuild)
os.chdir(LLVM_BUILD_selfbuild)
if version_LLVM not in LLVM_configure_capable:
# TODO: mac_root
try_do_LLVM("configure release version for selfbuild ",
"cmake -G Unix\ Makefiles" + " -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" +
" -DCMAKE_INSTALL_PREFIX=" + llvm_home + "/" + LLVM_BIN_selfbuild +
" -DCMAKE_BUILD_TYPE=Release" +
" -DLLVM_ENABLE_ASSERTIONS=ON" +
((" -DGCC_INSTALL_PREFIX=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
((" -DCMAKE_C_COMPILER=" + gcc_toolchain_path+"/bin/gcc") if gcc_toolchain_path != "" else "") +
((" -DCMAKE_CXX_COMPILER=" + gcc_toolchain_path+"/bin/g++") if gcc_toolchain_path != "" else "") +
" -DLLVM_TARGETS_TO_BUILD=NVPTX\;X86" +
" ../" + LLVM_SRC,
from_validation)
selfbuild_compiler = (" -DCMAKE_C_COMPILER=" +llvm_home+ "/" + LLVM_BIN_selfbuild + "/bin/clang " +
" -DCMAKE_CXX_COMPILER="+llvm_home+ "/" + LLVM_BIN_selfbuild + "/bin/clang++ ")
else:
try_do_LLVM("configure release version for selfbuild ",
"../" + LLVM_SRC + "/configure --prefix=" + llvm_home + "/" +
LLVM_BIN_selfbuild + " --enable-optimized" +
" --enable-targets=x86,x86_64,nvptx" +
((" --with-gcc-toolchain=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
mac_system_root,
from_validation)
selfbuild_compiler = ("CC=" +llvm_home+ "/" + LLVM_BIN_selfbuild + "/bin/clang " +
"CXX="+llvm_home+ "/" + LLVM_BIN_selfbuild + "/bin/clang++ ")
try_do_LLVM("build release version for selfbuild ",
make, from_validation)
try_do_LLVM("install release version for selfbuild ",
"make install",
from_validation)
os.chdir("../")
print_debug("Now we have compiler for selfbuild: " + selfbuild_compiler + "\n", from_validation, alloy_build)
os.chdir(LLVM_BUILD)
if debug == False:
if current_OS != "Windows":
if version_LLVM not in LLVM_configure_capable:
# TODO: mac_root
try_do_LLVM("configure release version ",
"cmake -G Unix\ Makefiles" + " -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" +
selfbuild_compiler +
" -DCMAKE_INSTALL_PREFIX=" + llvm_home + "/" + LLVM_BIN +
" -DCMAKE_BUILD_TYPE=Release" +
" -DLLVM_ENABLE_ASSERTIONS=ON" +
((" -DGCC_INSTALL_PREFIX=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
((" -DCMAKE_C_COMPILER=" + gcc_toolchain_path+"/bin/gcc") if gcc_toolchain_path != "" and selfbuild_compiler == "" else "") +
((" -DCMAKE_CXX_COMPILER=" + gcc_toolchain_path+"/bin/g++") if gcc_toolchain_path != "" and selfbuild_compiler == "" else "") +
" -DLLVM_TARGETS_TO_BUILD=NVPTX\;X86" +
" ../" + LLVM_SRC,
from_validation)
else:
try_do_LLVM("configure release version ",
selfbuild_compiler + "../" + LLVM_SRC + "/configure --prefix=" + llvm_home + "/" +
LLVM_BIN + " --enable-optimized" +
" --enable-targets=x86,x86_64,nvptx" +
((" --with-gcc-toolchain=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
mac_system_root,
from_validation)
else:
try_do_LLVM("configure release version ",
'cmake -G "Visual Studio 12" -DCMAKE_INSTALL_PREFIX="..\\'+ LLVM_BIN +
'" -DLLVM_LIT_TOOLS_DIR="C:\\gnuwin32\\bin" ..\\' + LLVM_SRC,
from_validation)
else:
if version_LLVM not in LLVM_configure_capable:
# TODO: mac_root
try_do_LLVM("configure debug version ",
"cmake -G Unix\ Makefiles" + " -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" +
selfbuild_compiler +
" -DCMAKE_INSTALL_PREFIX=" + llvm_home + "/" + LLVM_BIN +
" -DCMAKE_BUILD_TYPE=Debug" +
" -DLLVM_ENABLE_ASSERTIONS=ON" +
((" -DGCC_INSTALL_PREFIX=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
((" -DCMAKE_C_COMPILER=" + gcc_toolchain_path+"/bin/gcc") if gcc_toolchain_path != "" and selfbuild_compiler == "" else "") +
((" -DCMAKE_CXX_COMPILER=" + gcc_toolchain_path+"/bin/g++") if gcc_toolchain_path != "" and selfbuild_compiler == "" else "") +
" -DLLVM_TARGETS_TO_BUILD=NVPTX\;X86" +
" ../" + LLVM_SRC,
from_validation)
else:
try_do_LLVM("configure debug version ",
selfbuild_compiler + "../" + LLVM_SRC + "/configure --prefix=" + llvm_home + "/" + LLVM_BIN +
" --enable-debug-runtime --enable-debug-symbols --enable-keep-symbols" +
" --enable-targets=x86,x86_64,nvptx" +
((" --with-gcc-toolchain=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
mac_system_root,
from_validation)
# building llvm
if current_OS != "Windows":
try_do_LLVM("build LLVM ", make, from_validation)
try_do_LLVM("install LLVM ", "make install", from_validation)
else:
try_do_LLVM("build LLVM and than install LLVM ", "msbuild INSTALL.vcxproj /V:m /p:Platform=Win32 /p:Configuration=Release /t:rebuild", from_validation)
os.chdir(current_path)
def unsupported_llvm_targets(LLVM_VERSION):
prohibited_list = {"3.2":["avx512knl-i32x16", "avx512skx-i32x16"],
"3.3":["avx512knl-i32x16", "avx512skx-i32x16"],
"3.4":["avx512knl-i32x16", "avx512skx-i32x16"],
"3.5":["avx512knl-i32x16", "avx512skx-i32x16"],
"3.6":["avx512knl-i32x16", "avx512skx-i32x16"],
"3.7":["avx512skx-i32x16"],
"3.8":[],
"3.9":[],
"4.0":[],
"trunk":[]}
return prohibited_list[LLVM_VERSION]
def check_targets():
answer = []
answer_generic = []
answer_knc = []
answer_sde = []
# check what native targets do we have
if current_OS != "Windows":
if options.ispc_build_compiler == "clang":
cisa_compiler = "clang"
elif options.ispc_build_compiler == "gcc":
cisa_compiler = "g++"
try_do_LLVM("build check_ISA", cisa_compiler + " check_isa.cpp -o check_isa.exe", True)
else:
try_do_LLVM("build check_ISA", "cl check_isa.cpp", True)
SSE2 = ["sse2-i32x4", "sse2-i32x8"]
SSE4 = ["sse4-i32x4", "sse4-i32x8", "sse4-i16x8", "sse4-i8x16"]
AVX = ["avx1-i32x4", "avx1-i32x8", "avx1-i32x16", "avx1-i64x4"]
AVX11 = ["avx1.1-i32x8","avx1.1-i32x16","avx1.1-i64x4"]
AVX2 = ["avx2-i32x8", "avx2-i32x16", "avx2-i64x4"]
KNL = ["knl-generic", "avx512knl-i32x16"]
SKX = ["avx512skx-i32x16"]
targets = [["AVX2", AVX2, False], ["AVX1.1", AVX11, False], ["AVX", AVX, False], ["SSE4", SSE4, False],
["SSE2", SSE2, False], ["KNL", KNL, False], ["SKX", SKX, False]]
f_lines = take_lines("check_isa.exe", "first")
for i in range(0,5):
if targets[i][0] in f_lines:
for j in range(i,5):
answer = targets[j][1] + answer
targets[j][2] = True
break
# generate targets for KNC
if current_OS == "Linux":
answer_knc = ["knc-generic"]
if current_OS != "Windows":
answer_generic = ["generic-4", "generic-16", "generic-8", "generic-1", "generic-32", "generic-64"]
# now check what targets we have with the help of SDE
sde_exists = get_sde()
if sde_exists == "":
error("you haven't got sde neither in SDE_HOME nor in your PATH.\n" +
"To test all platforms please set SDE_HOME to path containing SDE.\n" +
"Please refer to http://www.intel.com/software/sde for SDE download information.", 2)
return [answer, answer_generic, answer_sde, answer_knc]
# here we have SDE
f_lines = take_lines(sde_exists + " -help", "all")
for i in range(0,len(f_lines)):
if targets[6][2] == False and "skx" in f_lines[i]:
answer_sde = answer_sde + [["-skx", "avx512skx-i32x16"]]
if targets[5][2] == False and "knl" in f_lines[i]:
answer_sde = answer_sde + [["-knl", "knl-generic"], ["-knl", "avx512knl-i32x16"]]
if targets[3][2] == False and "wsm" in f_lines[i]:
answer_sde = answer_sde + [["-wsm", "sse4-i32x4"], ["-wsm", "sse4-i32x8"], ["-wsm", "sse4-i16x8"], ["-wsm", "sse4-i8x16"]]
if targets[2][2] == False and "snb" in f_lines[i]:
answer_sde = answer_sde + [["-snb", "avx1-i32x4"], ["-snb", "avx1-i32x8"], ["-snb", "avx1-i32x16"], ["-snb", "avx1-i64x4"]]
if targets[1][2] == False and "ivb" in f_lines[i]:
answer_sde = answer_sde + [["-ivb", "avx1.1-i32x8"], ["-ivb", "avx1.1-i32x16"], ["-ivb", "avx1.1-i64x4"]]
if targets[0][2] == False and "hsw" in f_lines[i]:
answer_sde = answer_sde + [["-hsw", "avx2-i32x8"], ["-hsw", "avx2-i32x16"], ["-hsw", "avx2-i64x4"]]
return [answer, answer_generic, answer_sde, answer_knc]
def build_ispc(version_LLVM, make):
current_path = os.getcwd()
os.chdir(os.environ["ISPC_HOME"])
make_ispc = "make " + options.ispc_build_compiler + " -j" + options.speed
if current_OS != "Windows":
p_temp = os.getenv("PATH")
os.environ["PATH"] = os.environ["LLVM_HOME"] + "/bin-" + version_LLVM + "/bin:" + os.environ["PATH"]
try_do_LLVM("clean ISPC for building", "make clean", True)
folder = os.environ["LLVM_HOME"] + os.sep + "llvm-"
if options.folder == "":
folder += version_LLVM
if options.debug == True:
folder += "dbg"
llvm_rev = ""
# determine LLVM revision
p = subprocess.Popen("svn info " + folder, shell=True, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(info_llvm, err) = p.communicate()
info_llvm = re.split('\n', info_llvm)
for i in info_llvm:
if len(i) > 0 and i.startswith("Last Changed Rev: "):
llvm_rev = str(i[len("Last Changed Rev: "):])
if llvm_rev != "":
common.ex_state.switch_revision(llvm_rev)
print_debug("\nBuilding ISPC with LLVM %s (%s):\n" \
% (version_LLVM, llvm_rev), False, stability_log)
else:
print_debug("Unable to retrieve LLVM revision\n", False, stability_log)
raise
try_do_LLVM("recognize LLVM revision", "svn info " + folder, True)
try_do_LLVM("build ISPC with LLVM version " + version_LLVM + " ", make_ispc, True)
os.environ["PATH"] = p_temp
else:
p_temp = os.getenv("LLVM_INSTALL_DIR")
v_temp = os.getenv("LLVM_VERSION")
os.environ["LLVM_INSTALL_DIR"] = os.environ["LLVM_HOME"] + "\\bin-" + version_LLVM
if version_LLVM == "3.2":
temp = "3_2"
if version_LLVM == "3.3":
temp = "3_3"
if version_LLVM == "3.4":
temp = "3_4"
if version_LLVM == "3.5":
temp = "3_5"
if version_LLVM == "3.6":
temp = "3_6"
if version_LLVM == "3.7":
temp = "3_7"
if version_LLVM == "3.8":
temp = "3_8"
if version_LLVM == "3.9":
temp = "3_9"
if version_LLVM == "trunk":
temp = "4_0"
os.environ["LLVM_VERSION"] = "LLVM_" + temp
try_do_LLVM("clean ISPC for building", "msbuild ispc.vcxproj /t:clean", True)
try_do_LLVM("build ISPC with LLVM version " + version_LLVM + " ", "msbuild ispc.vcxproj /V:m /p:Platform=Win32 /p:Configuration=Release /t:rebuild", True)
os.environ["LLVM_INSTALL_DIR"] = p_temp
os.environ["LLVM_VERSION"] = v_temp
os.chdir(current_path)
def execute_stability(stability, R, print_version):
stability1 = copy.deepcopy(stability)
b_temp = run_tests.run_tests(stability1, [], print_version)
temp = b_temp[0]
time = b_temp[1]
for j in range(0,4):
R[j][0] = R[j][0] + temp[j] # new_runfails, new_compfails, new_passes_runfails, new_passes_compfails
for i in range(0,len(temp[j])):
R[j][1].append(temp[4])
number_of_fails = temp[5]
number_of_new_fails = len(temp[0]) + len(temp[1])
number_of_passes = len(temp[2]) + len(temp[3])
if number_of_fails == 0:
str_fails = ". No fails"
else:
str_fails = ". Fails: " + str(number_of_fails)
if number_of_new_fails == 0:
str_new_fails = ", No new fails"
else:
str_new_fails = ", New fails: " + str(number_of_new_fails)
if number_of_passes == 0:
str_new_passes = "."
else:
str_new_passes = ", " + str(number_of_passes) + " new passes."
if stability.time:
str_time = " " + time + "\n"
else:
str_time = "\n"
print_debug(temp[4][1:-3] + stability1.ispc_flags + str_fails + str_new_fails + str_new_passes + str_time, False, stability_log)
'''
R = [[new_runfails, [new_line, new_line...]],
[new_compfails, [new_line, new_line...]],
[new_passes_runfails, [new_line, new_line...]],
[new_passes_runfails, [new_line, new_line...]]]
'''
def output_test_results(R):
ttt = ["NEW RUNFAILS: ", "NEW COMPFAILS: ", "NEW PASSES RUNFAILS: ", "NEW PASSES COMPFAILS: "]
for j in range(0, 4):
if len(R[j][0]) == 0:
print_debug("NO " + ttt[j][:-2] + "\n", False, stability_log)
else:
print_debug(ttt[j] + str(len(R[j][0])) + "\n", False, stability_log)
to_print = {}
for (fail_name, opt_str) in zip(R[j][0], R[j][1]):
if fail_name not in to_print:
to_print[fail_name] = []
to_print[fail_name].append(opt_str)
# sort
for key in to_print.keys():
to_print[key] = sorted(to_print[key])
# print out
for fail_name in sorted(to_print.keys()):
print_debug("\t" + fail_name + "\n", True, stability_log)
for opt_str in to_print[fail_name]:
print_debug("\t\t\t" + opt_str, True, stability_log)
def concatenate_test_results(R1, R2):
R = [[[],[]],[[],[]],[[],[]],[[],[]]]
for j in range(0, 4):
R[j][0] = R1[j][0] + R2[j][0]
R[j][1] = R1[j][1] + R2[j][1]
return R
def validation_run(only, only_targets, reference_branch, number, notify, update, speed_number, make, perf_llvm, time):
os.chdir(os.environ["ISPC_HOME"])
if current_OS != "Windows":
os.environ["PATH"] = os.environ["ISPC_HOME"] + ":" + os.environ["PATH"]
if options.notify != "":
common.remove_if_exists(os.environ["ISPC_HOME"] + os.sep + "notify_log.log")
msg = MIMEMultipart()
print_debug("Command: " + ' '.join(sys.argv) + "\n", False, "")
print_debug("Folder: " + os.environ["ISPC_HOME"] + "\n", False, "")
date = datetime.datetime.now()
print_debug("Date: " + date.strftime('%H:%M %d/%m/%Y') + "\n", False, "")
newest_LLVM="3.6"
msg_additional_info = ""
# *** *** ***
# Stability validation run
# *** *** ***
if ((("stability" in only) == True) or ("performance" in only) == False):
print_debug("\n\nStability validation run\n\n", False, "")
stability = common.EmptyClass()
# stability constant options
stability.save_bin = False
stability.random = False
stability.ispc_flags = ""
stability.compiler_exe = None
stability.num_jobs = speed_number
stability.verbose = False
stability.time = time
stability.non_interactive = True
stability.update = update
stability.include_file = None
stability.silent = True
stability.in_file = "." + os.sep + f_date + os.sep + "run_tests_log.log"
stability.verify = False
# stability varying options
stability.target = ""
stability.arch = ""
stability.no_opt = False
stability.wrapexe = ""
# prepare parameters of run
[targets_t, targets_generic_t, sde_targets_t, targets_knc_t] = check_targets()
rebuild = True
opts = []
archs = []
LLVM = []
targets = []
sde_targets = []
dbg_begin = 0
dbg_total = 1
# parsing option only, update parameters of run
if "-O2" in only:
opts.append(False)
if "-O0" in only:
opts.append(True)
if "debug" in only:
if not ("nodebug" in only):
dbg_begin = 1
dbg_total = 2
if "x86" in only and not ("x86-64" in only):
archs.append("x86")
if "x86-64" in only:
archs.append("x86-64")
if "native" in only:
sde_targets_t = []
for i in ["3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "trunk"]:
if i in only:
LLVM.append(i)
if "current" in only:
LLVM = [" "]
rebuild = False
else:
common.check_tools(1)
if only_targets != "":
only_targets += " "
only_targets_t = only_targets.split(" ")
if "generic" in only_targets_t:
only_targets_t.append("generic-4")
only_targets_t.append("generic-16")
while "generic" in only_targets_t:
only_targets_t.remove("generic")
for i in only_targets_t:
if i == "":
continue
err = True
for j in range(0,len(targets_t)):
if i in targets_t[j]:
targets.append(targets_t[j])
err = False
for j in range(0,len(targets_generic_t)):
if i in targets_generic_t[j]:
targets.append(targets_generic_t[j])
err = False
for j in range(0,len(sde_targets_t)):
if i in sde_targets_t[j][1]:
sde_targets.append(sde_targets_t[j])
err = False
for j in range(0,len(targets_knc_t)):
if i in targets_knc_t[j]:
targets.append(targets_knc_t[j])
err = False
if err == True:
error("You haven't sde for target " + i, 1)
else:
targets = targets_t + targets_generic_t[:-4]
sde_targets = sde_targets_t
if "build" in only:
targets = []
sde_targets = []
only = only + " stability "
# finish parameters of run, prepare LLVM
if len(opts) == 0:
opts = [False]
if len(archs) == 0:
archs = ["x86", "x86-64"]
if len(LLVM) == 0:
LLVM = [newest_LLVM, "trunk"]
gen_archs = ["x86-64"]
need_LLVM = check_LLVM(LLVM)
for i in range(0,len(need_LLVM)):
build_LLVM(need_LLVM[i], "", "", "", False, False, False, True, False, make, options.gcc_toolchain_path)
# begin validation run for stabitily
common.remove_if_exists(stability.in_file)
R = [[[],[]],[[],[]],[[],[]],[[],[]]]
print_debug("\n" + common.get_host_name() + "\n", False, stability_log)
print_debug("\n_________________________STABILITY REPORT_________________________\n", False, stability_log)
ispc_flags_tmp = stability.ispc_flags
for i in range(0,len(LLVM)):
R_tmp = [[[],[]],[[],[]],[[],[]],[[],[]]]
print_version = 2
if rebuild:
build_ispc(LLVM[i], make)
for j in range(0,len(targets)):
stability.target = targets[j]
# the target might be not supported by the chosen llvm version
if (stability.target in unsupported_llvm_targets(LLVM[i])):
print_debug("Warning: target " + stability.target + " is not supported in LLVM " + LLVM[i] + "\n", False, stability_log)
continue
# *always* specify default values for global variables on each loop iteration
stability.wrapexe = ""
stability.compiler_exe = None
# choosing right compiler for a given target
# sometimes clang++ is not avaluable. if --ispc-build-compiler = gcc we will pass in g++ compiler
if options.ispc_build_compiler == "gcc":
stability.compiler_exe = "g++"
# but 'knc/knl' generic target is supported only by icpc, so set explicitly
if ("knc-generic" in stability.target) or ("knl-generic" in stability.target):
stability.compiler_exe = "icpc"
# now set archs for targets
if ("generic" in stability.target):
arch = gen_archs
else:
arch = archs
for i1 in range(0,len(arch)):
for i2 in range(0,len(opts)):
for i3 in range(dbg_begin,dbg_total):
stability.arch = arch[i1]
stability.no_opt = opts[i2]
stability.ispc_flags = ispc_flags_tmp
if (i3 != 0):
stability.ispc_flags += " -g"
try:
execute_stability(stability, R_tmp, print_version)
except:
print_debug("ERROR: Exception in execute_stability - maybe some test subprocess terminated before it should have\n", False, stability_log)
print_version = 0
for j in range(0,len(sde_targets)):
stability.target = sde_targets[j][1]
# the target might be not supported by the chosen llvm version
if (stability.target in unsupported_llvm_targets(LLVM[i])):
print_debug("Warning: target " + stability.target + " is not supported in LLVM " + LLVM[i] + "\n", False, stability_log)
continue
# *always* specify default values for global variables on each loop iteration
stability.wrapexe = ""
stability.compiler_exe = None
# choosing right compiler for a given target
# sometimes clang++ is not avaluable. if --ispc-build-compiler = gcc we will pass in g++ compiler
if options.ispc_build_compiler == "gcc":
stability.compiler_exe = "g++"
if ("knc-generic" in stability.target) or ("knl-generic" in stability.target):
stability.compiler_exe = "icpc"
stability.wrapexe = get_sde() + " " + sde_targets[j][0] + " -- "
if ("generic" in stability.target):
arch = gen_archs
else:
arch = archs
for i1 in range(0,len(arch)):
for i2 in range(0,len(opts)):
for i3 in range(dbg_begin,dbg_total):
stability.arch = arch[i1]
stability.no_opt = opts[i2]
stability.ispc_flags = ispc_flags_tmp
if (i3 != 0):
stability.ispc_flags += " -g"
execute_stability(stability, R_tmp, print_version)
print_version = 0
# Output testing results separate for each tested LLVM version
R = concatenate_test_results(R, R_tmp)
output_test_results(R_tmp)
print_debug("\n", False, stability_log)
print_debug("\n----------------------------------------\nTOTAL:\n", False, stability_log)
output_test_results(R)
print_debug("__________________Watch stability.log for details_________________\n", False, stability_log)
if options.notify != "":
# e-mail header for performance test:
msg_additional_info += "New runfails(%d) New compfails(%d) New passes runfails(%d) New passes compfails(%d)" \
% (len(R[0][0]), len(R[1][0]), len(R[2][0]), len(R[3][0]))
attach_mail_file(msg, stability.in_file, "run_tests_log.log", 100)
attach_mail_file(msg, stability_log, "stability.log")
# *** *** ***
# Performance validation run
# *** *** ***
if ((("performance" in only) == True) or ("stability" in only) == False):
print_debug("\n\nPerformance validation run\n\n", False, "")
common.check_tools(1)
performance = common.EmptyClass()
# performance constant options
performance.number = number
performance.config = "." + os.sep + "perf.ini"
performance.path = "." + os.sep
performance.silent = True
performance.output = ""
performance.compiler = ""
performance.ref = "ispc_ref"
if current_OS == "Windows":
performance.ref = "ispc_ref.exe"
performance.perf_target = ""
performance.in_file = "." + os.sep + f_date + os.sep + "performance.log"
# prepare newest LLVM
need_LLVM = check_LLVM([newest_LLVM])
if len(need_LLVM) != 0:
build_LLVM(need_LLVM[0], "", "", "", False, False, False, True, False, make, options.gcc_toolchain_path)
if perf_llvm == False:
# prepare reference point. build both test and reference compilers
try_do_LLVM("apply git", "git branch", True)
temp4 = take_lines("git branch", "all")
for line in temp4:
if "*" in line:
current_branch = line[2:-1]
stashing = True
sys.stdout.write("Please, don't interrupt script here! You can have not sync git status after interruption!\n")
if "No local changes" in take_lines("git stash", "first"):
stashing = False
#try_do_LLVM("stash current branch ", "git stash", True)
try_do_LLVM("checkout reference branch " + reference_branch + " ", "git checkout " + reference_branch, True)
sys.stdout.write(".\n")
build_ispc(newest_LLVM, make)
sys.stdout.write(".\n")
if current_OS != "Windows":
os.rename("ispc", "ispc_ref")
else:
common.remove_if_exists("Release\\ispc_ref.exe")
os.rename("Release\\ispc.exe", "Release\\ispc_ref.exe")
try_do_LLVM("checkout test branch " + current_branch + " ", "git checkout " + current_branch, True)
if stashing:
try_do_LLVM("return current branch ", "git stash pop", True)
sys.stdout.write("You can interrupt script now.\n")
build_ispc(newest_LLVM, make)
else:
# build compiler with two different LLVM versions
if len(check_LLVM([reference_branch])) != 0:
error("you haven't got llvm called " + reference_branch, 1)
build_ispc(newest_LLVM, make)
os.rename("ispc", "ispc_ref")
build_ispc(reference_branch, make)
# begin validation run for performance. output is inserted into perf()
perf.perf(performance, [])
if options.notify != "":
attach_mail_file(msg, performance.in_file, "performance.log")
attach_mail_file(msg, "." + os.sep + "logs" + os.sep + "perf_build.log", "perf_build.log")
# dumping gathered info to the file
common.ex_state.dump(alloy_folder + "test_table.dump", common.ex_state.tt)
# sending e-mail with results
if options.notify != "":
send_mail(msg_additional_info, msg)
def send_mail(body_header, msg):
try:
fp = open(os.environ["ISPC_HOME"] + os.sep + "notify_log.log", 'rb')
f_lines = fp.readlines()
fp.close()
except:
body_header += "\nUnable to open notify_log.log: " + str(sys.exc_info()) + "\n"
print_debug("Unable to open notify_log.log: " + str(sys.exc_info()) + "\n", False, stability_log)
body = "Hostname: " + common.get_host_name() + "\n\n"
if not sys.exc_info()[0] == None:
body += "ERROR: Exception(last) - " + str(sys.exc_info()) + '\n'
body += body_header + '\n'
for i in range(0, len(f_lines)):
body += f_lines[i][:-1]
body += ' \n'
attach_mail_file(msg, alloy_build, "alloy_build.log", 100) # build.log is always being sent
smtp_server = os.environ["SMTP_ISPC"]
msg['Subject'] = options.notify_subject
msg['From'] = "ISPC_test_system"
msg['To'] = options.notify
text = MIMEText(body, "", "KOI-8")
msg.attach(text)
s = smtplib.SMTP(smtp_server)
s.sendmail(options.notify, options.notify.split(" "), msg.as_string())
s.quit()
def Main():
global current_OS
global current_OS_version
current_OS_version = platform.release()
if (platform.system() == 'Windows' or 'CYGWIN_NT' in platform.system()) == True:
current_OS = "Windows"
else:
if (platform.system() == 'Darwin'):
current_OS = "MacOS"
else:
current_OS = "Linux"
if (options.build_llvm == False and options.validation_run == False):
parser.print_help()
exit(0)
# set appropriate makefile target
# gcc and g++ options are equal and added for ease of use
if options.ispc_build_compiler != "clang" and \
options.ispc_build_compiler != "gcc":
error("unknow option for --ispc-build-compiler: " + options.ispc_build_compiler, 1)
parser.print_help()
exit(0)
if options.notify != "":
# in case 'notify' option is used but build (in '-b' for example) failed we do not want to have trash in our message body
# NOTE! 'notify.log' must also be cleaned up at the beginning of every message sending function, i.e. in 'validation_run()'
common.remove_if_exists(os.environ["ISPC_HOME"] + os.sep + "notify_log.log")
setting_paths(options.llvm_home, options.ispc_home, options.sde_home)
if os.environ.get("LLVM_HOME") == None:
error("you have no LLVM_HOME", 1)
if os.environ.get("ISPC_HOME") == None:
error("you have no ISPC_HOME", 1)
if options.notify != "":
if os.environ.get("SMTP_ISPC") == None:
error("you have no SMTP_ISPC in your environment for option notify", 1)
if options.only != "":
test_only_r = " 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 trunk current build stability performance x86 x86-64 x86_64 -O0 -O2 native debug nodebug "
test_only = options.only.split(" ")
for iterator in test_only:
if not (" " + iterator + " " in test_only_r):
error("unknown option for only: " + iterator, 1)
if current_OS == "Windows":
if options.debug == True or options.selfbuild == True or options.tarball != "":
error("Debug, selfbuild and tarball options are unsupported on windows", 1)
global f_date
f_date = "logs"
common.remove_if_exists(f_date)
os.makedirs(f_date)
global alloy_folder
alloy_folder = os.getcwd() + os.sep + f_date + os.sep
global alloy_build
alloy_build = alloy_folder + "alloy_build.log"
global stability_log
stability_log = alloy_folder + "stability.log"
current_path = os.getcwd()
make = "make -j" + options.speed
if os.environ["ISPC_HOME"] != os.getcwd():
error("you ISPC_HOME and your current path are different! (" + os.environ["ISPC_HOME"] + " is not equal to " + os.getcwd() +
")\n", 2)
if options.perf_llvm == True:
if options.branch == "master":
options.branch = "trunk"
try:
start_time = time.time()
if options.build_llvm:
build_LLVM(options.version, options.revision, options.folder, options.tarball,
options.debug, options.selfbuild, options.extra, False, options.force, make, options.gcc_toolchain_path)
if options.validation_run:
validation_run(options.only, options.only_targets, options.branch,
options.number_for_performance, options.notify, options.update, int(options.speed),
make, options.perf_llvm, options.time)
elapsed_time = time.time() - start_time
if options.time:
print_debug("Elapsed time: " + time.strftime('%Hh%Mm%Ssec.', time.gmtime(elapsed_time)) + "\n", False, "")
finally:
os.chdir(current_path)
date_name = "alloy_results_" + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
if os.path.exists(date_name):
error("It's forbidden to run alloy two times in a second, logs are in ./logs", 1)
os.rename(f_date, date_name)
print_debug("Logs are in " + date_name + "\n", False, "")
exit(0)
###Main###
from optparse import OptionParser
from optparse import OptionGroup
import sys
import os
import errno
import operator
import time
import glob
import string
import platform
import smtplib
import datetime
import copy
import multiprocessing
import subprocess
import re
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.mime.text import MIMEText
from email.Encoders import encode_base64
# our drivers
import run_tests
import perf
import common
error = common.error
take_lines = common.take_lines
print_debug = common.print_debug
make_sure_dir_exists = common.make_sure_dir_exists
if __name__ == '__main__':
# parsing options
class MyParser(OptionParser):
def format_epilog(self, formatter):
return self.epilog
examples = ("Examples:\n" +
"Load and build LLVM from trunk\n\talloy.py -b\n" +
"Load and build LLVM 3.3. Rewrite LLVM folders\n\talloy.py -b --version=3.3 --force\n" +
"Untar files llvm.tgz clang.tgz, build LLVM from them in folder bin-from_tar\n\talloy.py -b --tarball='llvm.tgz clang.tgz' --folder=from_tar\n" +
"Load LLVM from trunk, revision r172870. Build it. Do selfbuild\n\talloy.py -b --revision=r172870 --selfbuild\n" +
"Validation run with LLVM 3.3, trunk; x86, x86-64; -O2;\nall supported targets; performance\n\talloy.py -r\n" +
"Validation run with all avx targets and sse4-i8x16 without performance\n\talloy.py -r --only=stability --only-targets='avx sse4-i8x16'\n" +
"Validation run with avx2-i32x8, all sse4 and sse2 targets\nand all targets with i32x16\n\talloy.py -r --only-targets='avx2-i32x8 sse4 i32x16 sse2'\n" +
"Stability validation run with LLVM 3.2, 3.3; -O0; x86,\nupdate fail_db.txt with passes and fails\n\talloy.py -r --only='3.2 -O0 stability 3.3 x86' --update-errors=FP\n" +
"Try to build compiler with all LLVM\n\talloy.py -r --only=build\n" +
"Performance validation run with 10 runs of each test and comparing to branch 'old'\n\talloy.py -r --only=performance --compare-with=old --number=10\n" +
"Validation run. Update fail_db.txt with new fails, send results to my@my.com\n\talloy.py -r --update-errors=F --notify='my@my.com'\n" +
"Test KNC target (not tested when tested all supported targets, so should be set explicitly via --only-targets)\n\talloy.py -r --only='stability' --only-targets='knc-generic'\n" +
"Test KNL target (requires sde)\n\talloy.py -r --only='stability' --only-targets='knl-generic avx512knl-i32x16'\n")
num_threads="%s" % multiprocessing.cpu_count()
parser = MyParser(usage="Usage: alloy.py -r/-b [options]", epilog=examples)
parser.add_option('-b', '--build-llvm', dest='build_llvm',
help='ask to build LLVM', default=False, action="store_true")
parser.add_option('-r', '--run', dest='validation_run',
help='ask for validation run', default=False, action="store_true")
parser.add_option('-j', dest='speed',
help='set -j for make', default=num_threads)
parser.add_option('--ispc-build-compiler', dest='ispc_build_compiler',
help='set compiler to build ispc binary (clang/gcc)', default="clang")
# options for activity "build LLVM"
llvm_group = OptionGroup(parser, "Options for building LLVM",
"These options must be used with -b option.")
llvm_group.add_option('--version', dest='version',
help='version of llvm to build: 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 trunk. Default: trunk', default="trunk")
llvm_group.add_option('--with-gcc-toolchain', dest='gcc_toolchain_path',
help='GCC install dir to use when building clang. It is important to set when ' +
'you have alternative gcc installation. Note that otherwise gcc from standard ' +
'location will be used, not from your PATH', default="")
llvm_group.add_option('--revision', dest='revision',
help='revision of llvm to build in format r172870', default="")
llvm_group.add_option('--debug', dest='debug',
help='debug build of LLVM?', default=False, action="store_true")
llvm_group.add_option('--folder', dest='folder',
help='folder to build LLVM in', default="")
llvm_group.add_option('--tarball', dest='tarball',
help='"llvm_tarball clang_tarball"', default="")
llvm_group.add_option('--selfbuild', dest='selfbuild',
help='make selfbuild of LLVM and clang', default=False, action="store_true")
llvm_group.add_option('--force', dest='force',
help='rebuild LLVM', default=False, action='store_true')
llvm_group.add_option('--extra', dest='extra',
help='load extra clang tools', default=False, action='store_true')
parser.add_option_group(llvm_group)
# options for activity "validation run"
run_group = OptionGroup(parser, "Options for validation run",
"These options must be used with -r option.")
run_group.add_option('--compare-with', dest='branch',
help='set performance reference point. Dafault: master', default="master")
run_group.add_option('--number', dest='number_for_performance',
help='number of performance runs for each test. Default: 5', default=5)
run_group.add_option('--notify', dest='notify',
help='email to sent results to', default="")
run_group.add_option('--notify-subject', dest='notify_subject',
help='set the subject of the notification email, the default is ISPC test system results', default="ISPC test system results")
run_group.add_option('--update-errors', dest='update',
help='rewrite fail_db.txt file according to received results (F or FP)', default="")
run_group.add_option('--only-targets', dest='only_targets',
help='set list of targets to test. Possible values - all subnames of targets, plus "knc-generic" for "generic" ' +
'version of knc support, "knl-generic" or "avx512knl-i32x16" for "generic"/"native" knl support', default="")
run_group.add_option('--time', dest='time',
help='display time of testing', default=False, action='store_true')
run_group.add_option('--only', dest='only',
help='set types of tests. Possible values:\n' +
'-O0, -O2, x86, x86-64, stability (test only stability), performance (test only performance),\n' +
'build (only build with different LLVM), 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, trunk, native (do not use SDE),\n' +
'current (do not rebuild ISPC), debug (only with debug info), nodebug (only without debug info, default).',
default="")
run_group.add_option('--perf_LLVM', dest='perf_llvm',
help='compare LLVM 3.6 with "--compare-with", default trunk', default=False, action='store_true')
parser.add_option_group(run_group)
# options for activity "setup PATHS"
setup_group = OptionGroup(parser, "Options for setup",
"These options must be use with -r or -b to setup environment variables")
setup_group.add_option('--llvm_home', dest='llvm_home',help='path to LLVM',default="")
setup_group.add_option('--ispc_home', dest='ispc_home',help='path to ISPC',default="")
setup_group.add_option('--sde_home', dest='sde_home',help='path to SDE',default="")
parser.add_option_group(setup_group)
(options, args) = parser.parse_args()
Main()
| 48.79128
| 183
| 0.582638
|
815ab0e2b8a2df5d507e3c7aed8b839ff90a5369
| 1,350
|
py
|
Python
|
app/core/tests/test_admin.py
|
erobc88/recipe-app-api
|
595a3dd5f023d0ad29f5f1e4d5adad1cd6d3618e
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
erobc88/recipe-app-api
|
595a3dd5f023d0ad29f5f1e4d5adad1cd6d3618e
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
erobc88/recipe-app-api
|
595a3dd5f023d0ad29f5f1e4d5adad1cd6d3618e
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@qrlocus.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@qrlocus.com',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# /admin/core/user/1
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.395349
| 68
| 0.637037
|
a7ba541714eda1fd2ac30bd62ad6234e9a4f6373
| 1,027
|
py
|
Python
|
shawk/Message.py
|
hawkins/Shawk
|
c5be1165d8f6c0471544d06f7da07156df1dcc10
|
[
"MIT"
] | 13
|
2016-05-05T11:16:31.000Z
|
2021-02-25T11:23:14.000Z
|
shawk/Message.py
|
hawkins/Shawk
|
c5be1165d8f6c0471544d06f7da07156df1dcc10
|
[
"MIT"
] | 18
|
2016-05-05T20:23:51.000Z
|
2020-12-25T16:34:05.000Z
|
shawk/Message.py
|
hawkins/Shawk
|
c5be1165d8f6c0471544d06f7da07156df1dcc10
|
[
"MIT"
] | 5
|
2017-07-25T23:50:43.000Z
|
2021-12-04T11:05:13.000Z
|
"""
shawk.Message
-------------
Define the Message representation in Shawk.
"""
class Message(object):
"""Define the structure for messages."""
def __init__(self, text, sender, date=None):
"""Initialize a Message."""
self.text = str(text).strip()
self.sender = sender
self.date = date
def __repr__(self):
"""Return the object representation of the Message."""
if self.date:
return "<shawk.Message('{}', '{}', '{}')>".format(self.text, self.sender, self.date)
else:
return "<shawk.Message('{}', '{}')>".format(self.text, self.sender)
def __str__(self):
"""Return the String representation of the Message."""
return "Message from {} at {}: \"{}\"".format(str(self.sender), self.date, self.text)
def __eq__(self, other):
"""Determine if one Message is equivalent to another."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
| 27.756757
| 96
| 0.582278
|
c0a17ee1377f938454f5acedd448736b70de37ee
| 154
|
py
|
Python
|
follows/admin.py
|
mohamed17717/Like-Reddit-Backend-Clone
|
d60d7a4625ee0f7354a21e53c26c7c52746d735f
|
[
"MIT"
] | 1
|
2022-01-10T12:00:59.000Z
|
2022-01-10T12:00:59.000Z
|
follows/admin.py
|
mohamed17717/Like-Reddit-Backend-Clone
|
d60d7a4625ee0f7354a21e53c26c7c52746d735f
|
[
"MIT"
] | null | null | null |
follows/admin.py
|
mohamed17717/Like-Reddit-Backend-Clone
|
d60d7a4625ee0f7354a21e53c26c7c52746d735f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from follows.models import UserFollow, ThreadFollow
admin.site.register(UserFollow)
admin.site.register(ThreadFollow)
| 19.25
| 51
| 0.837662
|
bd3d61f8e96cfddade931c28bec50069dddd5389
| 3,567
|
py
|
Python
|
watson/config.py
|
efabens/Watson
|
5bcbef81ff1bbc32e8810899f8fb2af0bc62fc4b
|
[
"MIT"
] | 2,148
|
2015-03-17T18:16:07.000Z
|
2022-03-31T21:20:54.000Z
|
watson/config.py
|
efabens/Watson
|
5bcbef81ff1bbc32e8810899f8fb2af0bc62fc4b
|
[
"MIT"
] | 402
|
2015-03-18T22:24:42.000Z
|
2022-03-28T14:55:40.000Z
|
watson/config.py
|
efabens/Watson
|
5bcbef81ff1bbc32e8810899f8fb2af0bc62fc4b
|
[
"MIT"
] | 281
|
2015-09-17T14:07:43.000Z
|
2022-03-25T02:20:20.000Z
|
"""A convenience and compatibility wrapper for RawConfigParser."""
import shlex
from configparser import RawConfigParser
__all__ = ('ConfigParser',)
class ConfigParser(RawConfigParser):
"""A simple wrapper for RawConfigParser to make options access easier."""
def get(self, section, option, default=None, **kwargs):
"""
Return value of option in given configuration section as a string.
If option is not set, return default instead (defaults to None).
"""
return (RawConfigParser.get(self, section, option, **kwargs)
if self.has_option(section, option) else default)
def getint(self, section, option, default=None):
"""
Return value of option in given configuration section as an integer.
If option is not set, return default (defaults to None).
Raises ValueError if the value cannot be converted to an integer.
"""
val = self.get(section, option)
return default if val is None else int(val)
def getfloat(self, section, option, default=None):
"""
Return value of option in given configuration section as a float.
If option is not set, return default (defaults to None).
Raises ValueError if the value cannot be converted to a float.
"""
val = self.get(section, option)
return default if val is None else float(val)
def getboolean(self, section, option, default=False):
"""
Return value of option in given configuration section as a boolean.
A configuration option is considered true when it has one of the
following values: '1', 'on', 'true' or 'yes'. The comparison is
case-insensitive. All other values are considered false.
If option is not set or empty, return default (defaults to False).
"""
val = self.get(section, option)
return val.lower() in ('1', 'on', 'true', 'yes') if val else default
def getlist(self, section, option, default=None):
"""
Return value of option in given section as a list of strings.
If option is not set, return default (defaults to an empty list).
The option value is split into list tokens using one of two strategies:
* If the value contains any newlines, i.e. it was written in the
configuration file using continuation lines, the value is split at
newlines and empty items are discarded.
* Otherwise, the value is split according to unix shell parsing rules.
Items are separated by whitespace, but items can be enclosed in
single or double quotes to preserve spaces in them.
Example::
[test]
option2 =
one
two three
four
five six
option1 = one "two three" four 'five six'
"""
if not self.has_option(section, option):
return [] if default is None else default
value = self.get(section, option)
if '\n' in value:
return [item.strip()
for item in value.splitlines() if item.strip()]
else:
return shlex.split(value)
def set(self, section, option, value):
"""
Set option in given configuration section to value.
If section does not exist yet, it is added implicitly.
"""
if not self.has_section(section):
self.add_section(section)
RawConfigParser.set(self, section, option, value)
| 33.027778
| 79
| 0.622091
|
2c1f28e448f75b1e2594a333a691d9d78605c9d5
| 117
|
py
|
Python
|
python-modules/twisted/twisted/internet/test/__init__.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 267
|
2015-03-22T15:23:48.000Z
|
2022-03-05T21:57:34.000Z
|
python-modules/twisted/twisted/internet/test/__init__.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 133
|
2015-03-21T15:13:43.000Z
|
2021-12-11T23:37:58.000Z
|
python-modules/twisted/twisted/internet/test/__init__.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 119
|
2015-04-28T16:07:10.000Z
|
2022-03-18T03:49:48.000Z
|
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet}.
"""
| 16.714286
| 49
| 0.709402
|
4be366b3d5578126cc54f2384cff8df870cbb5a0
| 2,454
|
py
|
Python
|
script/hassfest/services.py
|
zachware/home-assistant
|
c70b8afbd315156fa3331a699a7d4ca8304056b8
|
[
"Apache-2.0"
] | null | null | null |
script/hassfest/services.py
|
zachware/home-assistant
|
c70b8afbd315156fa3331a699a7d4ca8304056b8
|
[
"Apache-2.0"
] | null | null | null |
script/hassfest/services.py
|
zachware/home-assistant
|
c70b8afbd315156fa3331a699a7d4ca8304056b8
|
[
"Apache-2.0"
] | null | null | null |
"""Validate dependencies."""
import pathlib
from typing import Dict
import re
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util.yaml import load_yaml
from .model import Integration
def exists(value):
"""Check if value exists."""
if value is None:
raise vol.Invalid("Value cannot be None")
return value
FIELD_SCHEMA = vol.Schema({
vol.Required('description'): str,
vol.Optional('example'): exists,
vol.Optional('default'): exists,
vol.Optional('values'): exists,
vol.Optional('required'): bool,
})
SERVICE_SCHEMA = vol.Schema({
vol.Required('description'): str,
vol.Optional('fields'): vol.Schema({
str: FIELD_SCHEMA
})
})
SERVICES_SCHEMA = vol.Schema({
cv.slug: SERVICE_SCHEMA
})
def grep_dir(path: pathlib.Path, glob_pattern: str, search_pattern: str) \
-> bool:
"""Recursively go through a dir and it's children and find the regex."""
pattern = re.compile(search_pattern)
for fil in path.glob(glob_pattern):
if not fil.is_file():
continue
if pattern.search(fil.read_text()):
return True
return False
def validate_services(integration: Integration):
"""Validate services."""
# Find if integration uses services
has_services = grep_dir(integration.path, "**/*.py",
r"hass\.(services|async_register)")
if not has_services:
return
try:
data = load_yaml(str(integration.path / 'services.yaml'))
except FileNotFoundError:
integration.add_error(
'services', 'Registers services but has no services.yaml')
return
except HomeAssistantError:
integration.add_error(
'services', 'Registers services but unable to load services.yaml')
return
try:
SERVICES_SCHEMA(data)
except vol.Invalid as err:
integration.add_error(
'services',
"Invalid services.yaml: {}".format(humanize_error(data, err)))
def validate(integrations: Dict[str, Integration], config):
"""Handle dependencies for integrations."""
# check services.yaml is cool
for integration in integrations.values():
if not integration.manifest:
continue
validate_services(integration)
| 26.106383
| 78
| 0.662999
|
0b9d29451f5c144a1cb786b852964c8a96897420
| 111,508
|
py
|
Python
|
ibis/expr/api.py
|
goodwanghan/ibis
|
cec78c28776c0eca5d7cca4f73db51ce22041d31
|
[
"Apache-2.0"
] | null | null | null |
ibis/expr/api.py
|
goodwanghan/ibis
|
cec78c28776c0eca5d7cca4f73db51ce22041d31
|
[
"Apache-2.0"
] | null | null | null |
ibis/expr/api.py
|
goodwanghan/ibis
|
cec78c28776c0eca5d7cca4f73db51ce22041d31
|
[
"Apache-2.0"
] | null | null | null |
"""Ibis expression API definitions."""
from __future__ import annotations
import collections
import datetime
import functools
import numbers
import operator
from typing import Any, Iterable
import dateutil.parser
import pandas as pd
import toolz
import ibis.common.exceptions as com
import ibis.expr.analysis as _L
import ibis.expr.builders as bl
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
import ibis.util as util
from ibis.expr.groupby import GroupedTableExpr # noqa
from ibis.expr.random import random # noqa
from ibis.expr.schema import Schema
from ibis.expr.types import ( # noqa
ArrayColumn,
ArrayScalar,
ArrayValue,
BooleanColumn,
BooleanScalar,
BooleanValue,
CategoryScalar,
CategoryValue,
ColumnExpr,
DateColumn,
DateScalar,
DateValue,
DecimalColumn,
DecimalScalar,
DecimalValue,
DestructColumn,
DestructScalar,
DestructValue,
Expr,
FloatingColumn,
FloatingScalar,
FloatingValue,
GeoSpatialColumn,
GeoSpatialScalar,
GeoSpatialValue,
IntegerColumn,
IntegerScalar,
IntegerValue,
IntervalColumn,
IntervalScalar,
IntervalValue,
LineStringColumn,
LineStringScalar,
LineStringValue,
MapColumn,
MapScalar,
MapValue,
MultiLineStringColumn,
MultiLineStringScalar,
MultiLineStringValue,
MultiPointColumn,
MultiPointScalar,
MultiPointValue,
MultiPolygonColumn,
MultiPolygonScalar,
MultiPolygonValue,
NullColumn,
NullScalar,
NullValue,
NumericColumn,
NumericScalar,
NumericValue,
PointColumn,
PointScalar,
PointValue,
PolygonColumn,
PolygonScalar,
PolygonValue,
ScalarExpr,
StringColumn,
StringScalar,
StringValue,
StructColumn,
StructScalar,
StructValue,
TableExpr,
TimeColumn,
TimeScalar,
TimestampColumn,
TimestampScalar,
TimestampValue,
TimeValue,
ValueExpr,
array,
literal,
null,
)
from ibis.expr.window import (
cumulative_window,
range_window,
rows_with_max_lookback,
trailing_range_window,
trailing_window,
window,
)
__all__ = (
'aggregate',
'array',
'case',
'cast',
'coalesce',
'cross_join',
'cumulative_window',
'date',
'desc',
'Expr',
'geo_area',
'geo_as_binary',
'geo_as_ewkb',
'geo_as_ewkt',
'geo_as_text',
'geo_azimuth',
'geo_buffer',
'geo_centroid',
'geo_contains',
'geo_contains_properly',
'geo_covers',
'geo_covered_by',
'geo_crosses',
'geo_d_fully_within',
'geo_disjoint',
'geo_difference',
'geo_d_within',
'geo_envelope',
'geo_equals',
'geo_geometry_n',
'geo_geometry_type',
'geo_intersection',
'geo_intersects',
'geo_is_valid',
'geo_line_locate_point',
'geo_line_merge',
'geo_line_substring',
'geo_ordering_equals',
'geo_overlaps',
'geo_touches',
'geo_distance',
'geo_end_point',
'geo_length',
'geo_max_distance',
'geo_n_points',
'geo_n_rings',
'geo_perimeter',
'geo_point',
'geo_point_n',
'geo_simplify',
'geo_srid',
'geo_start_point',
'geo_transform',
'geo_unary_union',
'geo_union',
'geo_within',
'geo_x',
'geo_x_max',
'geo_x_min',
'geo_y',
'geo_y_max',
'geo_y_min',
'greatest',
'ifelse',
'infer_dtype',
'infer_schema',
'interval',
'join',
'least',
'literal',
'NA',
'negate',
'now',
'null',
'param',
'pi',
'prevent_rewrite',
'random',
'range_window',
'row_number',
'rows_with_max_lookback',
'schema',
'Schema',
'sequence',
'table',
'time',
'timestamp',
'trailing_range_window',
'trailing_window',
'where',
'window',
)
infer_dtype = dt.infer
infer_schema = sch.infer
NA = null()
def param(type):
"""Create a parameter of a particular type to be defined just before
execution.
Parameters
----------
type : dt.DataType
The type of the unbound parameter, e.g., double, int64, date, etc.
Returns
-------
ScalarExpr
Examples
--------
>>> import ibis
>>> import ibis.expr.datatypes as dt
>>> start = ibis.param(dt.date)
>>> end = ibis.param(dt.date)
>>> schema = [('timestamp_col', 'timestamp'), ('value', 'double')]
>>> t = ibis.table(schema)
>>> predicates = [t.timestamp_col >= start, t.timestamp_col <= end]
>>> expr = t.filter(predicates).value.sum()
"""
return ops.ScalarParameter(dt.dtype(type)).to_expr()
def sequence(values):
"""
Wrap a list of Python values as an Ibis sequence type
Parameters
----------
values : list
Should all be None or the same type
Returns
-------
seq : Sequence
"""
return ops.ValueList(values).to_expr()
def schema(
pairs: Iterable[tuple[str, dt.DataType]] | None = None,
names: Iterable[str] | None = None,
types: Iterable[str | dt.DataType] | None = None,
) -> sch.Schema:
"""Validate and return an :class:`~ibis.expr.schema.Schema` object.
Parameters
----------
pairs
List of name, type pairs. Mutually exclusive with `names` and `types`.
names
Field names. Mutually exclusive with `pairs`.
types
Field types. Mutually exclusive with `pairs`.
Examples
--------
>>> from ibis import schema
>>> sc = schema([('foo', 'string'),
... ('bar', 'int64'),
... ('baz', 'boolean')])
>>> sc2 = schema(names=['foo', 'bar', 'baz'],
... types=['string', 'int64', 'boolean'])
"""
if pairs is not None:
return Schema.from_tuples(pairs)
else:
return Schema(names, types)
def table(schema, name=None):
"""
Create an unbound Ibis table for creating expressions. Cannot be executed
without being bound to some physical table.
Useful for testing
Parameters
----------
schema : ibis Schema
name : string, default None
Name for table
Returns
-------
table : TableExpr
"""
if not isinstance(schema, Schema):
if isinstance(schema, dict):
schema = Schema.from_dict(schema)
else:
schema = Schema.from_tuples(schema)
node = ops.UnboundTable(schema, name=name)
return node.to_expr()
def desc(expr):
"""
Create a sort key (when used in sort_by) by the passed array expression or
column name.
Parameters
----------
expr : array expression or string
Can be a column name in the table being sorted
Examples
--------
>>> import ibis
>>> t = ibis.table([('g', 'string')])
>>> result = t.group_by('g').size('count').sort_by(ibis.desc('count'))
"""
if not isinstance(expr, Expr):
return ops.DeferredSortKey(expr, ascending=False)
else:
return ops.SortKey(expr, ascending=False).to_expr()
def timestamp(value, timezone=None):
"""
Returns a timestamp literal if value is likely coercible to a timestamp
Parameters
----------
value : timestamp value as string
timezone: timezone as string
defaults to None
Returns
--------
result : TimestampScalar
"""
if isinstance(value, str):
try:
value = pd.Timestamp(value, tz=timezone)
except pd.errors.OutOfBoundsDatetime:
value = dateutil.parser.parse(value)
if isinstance(value, numbers.Integral):
raise TypeError(
(
"Passing an integer to ibis.timestamp is not supported. Use "
"ibis.literal({value}).to_timestamp() to create a timestamp "
"expression from an integer."
).format(value=value)
)
return literal(value, type=dt.Timestamp(timezone=timezone))
def date(value):
"""
Returns a date literal if value is likely coercible to a date
Parameters
----------
value : date value as string
Returns
--------
result : TimeScalar
"""
if isinstance(value, str):
value = pd.to_datetime(value).date()
return literal(value, type=dt.date)
def time(value):
"""
Returns a time literal if value is likely coercible to a time
Parameters
----------
value : time value as string
Returns
--------
result : TimeScalar
"""
if isinstance(value, str):
value = pd.to_datetime(value).time()
return literal(value, type=dt.time)
def interval(
value=None,
unit='s',
years=None,
quarters=None,
months=None,
weeks=None,
days=None,
hours=None,
minutes=None,
seconds=None,
milliseconds=None,
microseconds=None,
nanoseconds=None,
):
"""
Returns an interval literal
Parameters
----------
value : int or datetime.timedelta, default None
years : int, default None
quarters : int, default None
months : int, default None
days : int, default None
weeks : int, default None
hours : int, default None
minutes : int, default None
seconds : int, default None
milliseconds : int, default None
microseconds : int, default None
nanoseconds : int, default None
Returns
--------
result : IntervalScalar
"""
if value is not None:
if isinstance(value, datetime.timedelta):
unit = 's'
value = int(value.total_seconds())
elif not isinstance(value, int):
raise ValueError('Interval value must be an integer')
else:
kwds = [
('Y', years),
('Q', quarters),
('M', months),
('W', weeks),
('D', days),
('h', hours),
('m', minutes),
('s', seconds),
('ms', milliseconds),
('us', microseconds),
('ns', nanoseconds),
]
defined_units = [(k, v) for k, v in kwds if v is not None]
if len(defined_units) != 1:
raise ValueError('Exactly one argument is required')
unit, value = defined_units[0]
value_type = literal(value).type()
type = dt.Interval(unit, value_type)
return literal(value, type=type).op().to_expr()
def case():
"""
Similar to the .case method on array expressions, create a case builder
that accepts self-contained boolean expressions (as opposed to expressions
which are to be equality-compared with a fixed value expression)
Use the .when method on the resulting object followed by .end to create a
complete case.
Examples
--------
>>> import ibis
>>> cond1 = ibis.literal(1) == 1
>>> cond2 = ibis.literal(2) == 1
>>> result1 = 3
>>> result2 = 4
>>> expr = (ibis.case()
... .when(cond1, result1)
... .when(cond2, result2).end())
Returns
-------
case : CaseBuilder
"""
return bl.SearchedCaseBuilder()
def now():
"""
Compute the current timestamp
Returns
-------
now : Timestamp scalar
"""
return ops.TimestampNow().to_expr()
def row_number():
"""Analytic function for the current row number, starting at 0.
This function does not require an ORDER BY clause, however, without an
ORDER BY clause the order of the result is nondeterministic.
Returns
-------
row_number : IntArray
"""
return ops.RowNumber().to_expr()
e = ops.E().to_expr()
pi = ops.Pi().to_expr()
def _add_methods(klass, method_table):
for k, v in method_table.items():
setattr(klass, k, v)
def _unary_op(name, klass, doc=None):
def f(arg):
return klass(arg).to_expr()
f.__name__ = name
if doc is not None:
f.__doc__ = doc
else:
f.__doc__ = klass.__doc__
return f
def negate(arg):
"""
Negate a numeric expression
Parameters
----------
arg : numeric value expression
Returns
-------
negated : type of caller
"""
op = arg.op()
if hasattr(op, 'negate'):
result = op.negate()
else:
result = ops.Negate(arg)
return result.to_expr()
def count(expr, where=None):
"""
Compute cardinality / sequence size of expression. For array expressions,
the count is excluding nulls. For tables, it's the size of the entire
table.
Returns
-------
counts : int64 type
"""
op = expr.op()
if isinstance(op, ops.DistinctColumn):
result = ops.CountDistinct(op.args[0], where).to_expr()
else:
result = ops.Count(expr, where).to_expr()
return result.name('count')
def group_concat(arg, sep=',', where=None):
"""
Concatenate values using the indicated separator (comma by default) to
produce a string
Parameters
----------
arg : array expression
sep : string, default ','
where : bool, default None
Returns
-------
concatenated : string scalar
"""
return ops.GroupConcat(arg, sep=sep, where=where).to_expr()
def arbitrary(arg, where=None, how=None):
"""
Selects the first / last non-null value in a column
Parameters
----------
arg : array expression
where: bool, default None
how : {'first', 'last', 'heavy'}, default 'first'
Heavy selects a frequently occurring value using the heavy hitters
algorithm. Heavy is only supported by Clickhouse backend.
Returns
-------
arbitrary element : scalar type of caller
"""
return ops.Arbitrary(arg, how=how, where=where).to_expr()
def _binop_expr(name, klass):
def f(self, other):
try:
other = rlz.any(other)
op = klass(self, other)
return op.to_expr()
except (com.IbisTypeError, NotImplementedError):
return NotImplemented
f.__name__ = name
return f
def _rbinop_expr(name, klass):
# For reflexive binary ops, like radd, etc.
def f(self, other):
other = rlz.any(other)
op = klass(other, self)
return op.to_expr()
f.__name__ = name
return f
def _boolean_binary_op(name, klass):
def f(self, other):
other = rlz.any(other)
if not isinstance(other, ir.BooleanValue):
raise TypeError(other)
op = klass(self, other)
return op.to_expr()
f.__name__ = name
return f
def _boolean_unary_op(name, klass):
def f(self):
return klass(self).to_expr()
f.__name__ = name
return f
def _boolean_binary_rop(name, klass):
def f(self, other):
other = rlz.any(other)
if not isinstance(other, ir.BooleanValue):
raise TypeError(other)
op = klass(other, self)
return op.to_expr()
f.__name__ = name
return f
def _agg_function(name, klass, assign_default_name=True):
def f(self, where=None):
expr = klass(self, where).to_expr()
if assign_default_name:
expr = expr.name(name)
return expr
f.__name__ = name
f.__doc__ = klass.__doc__
return f
def _extract_field(name, klass):
def f(self):
expr = klass(self).to_expr()
return expr.name(name)
f.__name__ = name
return f
# ---------------------------------------------------------------------
# Generic value API
def cast(arg, target_type):
"""Cast value(s) to indicated data type.
Parameters
----------
target_type
Type to cast to
"""
# validate
op = ops.Cast(arg, to=target_type)
if op.to.equals(arg.type()):
# noop case if passed type is the same
return arg
if isinstance(op.to, (dt.Geography, dt.Geometry)):
from_geotype = arg.type().geotype or 'geometry'
to_geotype = op.to.geotype
if from_geotype == to_geotype:
return arg
result = op.to_expr()
if not arg.has_name():
return result
expr_name = f'cast({arg.get_name()}, {op.to})'
return result.name(expr_name)
def typeof(arg):
"""
Return the data type of the argument according to the current backend
Returns
-------
typeof_arg : string
"""
return ops.TypeOf(arg).to_expr()
def hash(arg, how='fnv'):
"""
Compute an integer hash value for the indicated value expression.
Parameters
----------
arg : value expression
how : {'fnv', 'farm_fingerprint'}, default 'fnv'
Hash algorithm to use
Returns
-------
hash_value : int64 expression
"""
return ops.Hash(arg, how).to_expr()
def fillna(arg, fill_value):
"""
Replace any null values with the indicated fill value
Parameters
----------
fill_value : scalar / array value or expression
Examples
--------
>>> import ibis
>>> table = ibis.table([('col', 'int64'), ('other_col', 'int64')])
>>> result = table.col.fillna(5)
>>> result2 = table.col.fillna(table.other_col * 3)
Returns
-------
filled : type of caller
"""
return ops.IfNull(arg, fill_value).to_expr()
def coalesce(*args):
"""
Compute the first non-null value(s) from the passed arguments in
left-to-right order. This is also known as "combine_first" in pandas.
Parameters
----------
*args : variable-length value list
Examples
--------
>>> import ibis
>>> expr1 = None
>>> expr2 = 4
>>> result = ibis.coalesce(expr1, expr2, 5)
Returns
-------
coalesced : type of first provided argument
"""
return ops.Coalesce(args).to_expr()
def greatest(*args):
"""
Compute the largest value (row-wise, if any arrays are present) among the
supplied arguments.
Returns
-------
greatest : type depending on arguments
"""
return ops.Greatest(args).to_expr()
def least(*args):
"""
Compute the smallest value (row-wise, if any arrays are present) among the
supplied arguments.
Returns
-------
least : type depending on arguments
"""
return ops.Least(args).to_expr()
def where(boolean_expr, true_expr, false_null_expr):
"""
Equivalent to the ternary expression: if X then Y else Z
Parameters
----------
boolean_expr : BooleanValue (array or scalar)
true_expr : value
Values for each True value
false_null_expr : value
Values for False or NULL values
Returns
-------
result : arity depending on inputs
Type of true_expr used to determine output type
"""
op = ops.Where(boolean_expr, true_expr, false_null_expr)
return op.to_expr()
def over(expr, window):
"""
Turn an aggregation or full-sample analytic operation into a windowed
operation. See ibis.window for more details on window configuration
Parameters
----------
expr : value expression
window : ibis.Window
Returns
-------
expr : type of input
"""
prior_op = expr.op()
if isinstance(prior_op, ops.WindowOp):
op = prior_op.over(window)
else:
op = ops.WindowOp(expr, window)
result = op.to_expr()
try:
name = expr.get_name()
except com.ExpressionError:
pass
else:
result = result.name(name)
return result
def value_counts(arg, metric_name='count'):
"""
Compute a frequency table for this value expression
Parameters
----------
Returns
-------
counts : TableExpr
Aggregated table
"""
base = ir.find_base_table(arg)
metric = base.count().name(metric_name)
try:
arg.get_name()
except com.ExpressionError:
arg = arg.name('unnamed')
return base.group_by(arg).aggregate(metric)
def nullif(value, null_if_expr):
"""
Set values to null if they match/equal a particular expression (scalar or
array-valued).
Common use to avoid divide-by-zero problems (get NULL instead of INF on
divide-by-zero): 5 / expr.nullif(0)
Parameters
----------
value : value expression
Value to modify
null_if_expr : value expression (array or scalar)
Returns
-------
null_if : type of caller
"""
return ops.NullIf(value, null_if_expr).to_expr()
def between(arg, lower, upper):
"""
Check if the input expr falls between the lower/upper bounds
passed. Bounds are inclusive. All arguments must be comparable.
Returns
-------
is_between : BooleanValue
"""
lower, upper = rlz.any(lower), rlz.any(upper)
op = ops.Between(arg, lower, upper)
return op.to_expr()
def isin(arg, values):
"""
Check whether the value expression is contained within the indicated
list of values.
Parameters
----------
values : list, tuple, or array expression
The values can be scalar or array-like. Each of them must be
comparable with the calling expression, or None (NULL).
Examples
--------
>>> import ibis
>>> table = ibis.table([('string_col', 'string')])
>>> table2 = ibis.table([('other_string_col', 'string')])
>>> expr = table.string_col.isin(['foo', 'bar', 'baz'])
>>> expr2 = table.string_col.isin(table2.other_string_col)
Returns
-------
contains : BooleanValue
"""
op = ops.Contains(arg, values)
return op.to_expr()
def notin(arg, values):
"""
Like isin, but checks whether this expression's value(s) are not
contained in the passed values. See isin docs for full usage.
"""
op = ops.NotContains(arg, values)
return op.to_expr()
add = _binop_expr('__add__', ops.Add)
sub = _binop_expr('__sub__', ops.Subtract)
mul = _binop_expr('__mul__', ops.Multiply)
div = _binop_expr('__div__', ops.Divide)
floordiv = _binop_expr('__floordiv__', ops.FloorDivide)
pow = _binop_expr('__pow__', ops.Power)
mod = _binop_expr('__mod__', ops.Modulus)
radd = _rbinop_expr('__radd__', ops.Add)
rsub = _rbinop_expr('__rsub__', ops.Subtract)
rdiv = _rbinop_expr('__rdiv__', ops.Divide)
rfloordiv = _rbinop_expr('__rfloordiv__', ops.FloorDivide)
def substitute(arg, value, replacement=None, else_=None):
"""
Substitute (replace) one or more values in a value expression
Parameters
----------
value : expr-like or dict
replacement : expr-like, optional
If an expression is passed to value, this must be passed
else_ : expr, optional
Returns
-------
replaced : case statement (for now!)
"""
expr = arg.case()
if isinstance(value, dict):
for k, v in sorted(value.items()):
expr = expr.when(k, v)
else:
expr = expr.when(value, replacement)
if else_ is not None:
expr = expr.else_(else_)
else:
expr = expr.else_(arg)
return expr.end()
def _case(arg):
"""Create a new SimpleCaseBuilder to chain multiple if-else statements. Add
new search expressions with the .when method. These must be comparable with
this array expression. Conclude by calling .end()
Returns
-------
builder : CaseBuilder
Examples
--------
>>> import ibis
>>> t = ibis.table([('string_col', 'string')], name='t')
>>> expr = t.string_col
>>> case_expr = (expr.case()
... .when('a', 'an a')
... .when('b', 'a b')
... .else_('null or (not a and not b)')
... .end())
>>> case_expr # doctest: +NORMALIZE_WHITESPACE
ref_0
UnboundTable[table]
name: t
schema:
string_col : string
<BLANKLINE>
SimpleCase[string*]
base:
string_col = Column[string*] 'string_col' from table
ref_0
cases:
Literal[string]
a
Literal[string]
b
results:
Literal[string]
an a
Literal[string]
a b
default:
Literal[string]
null or (not a and not b)
"""
return bl.SimpleCaseBuilder(arg)
def cases(arg, case_result_pairs, default=None):
"""
Create a case expression in one shot.
Returns
-------
case_expr : SimpleCase
"""
builder = arg.case()
for case, result in case_result_pairs:
builder = builder.when(case, result)
if default is not None:
builder = builder.else_(default)
return builder.end()
_generic_value_methods = {
'hash': hash,
'cast': cast,
'coalesce': coalesce,
'typeof': typeof,
'fillna': fillna,
'nullif': nullif,
'between': between,
'isin': isin,
'notin': notin,
'isnull': _unary_op('isnull', ops.IsNull),
'notnull': _unary_op('notnull', ops.NotNull),
'over': over,
'case': _case,
'cases': cases,
'substitute': substitute,
'__eq__': _binop_expr('__eq__', ops.Equals),
'__ne__': _binop_expr('__ne__', ops.NotEquals),
'__ge__': _binop_expr('__ge__', ops.GreaterEqual),
'__gt__': _binop_expr('__gt__', ops.Greater),
'__le__': _binop_expr('__le__', ops.LessEqual),
'__lt__': _binop_expr('__lt__', ops.Less),
'collect': _unary_op('collect', ops.ArrayCollect),
'identical_to': _binop_expr('identical_to', ops.IdenticalTo),
}
approx_nunique = _agg_function('approx_nunique', ops.HLLCardinality, True)
approx_median = _agg_function('approx_median', ops.CMSMedian, True)
max = _agg_function('max', ops.Max, True)
min = _agg_function('min', ops.Min, True)
nunique = _agg_function('nunique', ops.CountDistinct, True)
def lag(arg, offset=None, default=None):
return ops.Lag(arg, offset, default).to_expr()
def lead(arg, offset=None, default=None):
return ops.Lead(arg, offset, default).to_expr()
first = _unary_op('first', ops.FirstValue)
last = _unary_op('last', ops.LastValue)
rank = _unary_op('rank', ops.MinRank)
dense_rank = _unary_op('dense_rank', ops.DenseRank)
percent_rank = _unary_op('percent_rank', ops.PercentRank)
cummin = _unary_op('cummin', ops.CumulativeMin)
cummax = _unary_op('cummax', ops.CumulativeMax)
def ntile(arg, buckets):
return ops.NTile(arg, buckets).to_expr()
def nth(arg, k):
"""
Analytic operation computing nth value from start of sequence
Parameters
----------
arg : array expression
k : int
Desired rank value
Returns
-------
nth : type of argument
"""
return ops.NthValue(arg, k).to_expr()
def distinct(arg):
"""
Compute set of unique values occurring in this array. Can not be used
in conjunction with other array expressions from the same context
(because it's a cardinality-modifying pseudo-reduction).
"""
op = ops.DistinctColumn(arg)
return op.to_expr()
def topk(arg, k, by=None):
"""
Returns
-------
topk : TopK filter expression
"""
op = ops.TopK(arg, k, by=by if by is not None else arg.count())
return op.to_expr()
def bottomk(arg, k, by=None):
raise NotImplementedError
def _generic_summary(arg, exact_nunique=False, prefix="", suffix=""):
"""
Compute a set of summary metrics from the input value expression
Parameters
----------
arg : value expression
exact_nunique : boolean, default False
Compute the exact number of distinct values (slower)
prefix : string, default ""
String prefix for metric names
suffix : string, default ""
String suffix for metric names
Returns
-------
summary : (count, # nulls, nunique)
"""
if exact_nunique:
unique_metric = arg.nunique().name('uniques')
else:
unique_metric = arg.approx_nunique().name('uniques')
metrics = [arg.count(), arg.isnull().sum().name('nulls'), unique_metric]
metrics = [m.name(f"{prefix}{m.get_name()}{suffix}") for m in metrics]
return metrics
def _numeric_summary(arg, exact_nunique=False, prefix="", suffix=""):
"""
Compute a set of summary metrics from the input numeric value expression
Parameters
----------
arg : numeric value expression
exact_nunique : boolean, default False
prefix : string, default ""
String prefix for metric names
suffix : string, default ""
String suffix for metric names
Returns
-------
summary : (count, # nulls, min, max, sum, mean, nunique)
"""
if exact_nunique:
unique_metric = arg.nunique().name('nunique')
else:
unique_metric = arg.approx_nunique().name('approx_nunique')
metrics = [
arg.count(),
arg.isnull().sum().name('nulls'),
arg.min(),
arg.max(),
arg.sum(),
arg.mean(),
unique_metric,
]
metrics = [m.name(f"{prefix}{m.get_name()}{suffix}") for m in metrics]
return metrics
_generic_column_methods = {
'bottomk': bottomk,
'distinct': distinct,
'nunique': nunique,
'topk': topk,
'summary': _generic_summary,
'count': count,
'arbitrary': arbitrary,
'min': min,
'max': max,
'approx_median': approx_median,
'approx_nunique': approx_nunique,
'group_concat': group_concat,
'value_counts': value_counts,
'first': first,
'last': last,
'dense_rank': dense_rank,
'rank': rank,
'percent_rank': percent_rank,
# 'nth': nth,
'ntile': ntile,
'lag': lag,
'lead': lead,
'cummin': cummin,
'cummax': cummax,
}
# TODO: should bound to AnyValue and AnyColumn instead, but that breaks
# doc builds, because it checks methods on ColumnExpr
_add_methods(ir.ValueExpr, _generic_value_methods)
_add_methods(ir.ColumnExpr, _generic_column_methods)
# ---------------------------------------------------------------------
# Numeric API
def round(arg, digits=None):
"""
Round values either to integer or indicated number of decimal places.
Returns
-------
rounded : type depending on digits argument
digits None or 0
decimal types: decimal
other numeric types: bigint
digits nonzero
decimal types: decimal
other numeric types: double
"""
op = ops.Round(arg, digits)
return op.to_expr()
def log(arg, base=None):
"""
Perform the logarithm using a specified base
Parameters
----------
base : number, default None
If None, base e is used
Returns
-------
logarithm : double type
"""
op = ops.Log(arg, base)
return op.to_expr()
def clip(arg, lower=None, upper=None):
"""
Trim values at input threshold(s).
Parameters
----------
lower : float
upper : float
Returns
-------
clipped : same as type of the input
"""
if lower is None and upper is None:
raise ValueError("at least one of lower and " "upper must be provided")
op = ops.Clip(arg, lower, upper)
return op.to_expr()
def quantile(arg, quantile, interpolation='linear'):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
quantile : float/int or array-like
0 <= quantile <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile
if scalar input, scalar type, same as input
if array input, list of scalar type
"""
if isinstance(quantile, collections.abc.Sequence):
op = ops.MultiQuantile(
arg, quantile=quantile, interpolation=interpolation
)
else:
op = ops.Quantile(arg, quantile=quantile, interpolation=interpolation)
return op.to_expr()
def _integer_to_timestamp(arg, unit='s'):
"""
Convert integer UNIX timestamp (at some resolution) to a timestamp type
Parameters
----------
unit : {'s', 'ms', 'us'}
Second (s), millisecond (ms), or microsecond (us) resolution
Returns
-------
timestamp : timestamp value expression
"""
op = ops.TimestampFromUNIX(arg, unit)
return op.to_expr()
def _integer_to_interval(arg, unit='s'):
"""
Convert integer interval with the same inner type
Parameters
----------
unit : {'Y', 'M', 'W', 'D', 'h', 'm', s', 'ms', 'us', 'ns'}
Returns
-------
interval : interval value expression
"""
op = ops.IntervalFromInteger(arg, unit)
return op.to_expr()
abs = _unary_op('abs', ops.Abs)
ceil = _unary_op('ceil', ops.Ceil)
degrees = _unary_op('degrees', ops.Degrees)
exp = _unary_op('exp', ops.Exp)
floor = _unary_op('floor', ops.Floor)
log2 = _unary_op('log2', ops.Log2)
log10 = _unary_op('log10', ops.Log10)
ln = _unary_op('ln', ops.Ln)
radians = _unary_op('radians', ops.Radians)
sign = _unary_op('sign', ops.Sign)
sqrt = _unary_op('sqrt', ops.Sqrt)
# TRIGONOMETRIC OPERATIONS
acos = _unary_op('acos', ops.Acos)
asin = _unary_op('asin', ops.Asin)
atan = _unary_op('atan', ops.Atan)
atan2 = _binop_expr('atan2', ops.Atan2)
cos = _unary_op('cos', ops.Cos)
cot = _unary_op('cot', ops.Cot)
sin = _unary_op('sin', ops.Sin)
tan = _unary_op('tan', ops.Tan)
_numeric_value_methods = {
'__neg__': negate,
'abs': abs,
'ceil': ceil,
'degrees': degrees,
'deg2rad': radians,
'floor': floor,
'radians': radians,
'rad2deg': degrees,
'sign': sign,
'exp': exp,
'sqrt': sqrt,
'log': log,
'ln': ln,
'log2': log2,
'log10': log10,
'round': round,
'nullifzero': _unary_op('nullifzero', ops.NullIfZero),
'zeroifnull': _unary_op('zeroifnull', ops.ZeroIfNull),
'clip': clip,
'__add__': add,
'add': add,
'__sub__': sub,
'sub': sub,
'__mul__': mul,
'mul': mul,
'__div__': div,
'__truediv__': div,
'__floordiv__': floordiv,
'div': div,
'floordiv': floordiv,
'__rdiv__': rdiv,
'__rtruediv__': rdiv,
'__rfloordiv__': rfloordiv,
'rdiv': rdiv,
'rfloordiv': rfloordiv,
'__pow__': pow,
'pow': pow,
'__radd__': add,
'radd': add,
'__rsub__': rsub,
'rsub': rsub,
'__rmul__': _rbinop_expr('__rmul__', ops.Multiply),
'__rpow__': _rbinop_expr('__rpow__', ops.Power),
'__mod__': mod,
'__rmod__': _rbinop_expr('__rmod__', ops.Modulus),
# trigonometric operations
'acos': acos,
'asin': asin,
'atan': atan,
'atan2': atan2,
'cos': cos,
'cot': cot,
'sin': sin,
'tan': tan,
}
def convert_base(arg, from_base, to_base):
"""
Convert number (as integer or string) from one base to another
Parameters
----------
arg : string or integer
from_base : integer
to_base : integer
Returns
-------
converted : string
"""
return ops.BaseConvert(arg, from_base, to_base).to_expr()
_integer_value_methods = {
'to_timestamp': _integer_to_timestamp,
'to_interval': _integer_to_interval,
'convert_base': convert_base,
}
bit_and = _agg_function('bit_and', ops.BitAnd, True)
bit_or = _agg_function('bit_or', ops.BitOr, True)
bit_xor = _agg_function('bit_xor', ops.BitXor, True)
mean = _agg_function('mean', ops.Mean, True)
cummean = _unary_op('cummean', ops.CumulativeMean)
sum = _agg_function('sum', ops.Sum, True)
cumsum = _unary_op('cumsum', ops.CumulativeSum)
def std(arg, where=None, how='sample'):
"""
Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar
"""
expr = ops.StandardDev(arg, how=how, where=where).to_expr()
expr = expr.name('std')
return expr
def variance(arg, where=None, how='sample'):
"""
Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar
"""
expr = ops.Variance(arg, how=how, where=where).to_expr()
expr = expr.name('var')
return expr
def correlation(left, right, where=None, how='sample'):
"""
Compute correlation of two numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
corr : double scalar
"""
expr = ops.Correlation(left, right, how=how, where=where).to_expr()
return expr
def covariance(left, right, where=None, how='sample'):
"""
Compute covariance of two numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
cov : double scalar
"""
expr = ops.Covariance(left, right, how=how, where=where).to_expr()
return expr
def bucket(
arg,
buckets,
closed='left',
close_extreme=True,
include_under=False,
include_over=False,
):
"""
Compute a discrete binning of a numeric array
Parameters
----------
arg : numeric array expression
buckets : list
closed : {'left', 'right'}, default 'left'
Which side of each interval is closed. For example
buckets = [0, 100, 200]
closed = 'left': 100 falls in 2nd bucket
closed = 'right': 100 falls in 1st bucket
close_extreme : boolean, default True
Returns
-------
bucketed : coded value expression
"""
op = ops.Bucket(
arg,
buckets,
closed=closed,
close_extreme=close_extreme,
include_under=include_under,
include_over=include_over,
)
return op.to_expr()
def histogram(
arg, nbins=None, binwidth=None, base=None, closed='left', aux_hash=None
):
"""Compute a histogram with fixed width bins.
Parameters
----------
arg : numeric array expression
nbins : int, default None
If supplied, will be used to compute the binwidth
binwidth : number, default None
If not supplied, computed from the data (actual max and min values)
base : number, default None
closed : {'left', 'right'}, default 'left'
Which side of each interval is closed
Returns
-------
histogrammed : coded value expression
"""
op = ops.Histogram(
arg, nbins, binwidth, base, closed=closed, aux_hash=aux_hash
)
return op.to_expr()
def category_label(arg, labels, nulls=None):
"""Format a known number of categories as strings.
Parameters
----------
labels : list of string
nulls : string, optional
How to label any null values among the categories
Returns
-------
string_categories : string value expression
"""
op = ops.CategoryLabel(arg, labels, nulls)
return op.to_expr()
_numeric_column_methods = {
'mean': mean,
'cummean': cummean,
'sum': sum,
'cumsum': cumsum,
'quantile': quantile,
'std': std,
'var': variance,
'corr': correlation,
'cov': covariance,
'bucket': bucket,
'histogram': histogram,
'summary': _numeric_summary,
}
_integer_column_methods = {
'bit_and': bit_and,
'bit_or': bit_or,
'bit_xor': bit_xor,
}
_floating_value_methods = {
'isnan': _unary_op('isnull', ops.IsNan),
'isinf': _unary_op('isinf', ops.IsInf),
}
_add_methods(ir.NumericValue, _numeric_value_methods)
_add_methods(ir.IntegerValue, _integer_value_methods)
_add_methods(ir.FloatingValue, _floating_value_methods)
_add_methods(ir.NumericColumn, _numeric_column_methods)
_add_methods(ir.IntegerColumn, _integer_column_methods)
# ----------------------------------------------------------------------
# GeoSpatial API
def geo_area(arg):
"""
Compute area of a geo spatial data
Parameters
----------
arg : geometry or geography
Returns
-------
area : double scalar
"""
op = ops.GeoArea(arg)
return op.to_expr()
def geo_as_binary(arg):
"""
Get the geometry as well-known bytes (WKB) without the SRID data.
Parameters
----------
arg : geometry or geography
Returns
-------
wkb : binary
"""
op = ops.GeoAsBinary(arg)
return op.to_expr()
def geo_as_ewkt(arg):
"""
Get the geometry as well-known text (WKT) with the SRID data.
Parameters
----------
arg : geometry or geography
Returns
-------
wkt : string
"""
op = ops.GeoAsEWKT(arg)
return op.to_expr()
def geo_as_text(arg):
"""
Get the geometry as well-known text (WKT) without the SRID data.
Parameters
----------
arg : geometry or geography
Returns
-------
wkt : string
"""
op = ops.GeoAsText(arg)
return op.to_expr()
def geo_as_ewkb(arg):
"""
Get the geometry as well-known bytes (WKB) with the SRID data.
Parameters
----------
arg : geometry or geography
Returns
-------
wkb : binary
"""
op = ops.GeoAsEWKB(arg)
return op.to_expr()
def geo_contains(left, right):
"""
Check if the first geometry contains the second one
Parameters
----------
left : geometry
right : geometry
Returns
-------
contains : bool scalar
"""
op = ops.GeoContains(left, right)
return op.to_expr()
def geo_contains_properly(left, right):
"""
Check if the first geometry contains the second one,
with no common border points.
Parameters
----------
left : geometry
right : geometry
Returns
-------
contains_properly : bool scalar
"""
op = ops.GeoContainsProperly(left, right)
return op.to_expr()
def geo_covers(left, right):
"""
Check if the first geometry covers the second one.
Parameters
----------
left : geometry
right : geometry
Returns
-------
covers : bool scalar
"""
op = ops.GeoCovers(left, right)
return op.to_expr()
def geo_covered_by(left, right):
"""
Check if the first geometry is covered by the second one.
Parameters
----------
left : geometry
right : geometry
Returns
-------
covered_by : bool scalar
"""
op = ops.GeoCoveredBy(left, right)
return op.to_expr()
def geo_crosses(left, right):
"""
Check if the geometries have some, but not all, interior points in common.
Parameters
----------
left : geometry
right : geometry
Returns
-------
crosses : bool scalar
"""
op = ops.GeoCrosses(left, right)
return op.to_expr()
def geo_d_fully_within(left, right, distance):
"""
Check if the first geometry is fully within a specified distance from
the second one.
Parameters
----------
left : geometry
right : geometry
distance: double
Returns
-------
d_fully_within : bool scalar
"""
op = ops.GeoDFullyWithin(left, right, distance)
return op.to_expr()
def geo_disjoint(left, right):
"""
Check if the geometries have no points in common.
Parameters
----------
left : geometry
right : geometry
Returns
-------
disjoint : bool scalar
"""
op = ops.GeoDisjoint(left, right)
return op.to_expr()
def geo_d_within(left, right, distance):
"""
Check if the first geometry is within a specified distance from
the second one.
Parameters
----------
left : geometry
right : geometry
distance: double
Returns
-------
d_within : bool scalar
"""
op = ops.GeoDWithin(left, right, distance)
return op.to_expr()
def geo_equals(left, right):
"""
Check if the geometries are the same.
Parameters
----------
left : geometry
right : geometry
Returns
-------
equals : bool scalar
"""
op = ops.GeoEquals(left, right)
return op.to_expr()
def geo_geometry_n(arg, n):
"""
Get the 1-based Nth geometry of a multi geometry.
Parameters
----------
arg : geometry
n : integer
Returns
-------
geom : geometry scalar
"""
op = ops.GeoGeometryN(arg, n)
return op.to_expr()
def geo_geometry_type(arg):
"""
Get the type of a geometry.
Parameters
----------
arg : geometry
Returns
-------
type : string scalar
"""
op = ops.GeoGeometryType(arg)
return op.to_expr()
def geo_intersects(left, right):
"""
Check if the geometries share any points.
Parameters
----------
left : geometry
right : geometry
Returns
-------
intersects : bool scalar
"""
op = ops.GeoIntersects(left, right)
return op.to_expr()
def geo_is_valid(arg):
"""
Check if the geometry is valid.
Parameters
----------
arg : geometry
Returns
-------
valid : bool scalar
"""
op = ops.GeoIsValid(arg)
return op.to_expr()
def geo_line_locate_point(left, right):
"""
Locate the distance a point falls along the length of a line.
Returns a float between zero and one representing the location of the
closest point on the linestring to the given point, as a fraction of the
total 2d line length.
Parameters
----------
left : linestring
right: point
Returns
-------
distance: float scalar
"""
op = ops.GeoLineLocatePoint(left, right)
return op.to_expr()
def geo_line_merge(arg):
"""
Merge a MultiLineString into a LineString.
Returns a (set of) LineString(s) formed by sewing together the
constituent line work of a MultiLineString. If a geometry other than
a LineString or MultiLineString is given, this will return an empty
geometry collection.
Parameters
----------
arg : (multi)linestring
Returns
-------
merged: geometry scalar
"""
op = ops.GeoLineMerge(arg)
return op.to_expr()
def geo_line_substring(arg, start, end):
"""
Clip a substring from a LineString.
Returns a linestring that is a substring of the input one, starting
and ending at the given fractions of the total 2d length. The second
and third arguments are floating point values between zero and one.
This only works with linestrings.
Parameters
----------
arg: linestring
start: float
end: float
Returns
-------
substring: linestring scalar
"""
op = ops.GeoLineSubstring(arg, start, end)
return op.to_expr()
def geo_ordering_equals(left, right):
"""
Check if two geometries are equal and have the same point ordering.
Returns true if the two geometries are equal and the coordinates
are in the same order.
Parameters
----------
left : geometry
right : geometry
Returns
-------
ordering_equals : bool scalar
"""
op = ops.GeoOrderingEquals(left, right)
return op.to_expr()
def geo_overlaps(left, right):
"""
Check if the geometries share space, are of the same dimension,
but are not completely contained by each other.
Parameters
----------
left : geometry
right : geometry
Returns
-------
overlaps : bool scalar
"""
op = ops.GeoOverlaps(left, right)
return op.to_expr()
def geo_point(
left: NumericValue | int | float,
right: NumericValue | int | float,
) -> ops.GeoPoint:
"""
Return a point constructed on the fly from the provided coordinate values.
Constant coordinates result in construction of a POINT literal.
Parameters
----------
left : NumericValue, integer or float
right : NumericValue, integer or float
Returns
-------
point
"""
op = ops.GeoPoint(left, right)
return op.to_expr()
def geo_touches(left, right):
"""
Check if the geometries have at least one point in common,
but do not intersect.
Parameters
----------
left : geometry
right : geometry
Returns
-------
touches : bool scalar
"""
op = ops.GeoTouches(left, right)
return op.to_expr()
def geo_distance(left, right):
"""
Compute distance between two geo spatial data
Parameters
----------
left : geometry or geography
right : geometry or geography
Returns
-------
distance : double scalar
"""
op = ops.GeoDistance(left, right)
return op.to_expr()
def geo_length(arg):
"""
Compute length of a geo spatial data
Parameters
----------
arg : geometry or geography
Returns
-------
length : double scalar
"""
op = ops.GeoLength(arg)
return op.to_expr()
def geo_perimeter(arg):
"""
Compute perimeter of a geo spatial data
Parameters
----------
arg : geometry or geography
Returns
-------
perimeter : double scalar
"""
op = ops.GeoPerimeter(arg)
return op.to_expr()
def geo_max_distance(left, right):
"""Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
Parameters
----------
left : geometry
right : geometry
Returns
-------
MaxDistance : double scalar
"""
op = ops.GeoMaxDistance(left, right)
return op.to_expr()
def geo_unary_union(arg):
"""
Aggregate a set of geometries into a union.
This corresponds to the aggregate version of the PostGIS ST_Union.
We give it a different name (following the corresponding method
in GeoPandas) to avoid name conflicts with the non-aggregate version.
Parameters
----------
arg : geometry column
Returns
-------
union : geometry scalar
"""
expr = ops.GeoUnaryUnion(arg).to_expr()
expr = expr.name('union')
return expr
def geo_union(left, right):
"""
Merge two geometries into a union geometry.
Returns the pointwise union of the two geometries.
This corresponds to the non-aggregate version the PostGIS ST_Union.
Parameters
----------
left : geometry
right : geometry
Returns
-------
union : geometry scalar
"""
op = ops.GeoUnion(left, right)
return op.to_expr()
def geo_x(arg):
"""Return the X coordinate of the point, or NULL if not available.
Input must be a point
Parameters
----------
arg : geometry
Returns
-------
X : double scalar
"""
op = ops.GeoX(arg)
return op.to_expr()
def geo_y(arg):
"""Return the Y coordinate of the point, or NULL if not available.
Input must be a point
Parameters
----------
arg : geometry
Returns
-------
Y : double scalar
"""
op = ops.GeoY(arg)
return op.to_expr()
def geo_x_min(arg):
"""Returns Y minima of a geometry
Parameters
----------
arg : geometry
Returns
-------
XMin : double scalar
"""
op = ops.GeoXMin(arg)
return op.to_expr()
def geo_x_max(arg):
"""Returns X maxima of a geometry
Parameters
----------
arg : geometry
Returns
-------
XMax : double scalar
"""
op = ops.GeoXMax(arg)
return op.to_expr()
def geo_y_min(arg):
"""Returns Y minima of a geometry
Parameters
----------
arg : geometry
Returns
-------
YMin : double scalar
"""
op = ops.GeoYMin(arg)
return op.to_expr()
def geo_y_max(arg):
"""Returns Y maxima of a geometry
Parameters
----------
arg : geometry
Returns
-------
YMax : double scalar
"""
op = ops.GeoYMax(arg)
return op.to_expr()
def geo_start_point(arg):
"""Returns the first point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
Parameters
----------
arg : geometry
Returns
-------
Point : geometry scalar
"""
op = ops.GeoStartPoint(arg)
return op.to_expr()
def geo_end_point(arg):
"""Returns the last point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
Parameters
----------
arg : geometry or geography
Returns
-------
EndPoint : geometry scalar
"""
op = ops.GeoEndPoint(arg)
return op.to_expr()
def geo_point_n(arg, n):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
Parameters
----------
arg : geometry
n : integer
Returns
-------
PointN : geometry scalar
"""
op = ops.GeoPointN(arg, n)
return op.to_expr()
def geo_n_points(arg):
"""Return the number of points in a geometry. Works for all geometries
Parameters
----------
arg : geometry
Returns
-------
NPoints : double scalar
"""
op = ops.GeoNPoints(arg)
return op.to_expr()
def geo_n_rings(arg):
"""If the geometry is a polygon or multi-polygon returns the number of
rings. It counts the outer rings as well
Parameters
----------
arg : geometry or geography
Returns
-------
NRings : double scalar
"""
op = ops.GeoNRings(arg)
return op.to_expr()
def geo_srid(arg):
"""Returns the spatial reference identifier for the ST_Geometry
Parameters
----------
arg : geometry
Returns
-------
SRID : Integer scalar
"""
op = ops.GeoSRID(arg)
return op.to_expr()
def geo_set_srid(arg, srid):
"""Set the spatial reference identifier for the ST_Geometry
Parameters
----------
arg : geometry
srid : integer
Returns
-------
SetSRID : geometry
"""
op = ops.GeoSetSRID(arg, srid)
return op.to_expr()
def geo_buffer(arg, radius):
"""Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry.
Parameters
----------
arg : geometry
radius: double
Returns
-------
buffer : geometry scalar
"""
op = ops.GeoBuffer(arg, radius)
return op.to_expr()
def geo_centroid(arg):
"""Returns the centroid of the geometry.
Parameters
----------
arg : geometry
Returns
-------
centroid : geometry scalar
"""
op = ops.GeoCentroid(arg)
return op.to_expr()
def geo_envelope(arg):
"""Returns a geometry representing the bounding box of the arg.
Parameters
----------
arg : geometry
Returns
-------
envelope : geometry scalar
"""
op = ops.GeoEnvelope(arg)
return op.to_expr()
def geo_within(left, right):
"""
Check if the first geometry is completely inside of the second.
Parameters
----------
left : geometry
right : geometry
Returns
-------
within : bool scalar
"""
op = ops.GeoWithin(left, right)
return op.to_expr()
def geo_azimuth(left, right):
"""
Check if the geometries have at least one point in common,
but do not intersect.
Parameters
----------
left : point
right : point
Returns
-------
azimuth : float scalar
"""
op = ops.GeoAzimuth(left, right)
return op.to_expr()
def geo_intersection(left, right):
"""
Return the intersection of two geometries.
Parameters
----------
left : geometry
right : geometry
Returns
-------
intersection : geometry scalar
"""
op = ops.GeoIntersection(left, right)
return op.to_expr()
def geo_difference(left, right):
"""
Return the difference of two geometries.
Parameters
----------
left : geometry
right : geometry
Returns
-------
difference : geometry scalar
"""
op = ops.GeoDifference(left, right)
return op.to_expr()
def geo_simplify(arg, tolerance, preserve_collapsed):
"""
Simplify a given geometry.
Parameters
----------
arg : geometry
tolerance: float
preserved_collapsed: boolean
Returns
-------
simplified : geometry scalar
"""
op = ops.GeoSimplify(arg, tolerance, preserve_collapsed)
return op.to_expr()
def geo_transform(arg, srid):
"""
Transform a geometry into a new SRID.
Parameters
----------
arg : geometry
srid: integer
Returns
-------
transformed : geometry scalar
"""
op = ops.GeoTransform(arg, srid)
return op.to_expr()
_geospatial_value_methods = {
'area': geo_area,
'as_binary': geo_as_binary,
'as_ewkb': geo_as_ewkb,
'as_ewkt': geo_as_ewkt,
'as_text': geo_as_text,
'azimuth': geo_azimuth,
'buffer': geo_buffer,
'centroid': geo_centroid,
'contains': geo_contains,
'contains_properly': geo_contains_properly,
'covers': geo_covers,
'covered_by': geo_covered_by,
'crosses': geo_crosses,
'd_fully_within': geo_d_fully_within,
'difference': geo_difference,
'disjoint': geo_disjoint,
'distance': geo_distance,
'd_within': geo_d_within,
'end_point': geo_end_point,
'envelope': geo_envelope,
'geo_equals': geo_equals,
'geometry_n': geo_geometry_n,
'geometry_type': geo_geometry_type,
'intersection': geo_intersection,
'intersects': geo_intersects,
'is_valid': geo_is_valid,
'line_locate_point': geo_line_locate_point,
'line_merge': geo_line_merge,
'line_substring': geo_line_substring,
'length': geo_length,
'max_distance': geo_max_distance,
'n_points': geo_n_points,
'n_rings': geo_n_rings,
'ordering_equals': geo_ordering_equals,
'overlaps': geo_overlaps,
'perimeter': geo_perimeter,
'point_n': geo_point_n,
'set_srid': geo_set_srid,
'simplify': geo_simplify,
'srid': geo_srid,
'start_point': geo_start_point,
'touches': geo_touches,
'transform': geo_transform,
'union': geo_union,
'within': geo_within,
'x': geo_x,
'x_max': geo_x_max,
'x_min': geo_x_min,
'y': geo_y,
'y_max': geo_y_max,
'y_min': geo_y_min,
}
_geospatial_column_methods = {'unary_union': geo_unary_union}
_add_methods(ir.GeoSpatialValue, _geospatial_value_methods)
_add_methods(ir.GeoSpatialColumn, _geospatial_column_methods)
# ----------------------------------------------------------------------
# Boolean API
# TODO: logical binary operators for BooleanValue
def ifelse(arg, true_expr, false_expr):
"""
Shorthand for implementing ternary expressions
bool_expr.ifelse(0, 1)
e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END
"""
# Result will be the result of promotion of true/false exprs. These
# might be conflicting types; same type resolution as case expressions
# must be used.
case = bl.SearchedCaseBuilder()
return case.when(arg, true_expr).else_(false_expr).end()
_boolean_value_methods = {
'ifelse': ifelse,
'__and__': _boolean_binary_op('__and__', ops.And),
'__or__': _boolean_binary_op('__or__', ops.Or),
'__xor__': _boolean_binary_op('__xor__', ops.Xor),
'__rand__': _boolean_binary_rop('__rand__', ops.And),
'__ror__': _boolean_binary_rop('__ror__', ops.Or),
'__rxor__': _boolean_binary_rop('__rxor__', ops.Xor),
'__invert__': _boolean_unary_op('__invert__', ops.Not),
}
_boolean_column_methods = {
'any': _unary_op('any', ops.Any),
'notany': _unary_op('notany', ops.NotAny),
'all': _unary_op('all', ops.All),
'notall': _unary_op('notany', ops.NotAll),
'cumany': _unary_op('cumany', ops.CumulativeAny),
'cumall': _unary_op('cumall', ops.CumulativeAll),
}
_add_methods(ir.BooleanValue, _boolean_value_methods)
_add_methods(ir.BooleanColumn, _boolean_column_methods)
# ---------------------------------------------------------------------
# Binary API
def hashbytes(arg, how='sha256'):
"""
Compute a binary hash value for the indicated value expression.
Parameters
----------
arg : binary or string value expression
how : {'md5', 'sha1', 'sha256', 'sha512'}, default 'sha256'
Hash algorithm to use
Returns
-------
hash_value : binary expression
"""
return ops.HashBytes(arg, how).to_expr()
_binary_value_methods = {'hashbytes': hashbytes}
_add_methods(ir.BinaryValue, _binary_value_methods)
# ---------------------------------------------------------------------
# String API
def _string_substr(self, start, length=None):
"""
Pull substrings out of each string value by position and maximum
length.
Parameters
----------
start : int
First character to start splitting, indices starting at 0 (like
Python)
length : int, optional
Maximum length of each substring. If not supplied, splits each string
to the end
Returns
-------
substrings : type of caller
"""
op = ops.Substring(self, start, length)
return op.to_expr()
def _string_left(self, nchars):
"""
Return left-most up to N characters from each string. Convenience
use of substr.
Returns
-------
substrings : type of caller
"""
return self.substr(0, length=nchars)
def _string_right(self, nchars):
"""
Return up to nchars starting from end of each string.
Returns
-------
substrings : type of caller
"""
return ops.StrRight(self, nchars).to_expr()
def repeat(self, n):
"""
Returns the argument string repeated n times
Parameters
----------
n : int
Returns
-------
result : string
"""
return ops.Repeat(self, n).to_expr()
def _translate(self, from_str, to_str):
"""
Returns string with set of 'from' characters replaced
by set of 'to' characters.
from_str[x] is replaced by to_str[x].
To avoid unexpected behavior, from_str should be
shorter than to_string.
Parameters
----------
from_str : string
to_str : string
Examples
--------
>>> import ibis
>>> table = ibis.table([('string_col', 'string')])
>>> expr = table.string_col.translate('a', 'b')
>>> expr = table.string_col.translate('a', 'bc')
Returns
-------
translated : string
"""
return ops.Translate(self, from_str, to_str).to_expr()
def _string_find(self, substr, start=None, end=None):
"""
Returns position (0 indexed) of first occurence of substring,
optionally after a particular position (0 indexed)
Parameters
----------
substr : string
start : int, default None
end : int, default None
Not currently implemented
Returns
-------
position : int, 0 indexed
"""
if end is not None:
raise NotImplementedError
return ops.StringFind(self, substr, start, end).to_expr()
def _lpad(self, length, pad=' '):
"""
Returns string of given length by truncating (on right)
or padding (on left) original string
Parameters
----------
length : int
pad : string, default is ' '
Examples
--------
>>> import ibis
>>> table = ibis.table([('strings', 'string')])
>>> expr = table.strings.lpad(5, '-')
>>> expr = ibis.literal('a').lpad(5, '-') # 'a' becomes '----a'
>>> expr = ibis.literal('abcdefg').lpad(5, '-') # 'abcdefg' becomes 'abcde' # noqa: E501
Returns
-------
padded : string
"""
return ops.LPad(self, length, pad).to_expr()
def _rpad(self, length, pad=' '):
"""
Returns string of given length by truncating (on right)
or padding (on right) original string
Parameters
----------
length : int
pad : string, default is ' '
Examples
--------
>>> import ibis
>>> table = ibis.table([('string_col', 'string')])
>>> expr = table.string_col.rpad(5, '-')
>>> expr = ibis.literal('a').rpad(5, '-') # 'a' becomes 'a----'
>>> expr = ibis.literal('abcdefg').rpad(5, '-') # 'abcdefg' becomes 'abcde' # noqa: E501
Returns
-------
padded : string
"""
return ops.RPad(self, length, pad).to_expr()
def _find_in_set(self, str_list):
"""
Returns postion (0 indexed) of first occurence of argument within
a list of strings. No string in list can have a comma
Returns -1 if search string isn't found or if search string contains ','
Parameters
----------
str_list : list of strings
Examples
--------
>>> import ibis
>>> table = ibis.table([('strings', 'string')])
>>> result = table.strings.find_in_set(['a', 'b'])
Returns
-------
position : int
"""
return ops.FindInSet(self, str_list).to_expr()
def _string_join(self, strings):
"""
Joins a list of strings together using the calling string as a separator
Parameters
----------
strings : list of strings
Examples
--------
>>> import ibis
>>> sep = ibis.literal(',')
>>> result = sep.join(['a', 'b', 'c'])
Returns
-------
joined : string
"""
return ops.StringJoin(self, strings).to_expr()
def _startswith(self, start):
"""
Determine if `self` string starts with `start` string.
Parameters
----------
start: string
Examples
--------
>>> import ibis
>>> text = ibis.literal('Ibis project)
>>> text.startswith('Ibis')
StartsWith[boolean]
Literal[string]
Ibis project
start:
Literal[string]
Ibis
Returns
-------
result : boolean
"""
return ops.StartsWith(self, start).to_expr()
def _endswith(self, end):
"""
Determine if `self` string ends with `end` string.
Parameters
----------
end: string
Examples
--------
>>> import ibis
>>> text = ibis.literal('Ibis project)
>>> text.endswith('project')
EndsWith[boolean]
Literal[string]
Ibis project
end:
Literal[string]
project
Returns
-------
result : boolean
"""
return ops.EndsWith(self, end).to_expr()
def _string_like(self, patterns):
"""
Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use
% as a multiple-character wildcard or _ (underscore) as a single-character
wildcard.
Use re_search or rlike for regex-based matching.
Parameters
----------
pattern : str or List[str]
A pattern or list of patterns to match. If `pattern` is a list, then if
**any** pattern matches the input then the corresponding row in the
output is ``True``.
Returns
-------
matched : ir.BooleanColumn
"""
return functools.reduce(
operator.or_,
(
ops.StringSQLLike(self, pattern).to_expr()
for pattern in util.promote_list(patterns)
),
)
def _string_ilike(self, patterns):
"""
Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use
% as a multiple-character wildcard or _ (underscore) as a single-character
wildcard.
Use re_search or rlike for regex-based matching.
Parameters
----------
pattern : str or List[str]
A pattern or list of patterns to match. If `pattern` is a list, then if
**any** pattern matches the input then the corresponding row in the
output is ``True``.
Returns
-------
matched : ir.BooleanColumn
"""
return functools.reduce(
operator.or_,
(
ops.StringSQLILike(self, pattern).to_expr()
for pattern in util.promote_list(patterns)
),
)
def re_search(arg, pattern):
"""
Search string values using a regular expression. Returns True if the regex
matches a string and False otherwise.
Parameters
----------
pattern : string (regular expression string)
Returns
-------
searched : boolean value
"""
return ops.RegexSearch(arg, pattern).to_expr()
def regex_extract(arg, pattern, index):
"""
Returns specified index, 0 indexed, from string based on regex pattern
given
Parameters
----------
pattern : string (regular expression string)
index : int, 0 indexed
Returns
-------
extracted : string
"""
return ops.RegexExtract(arg, pattern, index).to_expr()
def regex_replace(arg, pattern, replacement):
"""
Replaces match found by regex with replacement string.
Replacement string can also be a regex
Parameters
----------
pattern : string (regular expression string)
replacement : string (can be regular expression string)
Examples
--------
>>> import ibis
>>> table = ibis.table([('strings', 'string')])
>>> result = table.strings.replace('(b+)', r'<\1>') # 'aaabbbaa' becomes 'aaa<bbb>aaa' # noqa: E501
Returns
-------
modified : string
"""
return ops.RegexReplace(arg, pattern, replacement).to_expr()
def _string_replace(arg, pattern, replacement):
"""
Replaces each exactly occurrence of pattern with given replacement
string. Like Python built-in str.replace
Parameters
----------
pattern : string
replacement : string
Examples
--------
>>> import ibis
>>> table = ibis.table([('strings', 'string')])
>>> result = table.strings.replace('aaa', 'foo') # 'aaabbbaaa' becomes 'foobbbfoo' # noqa: E501
Returns
-------
replaced : string
"""
return ops.StringReplace(arg, pattern, replacement).to_expr()
def to_timestamp(arg, format_str, timezone=None):
"""
Parses a string and returns a timestamp.
Parameters
----------
format_str : A format string potentially of the type '%Y-%m-%d'
timezone : An optional string indicating the timezone,
i.e. 'America/New_York'
Examples
--------
>>> import ibis
>>> date_as_str = ibis.literal('20170206')
>>> result = date_as_str.to_timestamp('%Y%m%d')
Returns
-------
parsed : TimestampValue
"""
return ops.StringToTimestamp(arg, format_str, timezone).to_expr()
def parse_url(arg, extract, key=None):
"""
Returns the portion of a URL corresponding to a part specified
by 'extract'
Can optionally specify a key to retrieve an associated value
if extract parameter is 'QUERY'
Parameters
----------
extract : str
One of {'PROTOCOL', 'HOST', 'PATH', 'REF', 'AUTHORITY', 'FILE',
'USERINFO', 'QUERY'}
key : string (optional)
Examples
--------
>>> url = "https://www.youtube.com/watch?v=kEuEcWfewf8&t=10"
>>> parse_url(url, 'QUERY', 'v') # doctest: +SKIP
'kEuEcWfewf8'
Returns
-------
extracted : string
"""
return ops.ParseURL(arg, extract, key).to_expr()
def _string_contains(arg, substr):
"""
Determine if indicated string is exactly contained in the calling string.
Parameters
----------
substr : str or ibis.expr.types.StringValue
Returns
-------
contains : ibis.expr.types.BooleanValue
"""
return arg.find(substr) >= 0
def _string_split(arg, delimiter):
"""Split `arg` on `delimiter`.
Parameters
----------
arg : str or ibis.expr.types.StringValue
delimiter : str or ibis.expr.types.StringValue
Returns
-------
splitsville : Array[String]
"""
return ops.StringSplit(arg, delimiter).to_expr()
def _string_concat(*args):
return ops.StringConcat(args).to_expr()
def _string_dunder_contains(arg, substr):
raise TypeError('Use val.contains(arg)')
def _string_getitem(self, key):
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
if step is not None and not isinstance(step, ir.Expr) and step != 1:
raise ValueError('Step can only be 1')
if not isinstance(start, ir.Expr):
if start is not None and start < 0:
raise ValueError(
'Negative slicing not yet supported, got start value of '
'{:d}'.format(start)
)
if start is None:
start = 0
if not isinstance(stop, ir.Expr):
if stop is not None and stop < 0:
raise ValueError(
'Negative slicing not yet supported, got stop value of '
'{:d}'.format(stop)
)
if stop is None:
stop = self.length()
return self.substr(start, stop - start)
elif isinstance(key, int):
return self.substr(key, 1)
raise NotImplementedError(f'string __getitem__[{type(key).__name__}]')
_string_value_methods = {
'__getitem__': _string_getitem,
'length': _unary_op('length', ops.StringLength),
'lower': _unary_op('lower', ops.Lowercase),
'upper': _unary_op('upper', ops.Uppercase),
'reverse': _unary_op('reverse', ops.Reverse),
'ascii_str': _unary_op('ascii', ops.StringAscii),
'strip': _unary_op('strip', ops.Strip),
'lstrip': _unary_op('lstrip', ops.LStrip),
'rstrip': _unary_op('rstrip', ops.RStrip),
'capitalize': _unary_op('initcap', ops.Capitalize),
'convert_base': convert_base,
'__contains__': _string_dunder_contains,
'contains': _string_contains,
'hashbytes': hashbytes,
'like': _string_like,
'ilike': _string_ilike,
'rlike': re_search,
'replace': _string_replace,
're_search': re_search,
're_extract': regex_extract,
're_replace': regex_replace,
'to_timestamp': to_timestamp,
'parse_url': parse_url,
'substr': _string_substr,
'left': _string_left,
'right': _string_right,
'repeat': repeat,
'find': _string_find,
'translate': _translate,
'find_in_set': _find_in_set,
'split': _string_split,
'join': _string_join,
'startswith': _startswith,
'endswith': _endswith,
'lpad': _lpad,
'rpad': _rpad,
'__add__': _string_concat,
'__radd__': lambda *args: _string_concat(*args[::-1]),
'__mul__': mul,
'__rmul__': mul,
}
_add_methods(ir.StringValue, _string_value_methods)
# ---------------------------------------------------------------------
# Array API
def _array_slice(array, index):
"""Slice or index `array` at `index`.
Parameters
----------
index : int or ibis.expr.types.IntegerValue or slice
Returns
-------
sliced_array : ibis.expr.types.ValueExpr
If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue` then
the return type is the element type of `array`. If `index` is a
``slice`` then the return type is the same type as the input.
"""
if isinstance(index, slice):
start = index.start
stop = index.stop
step = index.step
if step is not None and step != 1:
raise NotImplementedError('step can only be 1')
op = ops.ArraySlice(array, start if start is not None else 0, stop)
else:
op = ops.ArrayIndex(array, index)
return op.to_expr()
_array_column_methods = {
'length': _unary_op('length', ops.ArrayLength),
'__getitem__': _array_slice,
'__add__': _binop_expr('__add__', ops.ArrayConcat),
'__radd__': toolz.flip(_binop_expr('__radd__', ops.ArrayConcat)),
'__mul__': _binop_expr('__mul__', ops.ArrayRepeat),
'__rmul__': _binop_expr('__rmul__', ops.ArrayRepeat),
}
_add_methods(ir.ArrayValue, _array_column_methods)
# ---------------------------------------------------------------------
# Map API
def get(expr, key, default=None):
"""
Return the mapped value for this key, or the default
if the key does not exist
Parameters
----------
key : any
default : any
"""
return ops.MapValueOrDefaultForKey(expr, key, default).to_expr()
_map_column_methods = {
'get': get,
'length': _unary_op('length', ops.MapLength),
'__getitem__': _binop_expr('__getitem__', ops.MapValueForKey),
'keys': _unary_op('keys', ops.MapKeys),
'values': _unary_op('values', ops.MapValues),
'__add__': _binop_expr('__add__', ops.MapConcat),
'__radd__': toolz.flip(_binop_expr('__radd__', ops.MapConcat)),
}
_add_methods(ir.MapValue, _map_column_methods)
# ---------------------------------------------------------------------
# Struct API
def _struct_get_field(expr: StructValue, field_name: str) -> ValueExpr:
"""Get the `field_name` field from the ``StructValue`` expression `expr`.
Parameters
----------
field_name : str
The name of the field to access from the ``Struct`` typed expression
`expr`. Must be a Python ``str`` type; programmatic struct field
access is not yet supported.
Examples
--------
>>> import ibis
>>> from collections import OrderedDict
>>> struct_expr = ibis.literal(
... OrderedDict([("fruit", "pear"), ("weight", 0)])
... )
>>> struct_expr['fruit'] # doctest: +NORMALIZE_WHITESPACE
fruit = StructField[string]
Literal[struct<fruit: string, weight: int8>]
OrderedDict([('fruit', 'pear'), ('weight', 0)])
field:
fruit
Returns
-------
value_expr : ibis.expr.types.ValueExpr
An expression with the type of the field being accessed.
"""
return ops.StructField(expr, field_name).to_expr().name(field_name)
def _destructure(expr: StructValue) -> DestructValue:
"""Destructure a ``StructValue`` into a corresponding ``DestructValue``.
Each subclass of ``StructValue`` will be destructed accordingly. For
example, a ``StructColumn`` will be destructed into a ``DestructColumn``.
When assigned, a destruct column will destructured and assigned to multiple
columns.
Parameters
----------
expr : StructColumn
The struct column to destructure.
Returns
-------
destruct_expr: ibis.expr.types.DestructValue
A destruct value expression.
"""
# Set name to empty string here so that we can detect and error when
# user set name for a destruct column.
if isinstance(expr, StructScalar):
return DestructScalar(expr._arg, expr._dtype).name("")
elif isinstance(expr, StructColumn):
return DestructColumn(expr._arg, expr._dtype).name("")
elif isinstance(expr, StructValue):
return DestructValue(expr._arg, expr._dtype).name("")
else:
raise AssertionError()
_struct_value_methods = {
'destructure': _destructure,
'__getitem__': _struct_get_field,
}
_add_methods(ir.StructValue, _struct_value_methods)
# ---------------------------------------------------------------------
# Timestamp API
def _timestamp_truncate(arg, unit):
"""
Zero out smaller-size units beyond indicated unit. Commonly used for time
series resampling.
Parameters
----------
unit : string, one of below table
'Y': year
'Q': quarter
'M': month
'W': week
'D': day
'h': hour
'm': minute
's': second
'ms': millisecond
'us': microsecond
'ns': nanosecond
Returns
-------
truncated : timestamp
"""
return ops.TimestampTruncate(arg, unit).to_expr()
def _timestamp_strftime(arg, format_str):
"""
Format timestamp according to the passed format string. Format string may
depend on backend, but we try to conform to ANSI strftime (e.g. Python
built-in datetime.strftime)
Parameters
----------
format_str : string
Returns
-------
formatted : string
"""
return ops.Strftime(arg, format_str).to_expr()
def _timestamp_time(arg):
"""Return a Time node for a Timestamp.
We can perform certain operations on this node w/o actually instantiating
the underlying structure (which is inefficient in pandas/numpy)
Returns
-------
TimeValue
"""
return ops.Time(arg).to_expr()
def _timestamp_date(arg):
"""Return a Date for a Timestamp.
Returns
-------
DateValue
"""
return ops.Date(arg).to_expr()
def _timestamp_sub(left, right):
right = rlz.any(right)
if isinstance(right, ir.TimestampValue):
op = ops.TimestampDiff(left, right)
else:
op = ops.TimestampSub(left, right) # let the operation validate
return op.to_expr()
_timestamp_add = _binop_expr('__add__', ops.TimestampAdd)
_timestamp_radd = _binop_expr('__radd__', ops.TimestampAdd)
_day_of_week = property(
lambda self: ops.DayOfWeekNode(self).to_expr(),
doc="""\
Namespace expression containing methods for extracting information about the
day of the week of a TimestampValue or DateValue expression.
Returns
-------
DayOfWeek
An namespace expression containing methods to use to extract information.
""",
)
_timestamp_value_methods = {
'strftime': _timestamp_strftime,
'year': _extract_field('year', ops.ExtractYear),
'month': _extract_field('month', ops.ExtractMonth),
'day': _extract_field('day', ops.ExtractDay),
'day_of_week': _day_of_week,
'day_of_year': _extract_field('day_of_year', ops.ExtractDayOfYear),
'quarter': _extract_field('quarter', ops.ExtractQuarter),
'epoch_seconds': _extract_field('epoch', ops.ExtractEpochSeconds),
'week_of_year': _extract_field('week_of_year', ops.ExtractWeekOfYear),
'hour': _extract_field('hour', ops.ExtractHour),
'minute': _extract_field('minute', ops.ExtractMinute),
'second': _extract_field('second', ops.ExtractSecond),
'millisecond': _extract_field('millisecond', ops.ExtractMillisecond),
'truncate': _timestamp_truncate,
'time': _timestamp_time,
'date': _timestamp_date,
'__sub__': _timestamp_sub,
'sub': _timestamp_sub,
'__add__': _timestamp_add,
'add': _timestamp_add,
'__radd__': _timestamp_radd,
'radd': _timestamp_radd,
'__rsub__': _timestamp_sub,
'rsub': _timestamp_sub,
}
_add_methods(ir.TimestampValue, _timestamp_value_methods)
# ---------------------------------------------------------------------
# Date API
def _date_truncate(arg, unit):
"""
Zero out smaller-size units beyond indicated unit. Commonly used for time
series resampling.
Parameters
----------
unit : string, one of below table
'Y': year
'Q': quarter
'M': month
'W': week
'D': day
Returns
-------
truncated : date
"""
return ops.DateTruncate(arg, unit).to_expr()
def _date_sub(left, right):
right = rlz.one_of([rlz.date, rlz.interval], right)
if isinstance(right, ir.DateValue):
op = ops.DateDiff(left, right)
else:
op = ops.DateSub(left, right) # let the operation validate
return op.to_expr()
_date_add = _binop_expr('__add__', ops.DateAdd)
_date_value_methods = {
'strftime': _timestamp_strftime,
'year': _extract_field('year', ops.ExtractYear),
'month': _extract_field('month', ops.ExtractMonth),
'day': _extract_field('day', ops.ExtractDay),
'day_of_week': _day_of_week,
'day_of_year': _extract_field('day_of_year', ops.ExtractDayOfYear),
'quarter': _extract_field('quarter', ops.ExtractQuarter),
'epoch_seconds': _extract_field('epoch', ops.ExtractEpochSeconds),
'week_of_year': _extract_field('week_of_year', ops.ExtractWeekOfYear),
'truncate': _date_truncate,
'__sub__': _date_sub,
'sub': _date_sub,
'__rsub__': _date_sub,
'rsub': _date_sub,
'__add__': _date_add,
'add': _date_add,
'__radd__': _date_add,
'radd': _date_add,
}
_add_methods(ir.DateValue, _date_value_methods)
def _to_unit(arg, target_unit):
if arg._dtype.unit != target_unit:
arg = util.convert_unit(arg, arg._dtype.unit, target_unit)
arg.type().unit = target_unit
return arg
def _interval_property(target_unit, name):
return property(
functools.partial(_to_unit, target_unit=target_unit),
doc="""Extract the number of {0}s from an IntervalValue expression.
Returns
-------
IntegerValue
The number of {0}s in the expression
""".format(
name
),
)
_interval_add = _binop_expr('__add__', ops.IntervalAdd)
_interval_radd = _binop_expr('__radd__', ops.IntervalAdd)
_interval_sub = _binop_expr('__sub__', ops.IntervalSubtract)
_interval_mul = _binop_expr('__mul__', ops.IntervalMultiply)
_interval_rmul = _binop_expr('__rmul__', ops.IntervalMultiply)
_interval_floordiv = _binop_expr('__floordiv__', ops.IntervalFloorDivide)
_interval_value_methods = {
'to_unit': _to_unit,
'years': _interval_property('Y', 'year'),
'quarters': _interval_property('Q', 'quarter'),
'months': _interval_property('M', 'month'),
'weeks': _interval_property('W', 'week'),
'days': _interval_property('D', 'day'),
'hours': _interval_property('h', 'hour'),
'minutes': _interval_property('m', 'minute'),
'seconds': _interval_property('s', 'second'),
'milliseconds': _interval_property('ms', 'millisecond'),
'microseconds': _interval_property('us', 'microsecond'),
'nanoseconds': _interval_property('ns', 'nanosecond'),
'__add__': _interval_add,
'add': _interval_add,
'__sub__': _interval_sub,
'sub': _interval_sub,
'__radd__': _interval_radd,
'radd': _interval_radd,
'__mul__': _interval_mul,
'mul': _interval_mul,
'__rmul__': _interval_rmul,
'rmul': _interval_rmul,
'__floordiv__': _interval_floordiv,
'floordiv': _interval_floordiv,
'__neg__': negate,
'negate': negate,
}
_add_methods(ir.IntervalValue, _interval_value_methods)
# ---------------------------------------------------------------------
# Time API
def between_time(arg, lower, upper, timezone=None):
"""Check if the input expr falls between the lower/upper bounds passed.
Bounds are inclusive. All arguments must be comparable.
Parameters
----------
lower : str, datetime.time
upper : str, datetime.time
timezone : str, timezone, default None
Returns
-------
BooleanValue
"""
op = arg.op()
if isinstance(op, ops.Time):
# Here we pull out the first argument to the underlying Time operation
# which is by definition (in _timestamp_value_methods) a
# TimestampValue. We do this so that we can potentially specialize the
# "between time" operation for timestamp_value_expr.time().between().
# A similar mechanism is triggered when creating expressions like
# t.column.distinct().count(), which is turned into t.column.nunique().
arg = op.arg
if timezone is not None:
arg = arg.cast(dt.Timestamp(timezone=timezone))
op = ops.BetweenTime(arg, lower, upper)
else:
op = ops.Between(arg, lower, upper)
return op.to_expr()
def _time_truncate(arg, unit):
"""
Zero out smaller-size units beyond indicated unit. Commonly used for time
series resampling.
Parameters
----------
unit : string, one of below table
'h': hour
'm': minute
's': second
'ms': millisecond
'us': microsecond
'ns': nanosecond
Returns
-------
truncated : time
"""
return ops.TimeTruncate(arg, unit).to_expr()
def _time_sub(left, right):
right = rlz.any(right)
if isinstance(right, ir.TimeValue):
op = ops.TimeDiff(left, right)
else:
op = ops.TimeSub(left, right) # let the operation validate
return op.to_expr()
_time_add = _binop_expr('__add__', ops.TimeAdd)
_time_value_methods = {
'between': between_time,
'truncate': _time_truncate,
'hour': _extract_field('hour', ops.ExtractHour),
'minute': _extract_field('minute', ops.ExtractMinute),
'second': _extract_field('second', ops.ExtractSecond),
'millisecond': _extract_field('millisecond', ops.ExtractMillisecond),
'__sub__': _time_sub,
'sub': _time_sub,
'__rsub__': _time_sub,
'rsub': _time_sub,
'__add__': _time_add,
'add': _time_add,
'__radd__': _time_add,
'radd': _time_add,
}
_add_methods(ir.TimeValue, _time_value_methods)
# ---------------------------------------------------------------------
# Decimal API
_decimal_value_methods = {
'precision': _unary_op('precision', ops.DecimalPrecision),
'scale': _unary_op('scale', ops.DecimalScale),
}
_add_methods(ir.DecimalValue, _decimal_value_methods)
# ----------------------------------------------------------------------
# Category API
_category_value_methods = {'label': category_label}
_add_methods(ir.CategoryValue, _category_value_methods)
# ---------------------------------------------------------------------
# Table API
_join_classes = {
'inner': ops.InnerJoin,
'left': ops.LeftJoin,
'any_inner': ops.AnyInnerJoin,
'any_left': ops.AnyLeftJoin,
'outer': ops.OuterJoin,
'right': ops.RightJoin,
'left_semi': ops.LeftSemiJoin,
'semi': ops.LeftSemiJoin,
'anti': ops.LeftAntiJoin,
'cross': ops.CrossJoin,
}
def join(
left: ir.TableExpr,
right: ir.TableExpr,
predicates=(),
how: str = "inner",
*,
suffixes: tuple[str, str] = ("_x", "_y"),
):
"""Join two tables.
Parameters
----------
left
Left table to join
right
Right table to join
predicates
Boolean or column names to join on
how
Join method
suffixes
Left and right suffixes that will be used to rename overlapping
columns.
"""
klass = _join_classes[how.lower()]
if isinstance(predicates, Expr):
predicates = _L.flatten_predicate(predicates)
expr = klass(left, right, predicates).to_expr()
# semi/anti join only give access to the left table's fields, so
# there's never overlap
if how in ("semi", "anti"):
return expr
return ops.relations._dedup_join_columns(
expr,
left=left,
right=right,
suffixes=suffixes,
)
def asof_join(
left,
right,
predicates=(),
by=(),
tolerance=None,
*,
suffixes: tuple[str, str] = ("_x", "_y"),
):
"""Perform an asof join between two tables. Similar to a left join
except that the match is done on nearest key rather than equal keys.
Optionally, match keys with 'by' before joining with predicates.
Parameters
----------
left : TableExpr
right : TableExpr
predicates : join expression(s)
by : string
column to group by before joining
tolerance : interval
Amount of time to look behind when joining
suffixes
Left and right suffixes that will be used to rename overlapping
columns.
"""
expr = ops.AsOfJoin(left, right, predicates, by, tolerance).to_expr()
return ops.relations._dedup_join_columns(
expr,
left=left,
right=right,
suffixes=suffixes,
)
def cross_join(
left,
right,
*rest,
suffixes: tuple[str, str] = ("_x", "_y"),
):
"""
Perform a cross join (cartesian product) amongst a list of tables, with
optional set of prefixes to apply to overlapping column names
Parameters
----------
left
Left table
right
Right table
rest
Additional tables to cross join
suffixes
Left and right suffixes that will be used to rename overlapping
columns.
Returns
-------
joined : TableExpr
Examples
--------
>>> import ibis
>>> schemas = [(name, 'int64') for name in 'abcde']
>>> a, b, c, d, e = [
... ibis.table([(name, type)], name=name) for name, type in schemas
... ]
>>> joined1 = ibis.cross_join(a, b, c, d, e)
>>> joined1 # doctest: +NORMALIZE_WHITESPACE
ref_0
UnboundTable[table]
name: a
schema:
a : int64
ref_1
UnboundTable[table]
name: b
schema:
b : int64
ref_2
UnboundTable[table]
name: c
schema:
c : int64
ref_3
UnboundTable[table]
name: d
schema:
d : int64
ref_4
UnboundTable[table]
name: e
schema:
e : int64
CrossJoin[table]
left:
Table: ref_0
right:
CrossJoin[table]
left:
CrossJoin[table]
left:
CrossJoin[table]
left:
Table: ref_1
right:
Table: ref_2
right:
Table: ref_3
right:
Table: ref_4
"""
expr = ops.CrossJoin(
left,
functools.reduce(ir.TableExpr.cross_join, rest, right),
[],
).to_expr()
return ops.relations._dedup_join_columns(
expr,
left=left,
right=right,
suffixes=suffixes,
)
def _table_count(self):
"""
Returns the computed number of rows in the table expression
Returns
-------
count : Int64Scalar
"""
return ops.Count(self, None).to_expr().name('count')
def _table_dropna(self, subset: list[str] | None = None, how: str = 'any'):
"""
Remove rows with null values from the table.
Parameters
----------
subset : list of strings
Optional, columns names to consider. Defaults to all columns.
how : string
Determine whether a row is removed if there is at least one null
value in the row ('any'), or if all row values are null ('all').
Options are 'any' or 'all'. Default is 'any'.
Examples
--------
>>> import ibis
>>> t = ibis.table([('a', 'int64'), ('b', 'string')])
>>> t = t.dropna() # Drop all rows where any values are null
>>> t = t.dropna(how='all') # Only drop rows where all values are null
>>> t = t.dropna(subset=['a'], how='all') # Only drop rows where all values in column 'a' are null # noqa: E501
Returns
-------
table : TableExpr
New table expression
"""
if subset is None:
subset = []
subset = util.promote_list(subset)
return ops.DropNa(self, how, subset).to_expr()
def _table_fillna(self, replacements):
"""
Fill null values in the table.
Parameters
----------
replacements : scalar or dict
Value with which to fill the nulls. If passed as a dict, the
keys are column name strings that map to their replacement value.
If passed as a scalar, all columns are filled with that value.
Notes
-----
There is potential lack of type stability with the fillna API. For
example, different library versions may impact whether or not a given
backend type-promotes integer replacement values to floats.
Examples
--------
>>> import ibis
>>> t = ibis.table([('a', 'int64'), ('b', 'string')])
>>> t = t.fillna(0.0) # Replace nulls in all columns with 0.0
>>> t.fillna({c: 0.0 for c, t in t.schema().items() if t == dt.float64}) # Replace all na values in all columns of a given type with the same value # noqa: E501
Returns
-------
table : TableExpr
New table expression
"""
if isinstance(replacements, collections.abc.Mapping):
columns = replacements.keys()
table_columns = self.schema().names
invalid = set(columns) - set(table_columns)
if invalid:
raise com.IbisTypeError(
f'value {list(invalid)} is not a field in {table_columns}.'
)
return ops.FillNa(self, replacements).to_expr()
def _table_info(self, buf=None):
"""
Similar to pandas DataFrame.info. Show column names, types, and null
counts. Output to stdout by default
"""
metrics = [self.count().name('nrows')]
for col in self.columns:
metrics.append(self[col].count().name(col))
metrics = self.aggregate(metrics).execute().loc[0]
names = ['Column', '------'] + self.columns
types = ['Type', '----'] + [repr(x) for x in self.schema().types]
counts = ['Non-null #', '----------'] + [str(x) for x in metrics[1:]]
col_metrics = util.adjoin(2, names, types, counts)
result = f'Table rows: {metrics[0]}\n\n{col_metrics}'
print(result, file=buf)
def _table_set_column(table, name, expr):
"""
Replace an existing column with a new expression
Parameters
----------
name : string
Column name to replace
expr : value expression
New data for column
Returns
-------
set_table : TableExpr
New table expression
"""
expr = table._ensure_expr(expr)
if expr._name != name:
expr = expr.name(name)
if name not in table:
raise KeyError(f'{name} is not in the table')
# TODO: This assumes that projection is required; may be backend-dependent
proj_exprs = []
for key in table.columns:
if key == name:
proj_exprs.append(expr)
else:
proj_exprs.append(table[key])
return table.projection(proj_exprs)
def _regular_join_method(name, how, doc=None):
def f(self, other, predicates=(), rename_left=None, rename_right=None):
return self.join(other, predicates, how=how)
if doc:
f.__doc__ = doc
else:
# XXX
f.__doc__ = join.__doc__
f.__name__ = name
return f
def filter(table, predicates):
"""
Select rows from table based on boolean expressions
Parameters
----------
predicates : boolean array expressions, or list thereof
Returns
-------
filtered_expr : TableExpr
"""
resolved_predicates = _resolve_predicates(table, predicates)
return _L.apply_filter(table, resolved_predicates)
def _resolve_predicates(table, predicates):
if isinstance(predicates, Expr):
predicates = _L.flatten_predicate(predicates)
predicates = util.promote_list(predicates)
predicates = [ir.bind_expr(table, x) for x in predicates]
resolved_predicates = []
for pred in predicates:
if isinstance(pred, ir.AnalyticExpr):
pred = pred.to_filter()
resolved_predicates.append(pred)
return resolved_predicates
def aggregate(table, metrics=None, by=None, having=None, **kwargs):
"""
Aggregate a table with a given set of reductions, with grouping
expressions, and post-aggregation filters.
Parameters
----------
table : table expression
metrics : expression or expression list
by : optional, default None
Grouping expressions
having : optional, default None
Post-aggregation filters
Returns
-------
agg_expr : TableExpr
"""
metrics = [] if metrics is None else util.promote_list(metrics)
metrics.extend(
table._ensure_expr(expr).name(name)
for name, expr in sorted(kwargs.items(), key=operator.itemgetter(0))
)
op = table.op().aggregate(
table,
metrics,
by=util.promote_list(by if by is not None else []),
having=util.promote_list(having if having is not None else []),
)
return op.to_expr()
def _table_distinct(self):
"""
Compute set of unique rows/tuples occurring in this table
"""
op = ops.Distinct(self)
return op.to_expr()
def _table_limit(table, n, offset=0):
"""
Select the first n rows at beginning of table (may not be deterministic
depending on implementation and presence of a sorting).
Parameters
----------
n : int
Number of rows to include
offset : int, default 0
Number of rows to skip first
Returns
-------
limited : TableExpr
"""
op = ops.Limit(table, n, offset=offset)
return op.to_expr()
def _head(table, n=5):
"""
Select the first n rows at beginning of a table (may not be deterministic
depending on implementation and presence of a sorting).
Parameters
----------
n : int
Number of rows to include, defaults to 5
Returns
-------
limited : TableExpr
See Also
--------
ibis.expr.types.TableExpr.limit
"""
return _table_limit(table, n=n)
def _table_sort_by(table, sort_exprs):
"""
Sort table by the indicated column expressions and sort orders
(ascending/descending)
Parameters
----------
sort_exprs : sorting expressions
Must be one of:
- Column name or expression
- Sort key, e.g. desc(col)
- (column name, True (ascending) / False (descending))
Examples
--------
>>> import ibis
>>> t = ibis.table([('a', 'int64'), ('b', 'string')])
>>> ab_sorted = t.sort_by([('a', True), ('b', False)])
Returns
-------
TableExpr
"""
result = table.op().sort_by(
table,
util.promote_list(sort_exprs if sort_exprs is not None else []),
)
return result.to_expr()
def _table_union(left, right, distinct=False):
"""
Form the table set union of two table expressions having identical
schemas.
Parameters
----------
left : TableExpr
right : TableExpr
distinct : boolean, default False
Only union distinct rows not occurring in the calling table (this
can be very expensive, be careful)
Returns
-------
union : TableExpr
"""
return ops.Union(left, right, distinct=distinct).to_expr()
def _table_intersect(left: TableExpr, right: TableExpr):
"""
Form the table set intersect of two table expressions having identical
schemas. An intersect returns only the common rows between the two tables.
Parameters
----------
left : TableExpr
right : TableExpr
Returns
-------
intersection : TableExpr
"""
return ops.Intersection(left, right).to_expr()
def _table_difference(left: TableExpr, right: TableExpr):
"""
Form the table set difference of two table expressions having identical
schemas. A set difference returns only the rows present in the left table
that are not present in the right table
Parameters
----------
left : TableExpr
right : TableExpr
Returns
-------
difference : TableExpr
"""
return ops.Difference(left, right).to_expr()
def _table_to_array(self):
"""View a single column table as an array."""
schema = self.schema()
if len(schema) != 1:
raise com.ExpressionError(
'Table must have exactly one column when viewed as array'
)
return ops.TableArrayView(self).to_expr()
def _safe_get_name(expr):
try:
return expr.get_name()
except com.ExpressionError:
return None
def mutate(
table: ir.TableExpr, exprs: list[ir.Expr] = None, **mutations: Any
) -> ir.TableExpr:
"""
Convenience function for table projections involving adding columns
Parameters
----------
exprs : list, default None
List of named expressions to add as columns
mutations : keywords for new columns
Returns
-------
mutated : TableExpr
Examples
--------
Using keywords arguments to name the new columns
>>> import ibis
>>> table = ibis.table([('foo', 'double'), ('bar', 'double')], name='t')
>>> expr = table.mutate(qux=table.foo + table.bar, baz=5)
>>> expr # doctest: +NORMALIZE_WHITESPACE
ref_0
UnboundTable[table]
name: t
schema:
foo : float64
bar : float64
<BLANKLINE>
Selection[table]
table:
Table: ref_0
selections:
Table: ref_0
baz = Literal[int8]
5
qux = Add[float64*]
left:
foo = Column[float64*] 'foo' from table
ref_0
right:
bar = Column[float64*] 'bar' from table
ref_0
Using the :meth:`ibis.expr.types.Expr.name` method to name the new columns
>>> new_columns = [ibis.literal(5).name('baz',),
... (table.foo + table.bar).name('qux')]
>>> expr2 = table.mutate(new_columns)
>>> expr.equals(expr2)
True
"""
exprs = [] if exprs is None else util.promote_list(exprs)
for name, expr in sorted(mutations.items(), key=operator.itemgetter(0)):
if util.is_function(expr):
value = expr(table)
else:
value = rlz.any(expr)
exprs.append(value.name(name))
mutation_exprs = _L.get_mutation_exprs(exprs, table)
return table.projection(mutation_exprs)
def projection(table, exprs):
"""
Compute new table expression with the indicated column expressions from
this table.
Parameters
----------
exprs : column expression, or string, or list of column expressions and
strings. If strings passed, must be columns in the table already
Returns
-------
projection : TableExpr
Notes
-----
Passing an aggregate function to this method will broadcast the aggregate's
value over the number of rows in the table. See the examples section for
more details.
Examples
--------
Simple projection
>>> import ibis
>>> fields = [('a', 'int64'), ('b', 'double')]
>>> t = ibis.table(fields, name='t')
>>> proj = t.projection([t.a, (t.b + 1).name('b_plus_1')])
>>> proj # doctest: +NORMALIZE_WHITESPACE
ref_0
UnboundTable[table]
name: t
schema:
a : int64
b : float64
<BLANKLINE>
Selection[table]
table:
Table: ref_0
selections:
a = Column[int64*] 'a' from table
ref_0
b_plus_1 = Add[float64*]
left:
b = Column[float64*] 'b' from table
ref_0
right:
Literal[int8]
1
>>> proj2 = t[t.a, (t.b + 1).name('b_plus_1')]
>>> proj.equals(proj2)
True
Aggregate projection
>>> agg_proj = t[t.a.sum().name('sum_a'), t.b.mean().name('mean_b')]
>>> agg_proj # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
ref_0
UnboundTable[table]
name: t
schema:
a : int64
b : float64
<BLANKLINE>
Selection[table]
table:
Table: ref_0
selections:
sum_a = WindowOp[int64*]
sum_a = Sum[int64]
a = Column[int64*] 'a' from table
ref_0
where:
None
<ibis.expr.window.Window object at 0x...>
mean_b = WindowOp[float64*]
mean_b = Mean[float64]
b = Column[float64*] 'b' from table
ref_0
where:
None
<ibis.expr.window.Window object at 0x...>
Note the ``<ibis.expr.window.Window>`` objects here, their existence means
that the result of the aggregation will be broadcast across the number of
rows in the input column. The purpose of this expression rewrite is to make
it easy to write column/scalar-aggregate operations like
.. code-block:: python
t[(t.a - t.a.mean()).name('demeaned_a')]
"""
import ibis.expr.analysis as L
if isinstance(exprs, (Expr, str)):
exprs = [exprs]
projector = L.Projector(table, exprs)
op = projector.get_result()
return op.to_expr()
def _table_relabel(table, substitutions, replacements=None):
"""
Change table column names, otherwise leaving table unaltered
Parameters
----------
substitutions
Returns
-------
relabeled : TableExpr
"""
if replacements is not None:
raise NotImplementedError
observed = set()
exprs = []
for c in table.columns:
expr = table[c]
if c in substitutions:
expr = expr.name(substitutions[c])
observed.add(c)
exprs.append(expr)
for c in substitutions:
if c not in observed:
raise KeyError(f'{c!r} is not an existing column')
return table.projection(exprs)
def _table_view(self):
"""
Create a new table expression that is semantically equivalent to the
current one, but is considered a distinct relation for evaluation
purposes (e.g. in SQL).
For doing any self-referencing operations, like a self-join, you will
use this operation to create a reference to the current table
expression.
Returns
-------
expr : TableExpr
"""
new_view = ops.SelfReference(self)
return new_view.to_expr()
def _table_drop(self, fields: str | list[str]) -> ir.TableExpr:
"""
Remove one or more fields from a table.
Parameters
----------
fields : The field(s) to be removed from the target table.
Returns
-------
A TableExpr with specified fields removed
"""
if not fields:
# no-op if nothing to be dropped
return self
if isinstance(fields, str):
# We want to drop just one attribute.
fields = [fields]
schema = self.schema()
field_set = frozenset(fields)
missing_fields = field_set.difference(schema)
if missing_fields:
raise KeyError(f'Fields not in table: {missing_fields!s}')
return self[[field for field in schema if field not in field_set]]
def _rowid(self):
"""
An autonumeric representing the row number of the results.
It can be 0 or 1 indexed depending on the backend. Check the backend
documentation.
Note that this is different from the window function row number
(even if they are conceptually the same), and different from row
id in backends where it represents the physical location (e.g. Oracle
or PostgreSQL's ctid).
Returns
-------
ir.IntegerColumn
Examples
--------
>>> my_table[my_table.rowid(), my_table.name].execute()
1|Ibis
2|pandas
3|Dask
"""
return ops.RowID().to_expr()
_table_methods = {
'aggregate': aggregate,
'count': _table_count,
'distinct': _table_distinct,
'drop': _table_drop,
'dropna': _table_dropna,
'fillna': _table_fillna,
'info': _table_info,
'limit': _table_limit,
'head': _head,
'set_column': _table_set_column,
'filter': filter,
'mutate': mutate,
'projection': projection,
'select': projection,
'relabel': _table_relabel,
'join': join,
'cross_join': cross_join,
'inner_join': _regular_join_method('inner_join', 'inner'),
'left_join': _regular_join_method('left_join', 'left'),
'any_inner_join': _regular_join_method('any_inner_join', 'any_inner'),
'any_left_join': _regular_join_method('any_left_join', 'any_left'),
'outer_join': _regular_join_method('outer_join', 'outer'),
'semi_join': _regular_join_method('semi_join', 'semi'),
'anti_join': _regular_join_method('anti_join', 'anti'),
'asof_join': asof_join,
'sort_by': _table_sort_by,
'to_array': _table_to_array,
'union': _table_union,
'intersect': _table_intersect,
'difference': _table_difference,
'view': _table_view,
'rowid': _rowid,
}
_add_methods(ir.TableExpr, _table_methods)
def prevent_rewrite(expr, client=None):
"""Prevent optimization from happening below `expr`.
Parameters
----------
expr : ir.TableExpr
Any table expression whose optimization you want to prevent
client : ibis.backends.base.Client, optional, default None
A client to use to create the SQLQueryResult operation. This is useful
if you're compiling an expression that derives from an
:class:`~ibis.expr.operations.UnboundTable` operation.
Returns
-------
sql_query_result : ir.TableExpr
"""
if client is None:
client = expr._find_backend()
query = client.compile(expr)
return ops.SQLQueryResult(query, expr.schema(), client).to_expr()
| 23.629583
| 166
| 0.60801
|
a10b99d45b416dca8913197cff475dcaa8b8b098
| 4,823
|
py
|
Python
|
lib/spack/spack/test/cmd/load.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-06-25T15:25:29.000Z
|
2020-06-25T15:25:29.000Z
|
lib/spack/spack/test/cmd/load.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2018-07-06T19:11:46.000Z
|
2018-07-06T19:12:28.000Z
|
lib/spack/spack/test/cmd/load.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-03-06T11:04:37.000Z
|
2020-03-06T11:04:37.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.main import SpackCommand
import spack.spec
import spack.user_environment as uenv
load = SpackCommand('load')
unload = SpackCommand('unload')
install = SpackCommand('install')
location = SpackCommand('location')
def test_load(install_mockery, mock_fetch, mock_archive, mock_packages):
"""Test that the commands generated by load add the specified prefix
inspections. Also test that Spack records loaded specs by hash in the
user environment.
CMAKE_PREFIX_PATH is the only prefix inspection guaranteed for fake
packages, since it keys on the prefix instead of a subdir."""
install('mpileaks')
mpileaks_spec = spack.spec.Spec('mpileaks').concretized()
sh_out = load('--sh', '--only', 'package', 'mpileaks')
csh_out = load('--csh', '--only', 'package', 'mpileaks')
# Test prefix inspections
sh_out_test = 'export CMAKE_PREFIX_PATH=%s' % mpileaks_spec.prefix
csh_out_test = 'setenv CMAKE_PREFIX_PATH %s' % mpileaks_spec.prefix
assert sh_out_test in sh_out
assert csh_out_test in csh_out
# Test hashes recorded properly
hash_test_replacements = (uenv.spack_loaded_hashes_var,
mpileaks_spec.dag_hash())
sh_hash_test = 'export %s=%s' % hash_test_replacements
csh_hash_test = 'setenv %s %s' % hash_test_replacements
assert sh_hash_test in sh_out
assert csh_hash_test in csh_out
def test_load_recursive(install_mockery, mock_fetch, mock_archive,
mock_packages):
"""Test that the '-r' option to the load command prepends dependency prefix
inspections in post-order"""
install('mpileaks')
mpileaks_spec = spack.spec.Spec('mpileaks').concretized()
sh_out = load('--sh', 'mpileaks')
csh_out = load('--csh', 'mpileaks')
# Test prefix inspections
prefix_test_replacement = ':'.join(reversed(
[s.prefix for s in mpileaks_spec.traverse(order='post')]))
sh_prefix_test = 'export CMAKE_PREFIX_PATH=%s' % prefix_test_replacement
csh_prefix_test = 'setenv CMAKE_PREFIX_PATH %s' % prefix_test_replacement
assert sh_prefix_test in sh_out
assert csh_prefix_test in csh_out
# Test spack records loaded hashes properly
hash_test_replacement = (uenv.spack_loaded_hashes_var, ':'.join(reversed(
[s.dag_hash() for s in mpileaks_spec.traverse(order='post')])))
sh_hash_test = 'export %s=%s' % hash_test_replacement
csh_hash_test = 'setenv %s %s' % hash_test_replacement
assert sh_hash_test in sh_out
assert csh_hash_test in csh_out
def test_load_includes_run_env(install_mockery, mock_fetch, mock_archive,
mock_packages):
"""Tests that environment changes from the package's
`setup_run_environment` method are added to the user environment in
addition to the prefix inspections"""
install('mpileaks')
sh_out = load('--sh', 'mpileaks')
csh_out = load('--csh', 'mpileaks')
assert 'export FOOBAR=mpileaks' in sh_out
assert 'setenv FOOBAR mpileaks' in csh_out
def test_load_fails_no_shell(install_mockery, mock_fetch, mock_archive,
mock_packages):
"""Test that spack load prints an error message without a shell."""
install('mpileaks')
out = load('mpileaks', fail_on_error=False)
assert "To initialize spack's shell commands" in out
def test_unload(install_mockery, mock_fetch, mock_archive, mock_packages,
working_env):
"""Tests that any variables set in the user environment are undone by the
unload command"""
install('mpileaks')
mpileaks_spec = spack.spec.Spec('mpileaks').concretized()
# Set so unload has something to do
os.environ['FOOBAR'] = 'mpileaks'
os.environ[uenv.spack_loaded_hashes_var] = '%s:%s' % (
mpileaks_spec.dag_hash(), 'garbage')
sh_out = unload('--sh', 'mpileaks')
csh_out = unload('--csh', 'mpileaks')
assert 'unset FOOBAR' in sh_out
assert 'unsetenv FOOBAR' in csh_out
assert 'export %s=garbage' % uenv.spack_loaded_hashes_var in sh_out
assert 'setenv %s garbage' % uenv.spack_loaded_hashes_var in csh_out
def test_unload_fails_no_shell(install_mockery, mock_fetch, mock_archive,
mock_packages, working_env):
"""Test that spack unload prints an error message without a shell."""
install('mpileaks')
mpileaks_spec = spack.spec.Spec('mpileaks').concretized()
os.environ[uenv.spack_loaded_hashes_var] = mpileaks_spec.dag_hash()
out = unload('mpileaks', fail_on_error=False)
assert "To initialize spack's shell commands" in out
| 38.277778
| 79
| 0.706199
|
7657f3a5f7164bc2cf86aae708535413926a34c5
| 263,858
|
py
|
Python
|
ac2git.py
|
orao-navico/ac2git
|
c11e9793eec948476c59d1d1a1d4a9d49e7788ac
|
[
"Unlicense"
] | 23
|
2016-07-18T18:23:27.000Z
|
2021-04-25T16:29:15.000Z
|
ac2git.py
|
orao-navico/ac2git
|
c11e9793eec948476c59d1d1a1d4a9d49e7788ac
|
[
"Unlicense"
] | 51
|
2016-08-06T14:52:13.000Z
|
2022-03-20T10:16:45.000Z
|
ac2git.py
|
orao-navico/ac2git
|
c11e9793eec948476c59d1d1a1d4a9d49e7788ac
|
[
"Unlicense"
] | 12
|
2016-08-29T15:26:21.000Z
|
2022-01-28T13:09:21.000Z
|
#!/usr/bin/python3
# ################################################################################################ #
# AccuRev to Git conversion script #
# Author: Lazar Sumar #
# Date: 06/11/2014 #
# #
# This script is intended to convert an entire AccuRev depot into a git repository converting #
# workspaces and streams into branches and respecting merges. #
# ################################################################################################ #
import sys
import argparse
import os
import os.path
import shutil
import subprocess
import logging
import warnings
import xml.etree.ElementTree as ElementTree
from datetime import datetime, timedelta
import time
import re
import types
import copy
import codecs
import json
import pytz
import tempfile
import stat
from collections import OrderedDict
import accurev
import git
logger = None
ignored_transaction_types = [ "archive", "compress", "defcomp", "dispatch", "unarchive" ]
# Taken from this StackOverflow answer: http://stackoverflow.com/a/19238551
# Compulsary quote: https://twitter.com/codinghorror/status/712467615780708352
def utc2local(utc):
epoch = time.mktime(utc.timetuple())
offset = datetime.fromtimestamp (epoch) - datetime.utcfromtimestamp (epoch)
return utc + offset
# This function calls the provided function func, only with arguments that were
# not None.
def CallOnNonNoneArgs(func, *args):
return func(a for a in args if a is not None)
# ################################################################################################ #
# Script Classes #
# ################################################################################################ #
class Config(object):
class AccuRev(object):
@classmethod
def fromxmlelement(cls, xmlElement):
if xmlElement is not None and xmlElement.tag == 'accurev':
depot = xmlElement.attrib.get('depot')
username = xmlElement.attrib.get('username')
password = xmlElement.attrib.get('password')
startTransaction = xmlElement.attrib.get('start-transaction')
endTransaction = xmlElement.attrib.get('end-transaction')
commandCacheFilename = xmlElement.attrib.get('command-cache-filename')
excludeStreamTypes = None
streamMap = None
streamListElement = xmlElement.find('stream-list')
if streamListElement is not None:
excludeStreamTypes = streamListElement.attrib.get("exclude-types")
if excludeStreamTypes is not None:
excludeStreamTypes = [x.strip() for x in excludeStreamTypes.split(',') if len(x.strip()) > 0]
streamMap = OrderedDict()
streamElementList = streamListElement.findall('stream')
for streamElement in streamElementList:
streamName = streamElement.text
branchName = streamElement.attrib.get("branch-name")
if branchName is None:
branchName = streamName
streamMap[streamName] = branchName
return cls(depot, username, password, startTransaction, endTransaction, streamMap, commandCacheFilename, excludeStreamTypes)
else:
return None
def __init__(self, depot = None, username = None, password = None, startTransaction = None, endTransaction = None, streamMap = None, commandCacheFilename = None, excludeStreamTypes = None):
self.depot = depot
self.username = username
self.password = password
self.startTransaction = startTransaction
self.endTransaction = endTransaction
self.streamMap = streamMap
self.commandCacheFilename = commandCacheFilename
self.excludeStreamTypes = excludeStreamTypes
def __repr__(self):
str = "Config.AccuRev(depot=" + repr(self.depot)
str += ", username=" + repr(self.username)
str += ", password=" + repr(self.password)
str += ", startTransaction=" + repr(self.startTransaction)
str += ", endTransaction=" + repr(self.endTransaction)
if self.streamMap is not None:
str += ", streamMap=" + repr(self.streamMap)
if self.commandCacheFilename is not None:
str += ", commandCacheFilename=" + repr(self.commandCacheFilename)
if self.excludeStreamTypes is not None:
str += ", excludeStreamTypes=" + repr(self.excludeStreamTypes)
str += ")"
return str
def UseCommandCache(self):
return self.commandCacheFilename is not None
class Git(object):
@classmethod
def fromxmlelement(cls, xmlElement):
if xmlElement is not None and xmlElement.tag == 'git':
repoPath = xmlElement.attrib.get('repo-path')
messageStyle = xmlElement.attrib.get('message-style')
messageKey = xmlElement.attrib.get('message-key')
authorIsCommitter = xmlElement.attrib.get('author-is-committer')
emptyChildStreamAction = xmlElement.attrib.get('empty-child-stream-action')
sourceStreamFastForward = xmlElement.attrib.get('source-stream-fast-forward')
sourceStreamInferrence = xmlElement.attrib.get('source-stream-inferrence')
newBasisIsFirstParent = xmlElement.attrib.get('new-basis-is-first-parent')
remoteMap = OrderedDict()
remoteElementList = xmlElement.findall('remote')
for remoteElement in remoteElementList:
remoteName = remoteElement.attrib.get("name")
remoteUrl = remoteElement.attrib.get("url")
remotePushUrl = remoteElement.attrib.get("push-url")
remoteMap[remoteName] = git.GitRemoteListItem(name=remoteName, url=remoteUrl, pushUrl=remotePushUrl)
return cls(repoPath=repoPath, messageStyle=messageStyle, messageKey=messageKey, authorIsCommitter=authorIsCommitter, remoteMap=remoteMap, emptyChildStreamAction=emptyChildStreamAction, sourceStreamFastForward=sourceStreamFastForward, sourceStreamInferrence=sourceStreamInferrence, newBasisIsFirstParent=newBasisIsFirstParent)
else:
return None
def __init__(self, repoPath, messageStyle=None, messageKey=None, authorIsCommitter=None, remoteMap=None, emptyChildStreamAction=None, sourceStreamFastForward=None, sourceStreamInferrence=None, newBasisIsFirstParent=None):
self.repoPath = repoPath
self.messageStyle = messageStyle
self.messageKey = messageKey
self.remoteMap = remoteMap
if authorIsCommitter is not None:
authorIsCommitter = authorIsCommitter.lower()
if authorIsCommitter not in [ "true", "false" ]:
raise Exception("The author-is-committer attribute only accepts true or false but was set to '{v}'.".format(v=authorIsCommitter))
authorIsCommitter = (authorIsCommitter == "true")
else:
authroIsCommitter = True
self.authorIsCommitter = authorIsCommitter
if emptyChildStreamAction is not None:
if emptyChildStreamAction not in [ "merge", "cherry-pick" ]:
raise Exception("Error, the empty-child-stream-action attribute only accepts merge or cherry-pick options but got: {0}".format(emptyChildStreamAction))
self.emptyChildStreamAction = emptyChildStreamAction
else:
self.emptyChildStreamAction = "cherry-pick"
if sourceStreamFastForward is not None:
sourceStreamFastForward = sourceStreamFastForward.lower()
if sourceStreamFastForward not in [ "true", "false" ]:
raise Exception("Error, the source-stream-fast-forward attribute only accepts true or false options but got: {0}".format(sourceStreamFastForward))
self.sourceStreamFastForward = (sourceStreamFastForward == "true")
else:
self.sourceStreamFastForward = False
if sourceStreamInferrence is not None:
sourceStreamInferrence = sourceStreamInferrence.lower()
if sourceStreamInferrence not in [ "true", "false" ]:
raise Exception("Error, the source-stream-inferrence attribute only accepts true or false options but got: {0}".format(sourceStreamInferrence))
self.sourceStreamInferrence = (sourceStreamInferrence == "true")
else:
self.sourceStreamInferrence = False
if newBasisIsFirstParent is not None:
newBasisIsFirstParent = newBasisIsFirstParent.lower()
if newBasisIsFirstParent not in [ "true", "false" ]:
raise Exception("Error, the new-basis-is-first-parent attribute only accepts true or false options but got: {0}".format(newBasisIsFirstParent))
self.newBasisIsFirstParent = (newBasisIsFirstParent == "true")
else:
self.newBasisIsFirstParent = True
def __repr__(self):
str = "Config.Git(repoPath=" + repr(self.repoPath)
if self.messageStyle is not None:
str += ", messageStyle=" + repr(self.messageStyle)
if self.messageKey is not None:
str += ", messageKey=" + repr(self.messageKey)
if self.remoteMap is not None:
str += ", remoteMap=" + repr(self.remoteMap)
if self.authorIsCommitter is not None:
str += ", authorIsCommitter=" + repr(self.authorIsCommitter)
if self.newBasisIsFirstParent is not None:
str += ", newBasisIsFirstParent=" + repr(self.newBasisIsFirstParent)
str += ")"
return str
class UserMap(object):
@classmethod
def fromxmlelement(cls, xmlElement):
if xmlElement is not None and xmlElement.tag == 'map-user':
accurevUsername = None
gitName = None
gitEmail = None
timezone = None
accurevElement = xmlElement.find('accurev')
if accurevElement is not None:
accurevUsername = accurevElement.attrib.get('username')
gitElement = xmlElement.find('git')
if gitElement is not None:
gitName = gitElement.attrib.get('name')
gitEmail = gitElement.attrib.get('email')
timezone = gitElement.attrib.get('timezone')
return cls(accurevUsername=accurevUsername, gitName=gitName, gitEmail=gitEmail, timezone=timezone)
else:
return None
def __init__(self, accurevUsername, gitName, gitEmail, timezone=None):
self.accurevUsername = accurevUsername
self.gitName = gitName
self.gitEmail = gitEmail
self.timezone = timezone
def __repr__(self):
str = "Config.UserMap(accurevUsername=" + repr(self.accurevUsername)
str += ", gitName=" + repr(self.gitName)
str += ", gitEmail=" + repr(self.gitEmail)
str += ", timezone=" + repr(self.timezone)
str += ")"
return str
@staticmethod
def FilenameFromScriptName(scriptName):
(root, ext) = os.path.splitext(scriptName)
return root + '.config.xml'
@ staticmethod
def GetBooleanAttribute(xmlElement, attribute):
if xmlElement is None or attribute is None:
return None
value = xmlElement.attrib.get(attribute)
if value is not None:
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
Exception("Error, could not parse {attr} attribute of tag {tag}. Expected 'true' or 'false', but got '{value}'.".format(attr=attribute, tag=xmlElement.tag, value=value))
return value
@staticmethod
def GetAbsoluteUsermapsFilename(filename, includedFilename):
if includedFilename is None:
return None
if os.path.isabs(includedFilename):
return includedFilename
if filename is None:
return None
drive, path = os.path.splitdrive(filename)
head, tail = os.path.split(path)
if len(head) > 0 and head != '/' and head != '\\': # For an absolute path the starting slash isn't removed from head.
return os.path.abspath(os.path.join(head, includedFilename))
return os.path.abspath(includedFilename)
@staticmethod
def GetUsermapsFromXmlElement(usermapsElem):
usermaps = []
if usermapsElem is not None and usermapsElem.tag == 'usermaps':
for usermapElem in usermapsElem.findall('map-user'):
usermaps.append(Config.UserMap.fromxmlelement(usermapElem))
return usermaps
@staticmethod
def GetUsermapsFromFile(filename, ignoreFiles=None):
usermaps = []
knownAccurevUsers = set()
directCount, indirectCount = 0, 0
if filename is not None:
if os.path.exists(filename):
with codecs.open(filename) as f:
mapXmlString = f.read()
mapXmlRoot = ElementTree.fromstring(mapXmlString)
if mapXmlRoot is not None:
userMapElements = []
if mapXmlRoot.tag == "usermaps":
userMapElements.append(mapXmlRoot)
else:
for userMapElem in mapXmlRoot.findall('usermaps'):
userMapElements.append(userMapElem)
fileList = [] # the linked files are processed after direct usermaps so that the direct usermaps override the same users in the linked files...
for userMapElem in userMapElements:
directUsermaps = Config.GetUsermapsFromXmlElement(userMapElem)
directCount += len(directUsermaps)
for user in directUsermaps:
if user.accurevUsername not in knownAccurevUsers:
usermaps.append(user)
knownAccurevUsers.add(user.accurevUsername)
else:
#print("Ignoring duplicated user:", user.accurevUsername)
pass
mapFile = userMapElem.attrib.get('filename')
if mapFile is not None:
fileList.append(mapFile)
for mapFile in fileList:
if ignoreFiles is None:
ignoreFiles = set()
mapFile = Config.GetAbsoluteUsermapsFilename(filename, mapFile) # Prevent circular loads.
if mapFile not in ignoreFiles:
ignoreFiles.add(mapFile)
includedUsermaps = Config.GetUsermapsFromFile(mapFile, ignoreFiles=ignoreFiles)
indirectCount += len(includedUsermaps)
for user in includedUsermaps:
if user.accurevUsername not in knownAccurevUsers:
usermaps.append(user)
knownAccurevUsers.add(user.accurevUsername)
else:
#print("Ignoring duplicated user:", user.accurevUsername)
pass
else:
print("Circular usermaps inclusion detected at file,", mapFile, "which was already processed.", file=sys.stderr)
print("usermaps: filename", filename, "direct", directCount, "included", indirectCount)
return usermaps
@classmethod
def fromxmlstring(cls, xmlString, filename=None):
# Load the XML
xmlRoot = ElementTree.fromstring(xmlString)
if xmlRoot is not None and xmlRoot.tag == "accurev2git":
accurev = Config.AccuRev.fromxmlelement(xmlRoot.find('accurev'))
git = Config.Git.fromxmlelement(xmlRoot.find('git'))
method = "diff" # Defaults to diff
methodElem = xmlRoot.find('method')
if methodElem is not None:
method = methodElem.text
mergeStrategy = "normal" # Defaults to normal
mergeStrategyElem = xmlRoot.find('merge-strategy')
if mergeStrategyElem is not None:
mergeStrategy = mergeStrategyElem.text
logFilename = None
logFileElem = xmlRoot.find('logfile')
if logFileElem is not None:
logFilename = logFileElem.text
usermaps = []
userMapsElem = xmlRoot.find('usermaps')
if userMapsElem is not None:
usermaps = Config.GetUsermapsFromXmlElement(userMapsElem)
knownAccurevUsers = set([x.accurevUsername for x in usermaps])
# Check if we need to load extra usermaps from a file.
mapFilename = userMapsElem.attrib.get("filename")
if mapFilename is not None:
if filename is not None:
mapFilename = Config.GetAbsoluteUsermapsFilename(filename, mapFilename) # Prevent circular loads.
includedUsermaps = Config.GetUsermapsFromFile(mapFilename)
for user in includedUsermaps:
if user.accurevUsername not in knownAccurevUsers:
usermaps.append(user)
else:
#print("Known user:", user.accurevUsername)
pass
return cls(accurev=accurev, git=git, usermaps=usermaps, method=method, mergeStrategy=mergeStrategy, logFilename=logFilename)
else:
# Invalid XML for an accurev2git configuration file.
return None
@staticmethod
def fromfile(filename):
config = None
if os.path.exists(filename):
with codecs.open(filename) as f:
configXml = f.read()
config = Config.fromxmlstring(configXml, filename=filename)
return config
def __init__(self, accurev=None, git=None, usermaps=None, method=None, mergeStrategy=None, logFilename=None):
self.accurev = accurev
self.git = git
self.usermaps = usermaps
self.method = method
self.mergeStrategy = mergeStrategy
self.logFilename = logFilename
def __repr__(self):
str = "Config(accurev=" + repr(self.accurev)
str += ", git=" + repr(self.git)
str += ", usermaps=" + repr(self.usermaps)
str += ", method=" + repr(self.method)
str += ", mergeStrategy=" + repr(self.mergeStrategy)
str += ", logFilename=" + repr(self.logFilename)
str += ")"
return str
# Prescribed recepie:
# - Get the list of tracked streams from the config file.
# - For each stream in the list
# + If this stream is new (there is no data in git for it yet)
# * Create the git branch for the stream
# * Get the stream create (mkstream) transaction number and set it to be the start-transaction. Note: The first stream in the depot has no mkstream transaction.
# + otherwise
# * Get the last processed transaction number and set that to be the start-transaction.
# * Obtain a diff from accurev listing all of the files that have changed and delete them all.
# + Get the end-transaction from the user or from accurev's highest/now keyword for the hist command.
# + For all transactions between the start-transaction and end-transaction
# * Checkout the git branch at latest (or just checkout if no-commits yet).
# * Populate the retrieved transaction with the recursive option but without the overwrite option (quick).
# * Preserve empty directories by adding .gitignore files.
# * Commit the current state of the directory but don't respect the .gitignore file contents. (in case it was added to accurev in the past).
# * Increment the transaction number by one
# * Obtain a diff from accurev listing all of the files that have changed and delete them all.
class AccuRev2Git(object):
gitRefsNamespace = 'refs/ac2git/'
gitNotesRef_state = 'ac2git'
gitNotesRef_accurevInfo = 'accurev'
commandFailureRetryCount = 3
commandFailureSleepSeconds = 3
cachedDepots = None
def __init__(self, config):
self.config = config
self.cwd = None
self.gitRepo = None
# Returns True if the path was deleted, otherwise false
def DeletePath(self, path):
if os.path.lexists(path):
if os.path.islink(path):
os.unlink(path)
elif os.path.isfile(path):
try:
os.unlink(path)
except OSError:
os.chmod(path, stat.S_IWRITE )
os.unlink(path)
elif os.path.isdir(path):
shutil.rmtree(path)
return not os.path.lexists(path)
def ClearGitRepo(self):
# Delete everything except the .git folder from the destination (git repo)
logger.debug( "Clear git repo." )
for root, dirs, files in os.walk(self.gitRepo.path, topdown=False):
for name in files:
path = os.path.join(root, name)
if git.GetGitDirPrefix(path) is None:
self.DeletePath(path)
for name in dirs:
path = os.path.join(root, name)
if git.GetGitDirPrefix(path) is None:
self.DeletePath(path)
def PreserveEmptyDirs(self):
preservedDirs = []
for root, dirs, files in os.walk(self.gitRepo.path, topdown=True):
for name in dirs:
path = ToUnixPath(os.path.join(root, name))
# Preserve empty directories that are not under the .git/ directory.
if git.GetGitDirPrefix(path) is None and len(os.listdir(path)) == 0:
filename = os.path.join(path, '.gitignore')
with codecs.open(filename, 'w', 'utf-8') as file:
#file.write('# accurev2git.py preserve empty dirs\n')
preservedDirs.append(filename)
if not os.path.exists(filename):
logger.error("Failed to preserve directory. Couldn't create '{0}'.".format(filename))
return preservedDirs
def DeleteEmptyDirs(self):
deletedDirs = []
for root, dirs, files in os.walk(self.gitRepo.path, topdown=True):
for name in dirs:
path = ToUnixPath(os.path.join(root, name))
# Delete empty directories that are not under the .git/ directory.
if git.GetGitDirPrefix(path) is None:
dirlist = os.listdir(path)
count = len(dirlist)
delete = (len(dirlist) == 0)
if len(dirlist) == 1 and '.gitignore' in dirlist:
with codecs.open(os.path.join(path, '.gitignore')) as gi:
contents = gi.read().strip()
delete = (len(contents) == 0)
if delete:
if not self.DeletePath(path):
logger.error("Failed to delete empty directory '{0}'.".format(path))
raise Exception("Failed to delete '{0}'".format(path))
else:
deletedDirs.append(path)
return deletedDirs
def GetGitUserFromAccuRevUser(self, accurevUsername):
if accurevUsername is not None:
for usermap in self.config.usermaps:
if usermap.accurevUsername == accurevUsername:
return (usermap.gitName, usermap.gitEmail)
logger.error("Cannot find git details for accurev username {0}".format(accurevUsername))
return (accurevUsername, None)
def GetGitTimezoneFromDelta(self, time_delta):
seconds = time_delta.total_seconds()
absSec = abs(seconds)
offset = (int(absSec / 3600) * 100) + (int(absSec / 60) % 60)
if seconds < 0:
offset = -offset
return offset
def GetDeltaFromGitTimezone(self, timezone):
# Git timezone strings follow the +0100 format
tz = int(timezone)
tzAbs = abs(tz)
tzdelta = timedelta(seconds=((int(tzAbs / 100) * 3600) + ((tzAbs % 100) * 60)))
return tzdelta
def GetGitDatetime(self, accurevUsername, accurevDatetime):
usertime = accurevDatetime
tz = None
if accurevUsername is not None:
for usermap in self.config.usermaps:
if usermap.accurevUsername == accurevUsername:
tz = usermap.timezone
break
if tz is None:
# Take the following default times 48 hours from Epoch as reference to compute local time.
refTimestamp = 172800
utcRefTime = datetime.utcfromtimestamp(refTimestamp)
refTime = datetime.fromtimestamp(refTimestamp)
tzdelta = (refTime - utcRefTime)
usertime = accurevDatetime + tzdelta
tz = self.GetGitTimezoneFromDelta(tzdelta)
else:
match = re.match(r'^[+-][0-9]{4}$', tz)
if match:
# This is the git style format
tzdelta = self.GetDeltaFromGitTimezone(tz)
usertime = accurevDatetime + tzdelta
tz = int(tz)
else:
# Assuming it is an Olson timezone format
userTz = pytz.timezone(tz)
usertime = userTz.localize(accurevDatetime)
tzdelta = usertime.utcoffset() # We need two aware times to get the datetime.timedelta.
usertime = accurevDatetime + tzdelta # Adjust the time by the timezone since localize din't.
tz = self.GetGitTimezoneFromDelta(tzdelta)
return usertime, tz
def GetFirstTransaction(self, depot, streamName, startTransaction=None, endTransaction=None, useCache=False):
invalidRetVal = (None, None)
# Get the stream creation transaction (mkstream). Note: The first stream in the depot doesn't have an mkstream transaction.
tr = accurev.ext.get_mkstream_transaction(stream=streamName, depot=depot, useCache=useCache)
if tr is None:
logger.warning("Failed to find the mkstream transaction for stream {s}. Trying to get first transaction.".format(s=streamName))
hist, histXml = self.TryHist(depot=depot, timeSpec="highest-1", streamName=streamName)
if hist is not None and len(hist.transactions) > 0:
tr = hist.transactions[-1] # Get first transaction
hist, histXml = self.TryHist(depot=depot, timeSpec=tr.id) # Make the first transaction be the mkstream transaction.
if startTransaction is not None:
startTrHist, startTrXml = self.TryHist(depot=depot, timeSpec=startTransaction)
if startTrHist is None:
return invalidRetVal
startTr = startTrHist.transactions[0]
if tr.id < startTr.id:
logger.info( "The first transaction (#{0}) for stream {1} is earlier than the conversion start transaction (#{2}).".format(tr.id, streamName, startTr.id) )
tr = startTr
hist = startTrHist
histXml = startTrXml
if endTransaction is not None:
endTrHist, endTrHistXml = self.TryHist(depot=depot, timeSpec=endTransaction)
if endTrHist is None:
return invalidRetVal
endTr = endTrHist.transactions[0]
if endTr.id < tr.id:
logger.info( "The first transaction (#{0}) for stream {1} is later than the conversion end transaction (#{2}).".format(tr.id, streamName, startTr.id) )
tr = None
return invalidRetVal
return hist, histXml
def TryGitCommand(self, cmd, allowEmptyString=False, retry=True):
rv = None
for i in range(0, AccuRev2Git.commandFailureRetryCount):
rv = self.gitRepo.raw_cmd(cmd)
if not retry:
break
if rv is not None:
rv = rv.strip()
if not allowEmptyString and len(rv) == 0:
rv = None
else:
break
time.sleep(AccuRev2Git.commandFailureSleepSeconds)
return rv
def GetLastCommitHash(self, branchName=None, ref=None, retry=True):
cmd = []
commitHash = None
if ref is not None:
cmd = [ u'git', u'show-ref', u'--hash', ref ]
else:
cmd = [u'git', u'log', u'-1', u'--format=format:%H']
if branchName is not None:
cmd.append(branchName)
commitHash = self.TryGitCommand(cmd=cmd, retry=retry)
if commitHash is None:
logger.error("Failed to retrieve last git commit hash. Command `{0}` failed.".format(' '.join(cmd)))
return commitHash
def GetTreeFromRef(self, ref):
treeHash = None
cmd = [u'git', u'log', u'-1', u'--format=format:%T']
if ref is not None:
cmd.append(ref)
treeHash = self.TryGitCommand(cmd=cmd)
if treeHash is None:
logger.error("Failed to retrieve tree hash. Command `{0}` failed.".format(' '.join(cmd)))
return treeHash
def UpdateAndCheckoutRef(self, ref, commitHash, checkout=True):
if ref is not None and commitHash is not None and len(ref) > 0 and len(commitHash) > 0:
# refs/heads are branches which are updated automatically when you commit to them (provided we have them checked out).
# so at least raise a warning for the user.
# If we were asked to update a ref, not updating it is considered a failure to commit.
if self.gitRepo.raw_cmd([ u'git', u'update-ref', ref, commitHash ]) is None:
logger.error( "Failed to update ref {ref} to commit {hash}".format(ref=ref, hash=commitHash) )
return False
if checkout and ref != 'HEAD' and self.gitRepo.checkout(branchName=ref) is None: # no point in checking out HEAD if that's what we've updated!
logger.error( "Failed to checkout ref {ref} to commit {hash}".format(ref=ref, hash=commitHash) )
return False
return True
return None
def SafeCheckout(self, ref, doReset=False, doClean=False):
status = self.gitRepo.status()
if status is None:
logger.error("git status - command failed!")
logger.error(" exit code: {0}".format(self.gitRepo.lastReturnCode))
logger.error(" stderr: {0}".format(self.gitRepo.lastStderr))
logger.error(" stdout: {0}".format(self.gitRepo.lastStdout))
raise Exception("SafeCheckout - failed to invoke `git status`")
if doReset:
logger.debug( "Reset current branch - '{br}'".format(br=status.branch) )
self.gitRepo.reset(isHard=True)
if doClean:
logger.debug( "Clean current branch - '{br}'".format(br=status.branch) )
self.gitRepo.clean(directories=True, force=True, forceSubmodules=True, includeIgnored=True)
pass
if ref is not None and status.branch != ref:
logger.debug( "Checkout {ref}".format(ref=ref) )
self.gitRepo.checkout(branchName=ref)
status = self.gitRepo.status()
logger.debug( "On branch {branch} - {staged} staged, {changed} changed, {untracked} untracked files{initial_commit}.".format(branch=status.branch, staged=len(status.staged), changed=len(status.changed), untracked=len(status.untracked), initial_commit=', initial commit' if status.initial_commit else '') )
if status is None:
raise Exception("Invalid initial state! The status command return is invalid.")
if status.branch is None or status.branch != ref:
# The parser for the status isn't very smart and git doesn't necessarily report the name of the ref that you have checked out. So, check if the current HEAD points to the desired ref by comparing hashes.
headHash = self.gitRepo.raw_cmd(['git', 'log', '--format=%H', 'HEAD', '-1'])
refHash = self.gitRepo.raw_cmd(['git', 'log', '--format=%H', ref, '-1'])
if headHash is None:
raise Exception("Failed to determine the hash of the HEAD commit!")
elif refHash is None:
raise Exception("Failed to determine the hash of the {ref} commit!".format(ref=ref))
elif refHash != headHash:
raise Exception("Invalid initial state! The status command returned an invalid name for current branch. Expected {ref} but got {statusBranch}.".format(ref=ref, statusBranch=status.branch))
if len(status.staged) != 0 or len(status.changed) != 0 or len(status.untracked) != 0:
raise Exception("Invalid initial state! There are changes in the tracking repository. Staged {staged}, changed {changed}, untracked {untracked}.".format(staged=status.staged, changed=status.changed, untracked=status.untracked))
def Commit(self, transaction=None, allowEmptyCommit=False, messageOverride=None, parents=None, treeHash=None, ref=None, checkout=True, authorIsCommitter=None):
usePlumbing = (parents is not None or treeHash is not None)
if authorIsCommitter is None:
authorIsCommitter = self.config.git.authorIsCommitter
# Custom messages for when we have a transaction.
trMessage, forTrMessage = '', ''
if transaction is not None:
trMessage = ' transaction {0}'.format(transaction.id)
forTrMessage = ' for{0}'.format(trMessage)
# Begin the commit processing.
if treeHash is None:
self.PreserveEmptyDirs()
# Add all of the files to the index
self.gitRepo.add(force=True, all=True, git_opts=[u'-c', u'core.autocrlf=false'])
# Create temporary file for the commit message.
messageFilePath = None
with tempfile.NamedTemporaryFile(mode='w+', prefix='ac2git_commit_', encoding='utf-8', delete=False) as messageFile:
messageFilePath = messageFile.name
emptyMessage = True
if messageOverride is not None:
if len(messageOverride) > 0:
messageFile.write(messageOverride)
emptyMessage = False
elif transaction is not None and transaction.comment is not None and len(transaction.comment) > 0:
# In git the # at the start of the line indicate that this line is a comment inside the message and will not be added.
# So we will just add a space to the start of all the lines starting with a # in order to preserve them.
messageFile.write(transaction.comment)
emptyMessage = False
if emptyMessage:
# `git commit` and `git commit-tree` commands, when given an empty file for the commit message, seem to revert to
# trying to read the commit message from the STDIN. This is annoying since we don't want to be opening a pipe to
# the spawned process all the time just to write an EOF character so instead we will just add a single space as the
# message and hope the user doesn't notice.
# For the `git commit` command it's not as bad since white-space is always stripped from commit messages. See the
# `git commit --cleanup` option for details.
messageFile.write(' ')
if messageFilePath is None:
logger.error("Failed to create temporary file for commit message{0}".format(forTrMessage))
return None
# Get the author's and committer's name, email and timezone information.
authorName, authorEmail, authorDate, authorTimezone = None, None, None, None
if transaction is not None:
authorName, authorEmail = self.GetGitUserFromAccuRevUser(transaction.user)
authorDate, authorTimezone = self.GetGitDatetime(accurevUsername=transaction.user, accurevDatetime=transaction.time)
# If the author-is-committer flag is set to true make the committer the same as the author.
committerName, committerEmail, committerDate, committerTimezone = None, None, None, None
if authorIsCommitter:
committerName, committerEmail, committerDate, committerTimezone = authorName, authorEmail, authorDate, authorTimezone
lastCommitHash = None
if parents is None:
lastCommitHash = self.GetLastCommitHash(ref=ref) # If ref is None, it will get the last commit hash from the HEAD ref.
if lastCommitHash is None:
parents = []
else:
parents = [ lastCommitHash ]
elif len(parents) != 0:
lastCommitHash = parents[0]
# Make the commit.
commitHash = None
if usePlumbing:
if treeHash is None:
treeHash = self.gitRepo.write_tree()
if treeHash is not None and len(treeHash.strip()) > 0:
treeHash = treeHash.strip()
commitHash = self.gitRepo.commit_tree(tree=treeHash, parents=parents, message_file=messageFilePath, committer_name=committerName, committer_email=committerEmail, committer_date=committerDate, committer_tz=committerTimezone, author_name=authorName, author_email=authorEmail, author_date=authorDate, author_tz=authorTimezone, allow_empty=allowEmptyCommit, git_opts=[u'-c', u'core.autocrlf=false'])
if commitHash is None:
logger.error( "Failed to commit tree {0}{1}. Error:\n{2}".format(treeHash, forTrMessage, self.gitRepo.lastStderr) )
else:
commitHash = commitHash.strip()
else:
logger.error( "Failed to write tree{0}. Error:\n{1}".format(forTrMessage, self.gitRepo.lastStderr) )
else:
commitResult = self.gitRepo.commit(message_file=messageFilePath, committer_name=committerName, committer_email=committerEmail, committer_date=committerDate, committer_tz=committerTimezone, author_name=authorName, author_email=authorEmail, author_date=authorDate, author_tz=authorTimezone, allow_empty_message=True, allow_empty=allowEmptyCommit, cleanup='whitespace', git_opts=[u'-c', u'core.autocrlf=false'])
if commitResult is not None:
commitHash = commitResult.shortHash
if commitHash is None:
commitHash = self.GetLastCommitHash()
elif "nothing to commit" in self.gitRepo.lastStdout:
logger.debug( "nothing to commit{0}...?".format(forTrMessage) )
else:
logger.error( "Failed to commit".format(trMessage) )
logger.error( "\n{0}\n{1}\n".format(self.gitRepo.lastStdout, self.gitRepo.lastStderr) )
# For detached head states (which occur when you're updating a ref and not a branch, even if checked out) we need to make sure to update the HEAD. Either way it doesn't hurt to
# do this step whether we are using plumbing or not...
if commitHash is not None:
if ref is None:
ref = 'HEAD'
if self.UpdateAndCheckoutRef(ref=ref, commitHash=commitHash, checkout=(checkout and ref != 'HEAD')) != True:
logger.error( "Failed to update ref {ref} with commit {h}{forTr}".format(ref=ref, h=commitHash, forTr=forTrMessage) )
commitHash = None
os.remove(messageFilePath)
if commitHash is not None:
if lastCommitHash == commitHash:
logger.error("Commit command returned True when nothing was committed...? Last commit hash {0} didn't change after the commit command executed.".format(lastCommitHash))
commitHash = None # Invalidate return value
else:
logger.error("Failed to commit{tr}.".format(tr=trMessage))
return commitHash
def GetStreamMap(self, printInfo=False):
streamMap = self.config.accurev.streamMap
if streamMap is None:
streamMap = OrderedDict()
if len(streamMap) == 0:
# When the stream map is missing or empty we intend to process all streams
includeDeactivatedItems = "hidden" not in self.config.accurev.excludeStreamTypes
streams = accurev.show.streams(depot=self.config.accurev.depot, includeDeactivatedItems=includeDeactivatedItems, includeOldDefinitions=False)
included, excluded = [], []
for stream in streams.streams:
if self.config.accurev.excludeStreamTypes is not None and stream.Type in self.config.accurev.excludeStreamTypes:
excluded.append(stream)
else:
included.append(stream)
streamMap[stream.name] = self.SanitizeBranchName(stream.name)
if printInfo:
logger.info("Auto-generated stream list ({0} included, {1} excluded):".format(len(included), len(excluded)))
logger.info(" Included streams ({0}):".format(len(included)))
for s in included:
logger.info(" + {0}: {1} -> {2}".format(s.Type, s.name, streamMap[s.name]))
logger.info(" Excluded streams ({0}):".format(len(excluded)))
for s in excluded:
logger.info(" - {0}: {1}".format(s.Type, s.name))
return streamMap
def FindNextChangeTransaction(self, streamName, startTrNumber, endTrNumber, deepHist=None):
# Iterate over transactions in order using accurev diff -a -i -v streamName -V streamName -t <lastProcessed>-<current iterator>
if self.config.method == "diff":
nextTr = startTrNumber + 1
diff, diffXml = self.TryDiff(streamName=streamName, firstTrNumber=startTrNumber, secondTrNumber=nextTr)
if diff is None:
return (None, None)
# Note: This is likely to be a hot path. However, it cannot be optimized since a revert of a transaction would not show up in the diff even though the
# state of the stream was changed during that period in time. Hence to be correct we must iterate over the transactions one by one unless we have
# explicit knowlege of all the transactions which could affect us via some sort of deep history option...
while nextTr <= endTrNumber and len(diff.elements) == 0:
nextTr += 1
diff, diffXml = self.TryDiff(streamName=streamName, firstTrNumber=startTrNumber, secondTrNumber=nextTr)
if diff is None:
return (None, None)
logger.debug("FindNextChangeTransaction diff: {0}".format(nextTr))
return (nextTr, diff)
elif self.config.method == "deep-hist":
if deepHist is None:
raise Exception("Script error! deepHist argument cannot be none when running a deep-hist method.")
# Find the next transaction
for tr in deepHist:
if tr.id > startTrNumber:
if tr.Type in ignored_transaction_types:
logger.debug("Ignoring transaction #{id} - {Type} (transaction type is in ignored_transaction_types list)".format(id=tr.id, Type=tr.Type))
else:
diff, diffXml = self.TryDiff(streamName=streamName, firstTrNumber=startTrNumber, secondTrNumber=tr.id)
if diff is None:
return (None, None)
elif len(diff.elements) > 0:
logger.debug("FindNextChangeTransaction deep-hist: {0}".format(tr.id))
return (tr.id, diff)
else:
logger.debug("FindNextChangeTransaction deep-hist skipping: {0}, diff was empty...".format(tr.id))
diff, diffXml = self.TryDiff(streamName=streamName, firstTrNumber=startTrNumber, secondTrNumber=endTrNumber)
return (endTrNumber + 1, diff) # The end transaction number is inclusive. We need to return the one after it.
elif self.config.method == "pop":
logger.debug("FindNextChangeTransaction pop: {0}".format(startTrNumber + 1))
return (startTrNumber + 1, None)
else:
logger.error("Method is unrecognized, allowed values are 'pop', 'diff' and 'deep-hist'")
raise Exception("Invalid configuration, method unrecognized!")
def DeleteDiffItemsFromRepo(self, diff):
# Delete all of the files which are even mentioned in the diff so that we can do a quick populate (wouth the overwrite option)
deletedPathList = []
for element in diff.elements:
for change in element.changes:
for stream in [ change.stream1, change.stream2 ]:
if stream is not None and stream.name is not None:
name = stream.name
if name.startswith('\\.\\') or name.startswith('/./'):
# Replace the accurev depot relative path start with a normal relative path.
name = name[3:]
if os.path.isabs(name):
# For os.path.join() to work we need a non absolute path so turn the absolute path (minus any drive letter or UNC path part) into a relative path w.r.t. the git repo.
name = os.path.splitdrive(name)[1][1:]
path = os.path.abspath(os.path.join(self.gitRepo.path, name))
# Ensure we restrict the deletion to the git repository and that we don't delete the git repository itself.
doClearAll = False
relPath = os.path.relpath(path, self.gitRepo.path)
relPathDirs = SplitPath(relPath)
if relPath.startswith('..'):
logger.error("Trying to delete path outside the worktree! Deleting worktree instead. git path: {gp}, depot path: {dp}".format(gp=path, dp=stream.name))
doClearAll = True
elif relPathDirs[0] == '.git':
logger.error("Trying to delete git directory! Ignored... git path: {gp}, depot path: {dp}".format(gp=path, dp=stream.name))
elif relPath == '.':
logger.error("Deleting the entire worktree due to diff with bad '..' elements! git path: {gp}, depot path: {dp}".format(gp=path, dp=stream.name))
doClearAll = True
if doClearAll:
self.ClearGitRepo()
return [ self.gitRepo.path ]
if os.path.lexists(path): # Ensure that broken links are also deleted!
if not self.DeletePath(path):
logger.error("Failed to delete '{0}'.".format(path))
raise Exception("Failed to delete '{0}'".format(path))
else:
deletedPathList.append(path)
return deletedPathList
def TryDiff(self, streamName, firstTrNumber, secondTrNumber):
for i in range(0, AccuRev2Git.commandFailureRetryCount):
diffXml = accurev.raw.diff(all=True, informationOnly=True, verSpec1=streamName, verSpec2=streamName, transactionRange="{0}-{1}".format(firstTrNumber, secondTrNumber), isXmlOutput=True, useCache=self.config.accurev.UseCommandCache())
if diffXml is not None:
diff = accurev.obj.Diff.fromxmlstring(diffXml)
if diff is not None:
break
if diff is None:
logger.error( "accurev diff failed! stream: {0} time-spec: {1}-{2}".format(streamName, firstTrNumber, secondTrNumber) )
return diff, diffXml
def TryHist(self, depot, timeSpec, streamName=None, transactionKind=None):
trHist = None
for i in range(0, AccuRev2Git.commandFailureRetryCount):
trHistXml = accurev.raw.hist(depot=depot, stream=streamName, timeSpec=timeSpec, transactionKind=transactionKind, useCache=self.config.accurev.UseCommandCache(), isXmlOutput=True, expandedMode=True, verboseMode=True)
if trHistXml is not None:
trHist = accurev.obj.History.fromxmlstring(trHistXml)
if trHist is not None:
break
return trHist, trHistXml
def TryPop(self, streamName, transaction, overwrite=False):
for i in range(0, AccuRev2Git.commandFailureRetryCount):
popResult = accurev.pop(verSpec=streamName, location=self.gitRepo.path, isRecursive=True, isOverride=overwrite, timeSpec=transaction.id, elementList='.')
if popResult:
break
else:
logger.error("accurev pop failed:")
for message in popResult.messages:
if message.error is not None and message.error:
logger.error(" {0}".format(message.text))
else:
logger.info(" {0}".format(message.text))
return popResult
def TryStreams(self, depot, timeSpec, stream=None):
streams = None
for i in range(0, AccuRev2Git.commandFailureRetryCount):
streamsXml = accurev.raw.show.streams(depot=depot, timeSpec=timeSpec, stream=stream, isXmlOutput=True, includeDeactivatedItems=True, includeHasDefaultGroupAttribute=True, useCache=self.config.accurev.UseCommandCache())
if streamsXml is not None:
streams = accurev.obj.Show.Streams.fromxmlstring(streamsXml)
if streams is not None:
break
return streams, streamsXml
def TryDepots(self):
depots = None
for i in range(0, AccuRev2Git.commandFailureRetryCount):
depotsXml = accurev.raw.show.depots(isXmlOutput=True, includeDeactivatedItems=True)
if depotsXml is not None:
depots = accurev.obj.Show.Depots.fromxmlstring(depotsXml)
if depots is not None:
break
return depots, depotsXml
def NormalizeAccurevXml(self, xml):
xmlNormalized = re.sub('TaskId="[0-9]+"', 'TaskId="0"', xml)
xmlDecoded = git.decode_proc_output(xmlNormalized)
return xmlDecoded
def WriteInfoFiles(self, path, depot, transaction, streamsXml=None, histXml=None, streamName=None, diffXml=None, useCommandCache=False):
streams = None
hist = None
diff = None
if streamsXml is not None:
streams = accurev.obj.Show.Streams.fromxmlstring(streamsXml)
if streams is None or streamsXml is None:
streams, streamsXml = self.TryStreams(depot=depot, timeSpec=transaction)
if streams is None or streamsXml is None:
return (None, None, None)
if histXml is not None:
hist = accurev.obj.History.fromxmlstring(histXml)
if hist is None or histXml is None:
hist, histXml = self.TryHist(depot=depot, timeSpec=transaction)
if hist is None or histXml is None:
return (None, None)
tr = hist.transactions[0]
if tr.id > 1 and tr.Type != "mkstream":
if diffXml is not None:
diff = accurev.obj.Diff.fromxmlstring(streamsXml)
if diff is None or diffXml is None:
if streamName is not None:
diff, diffXml = self.TryDiff(streamName=streamName, firstTrNumber=tr.id, secondTrNumber=(tr.id - 1))
if diff is None or diffXml is None:
return (None, None)
else:
return (None, None)
diffFilePath = os.path.join(self.gitRepo.path, 'diff.xml')
with codecs.open(diffFilePath, mode='w', encoding='utf-8') as f:
f.write(self.NormalizeAccurevXml(diffXml))
streamsFilePath = os.path.join(path, 'streams.xml')
with codecs.open(streamsFilePath, mode='w', encoding='utf-8') as f:
f.write(self.NormalizeAccurevXml(streamsXml))
histFilePath = os.path.join(path, 'hist.xml')
with codecs.open(histFilePath, mode='w', encoding='utf-8') as f:
f.write(self.NormalizeAccurevXml(histXml))
# GetDepotRefsNamespace
# When depot is None it returns the git ref namespace where all depots are under.
# When depot is not None it queries the stored depots for the depot name or number and returns the git ref namespace for that depot.
# If the depot name or number is not None and does not correspont to a depot in Accurev this function returns None.
def GetDepotRefsNamespace(self, depot=None):
depotsNS = '{refsNS}depots/'.format(refsNS=AccuRev2Git.gitRefsNamespace)
if depot is not None:
d = self.GetDepot(depot)
if d is not None:
depotNS = '{depotsNS}{depotNumber}/'.format(depotsNS=depotsNS, depotNumber=d.number)
return depotNS
return None # Invalid depot, no refs allowed.
return depotsNS
def ParseDepotRef(self, ref):
depotNumber, remainder = None, None
if ref is not None and isinstance(ref, str):
depotsNS = self.GetDepotRefsNamespace()
# Extract the depot number.
match = re.match(r'^{depotsNS}(\d+)/(.*)$'.format(depotsNS=depotsNS), ref)
if match is not None:
depotNumber = int(match.group(1))
remainder = match.group(2)
return depotNumber, remainder
def GetDepot(self, depot):
if AccuRev2Git.cachedDepots is not None:
d = AccuRev2Git.cachedDepots.getDepot(depot)
if d is not None:
return d
depotsRef = '{depotsNS}info'.format(depotsNS=self.GetDepotRefsNamespace())
# Check if the ref exists!
commitHash = self.GetLastCommitHash(self, ref=depotsRef)
haveCommitted = False
if commitHash is None:
# It doesn't exist, we can create it.
logger.debug( "Ref '{br}' doesn't exist.".format(br=depotsRef) )
# Delete everything in the index and working directory.
self.gitRepo.rm(fileList=['.'], force=True, recursive=True)
self.ClearGitRepo()
depots, depotsXml = self.TryDepots()
if depots is None or depotsXml is None:
return None
depotsFilePath = os.path.join(self.gitRepo.path, 'depots.xml')
with codecs.open(depotsFilePath, 'w') as f:
f.write(re.sub('TaskId="[0-9]+"', 'TaskId="0"', depotsXml))
commitHash = self.Commit(transaction=None, messageOverride="depots at ac2git invocation.", parents=[], ref=depotsRef)
if commitHash is None:
logger.debug( "First commit on the depots ref ({ref}) has failed. Aborting!".format(ref=depotsRef) )
return None
else:
logger.info( "Depots ref updated {ref} -> commit {hash}".format(hash=self.ShortHash(commitHash), ref=depotsRef) )
haveCommitted = True
else:
depotsXml, depots = self.GetDepotsInfo(ref=commitHash)
# Try and find the depot in the list of existing depots.
d = depots.getDepot(depot)
if d is not None:
AccuRev2Git.cachedDepots = depots
return d
if haveCommitted:
logger.info( "Failed to find depot {d} on depots ref {r} at commit {h}".format(d=depot, h=self.ShortHash(commitHash), r=depotsRef) )
return None
# We haven't committed anything yet so a depot might have been renamed since we started. Run the depots command again and commit it if there have been any changes.
self.gitRepo.checkout(branchName=depotsRef)
# Delete everything in the index and working directory.
self.gitRepo.rm(fileList=['.'], force=True, recursive=True)
self.ClearGitRepo()
depots, depotsXml = self.TryDepots()
if depots is None or depotsXml is None:
return None
depotsFilePath = os.path.join(self.gitRepo.path, 'depots.xml')
with codecs.open(depotsFilePath, 'w') as f:
f.write(re.sub('TaskId="[0-9]+"', 'TaskId="0"', depotsXml))
commitHash = self.Commit(transaction=None, messageOverride="depots at ac2git invocation.".format(trId=tr.id), ref=depotsRef)
if commitHash is None:
logger.debug( "Commit on the depots ref ({ref}) has failed. Couldn't find the depot {d}. Aborting!".format(ref=depotsRef, d=depot) )
return None
else:
logger.info( "Depots ref updated {ref} -> commit {hash}".format(hash=self.ShortHash(commitHash), ref=depotsRef) )
haveCommitted = True
# Try and find the depot in the list of existing depots.
d = depots.getDepot(depot)
if d is not None:
AccuRev2Git.cachedDepots = depots
return d
return None
def GetStreamRefsNamespace(self, depot, streamNumber=None):
depotNS = self.GetDepotRefsNamespace(depot=depot)
if depotNS is not None:
streamsNS = '{depotNS}streams/'.format(depotNS=depotNS)
if streamNumber is not None:
if not isinstance(streamNumber, int):
streamNumber = int(streamNumber)
streamNS = '{streamsNS}{streamNumber}'.format(streamsNS=streamsNS, streamNumber=streamNumber)
return streamNS
return streamsNS
return None
def ParseStreamRef(self, ref):
depotNumber, streamNumber, remainder = None, None, None
if ref is not None and isinstance(ref, str):
depotNumber, depotRemainder = self.ParseDepotRef(ref=ref)
if depotNumber is not None:
streamsNS = self.GetStreamRefsNamespace(depot=depotNumber)
if streamsNS is not None:
# Extract the stream number.
match = re.match(r'^{streamsNS}(\d+)/(.*)$'.format(streamsNS=streamsNS), ref)
if match is not None:
streamNumber = int(match.group(1))
remainder = match.group(2)
return (depotNumber, streamNumber, remainder)
def GetStreamRefs(self, depot, streamNumber):
stateRef, dataRef, hwmRef = None, None, None
streamNS = self.GetStreamRefsNamespace(depot, streamNumber=streamNumber)
if streamNS is not None:
dataRef = '{streamNS}/data'.format(streamNS=streamNS)
stateRef = '{streamNS}/info'.format(streamNS=streamNS)
hwmRef = '{streamNS}/hwm'.format(streamNS=streamNS) # High-water mark ref.
return (stateRef, dataRef, hwmRef)
# Gets the diff.xml contents and parsed accurev.obj.Diff object from the given \a ref (git ref or hash).
def GetDiffInfo(self, ref):
# Get the diff information. (if any)
diff = None
diffXml = self.gitRepo.raw_cmd(['git', 'show', '{hash}:diff.xml'.format(hash=ref)]) # Doesn't exist for the mkstream transaction (first commit)
if diffXml is not None and len(diffXml) != 0:
diff = accurev.obj.Diff.fromxmlstring(diffXml)
else:
logger.warning("Command failed! git show {hash}:diff.xml".format(hash=ref))
return (diffXml, diff)
# Gets the hist.xml contents and parsed accurev.obj.History object from the given \a ref (git ref or hash).
def GetHistInfo(self, ref):
# Get the hist information.
hist = None
histXml = self.gitRepo.raw_cmd(['git', 'show', '{hash}:hist.xml'.format(hash=ref)])
if histXml is not None and len(histXml) != 0:
hist = accurev.obj.History.fromxmlstring(histXml)
else:
raise Exception("Command failed! git show {hash}:hist.xml".format(hash=ref))
return (histXml, hist)
# Gets the streams.xml contents and parsed accurev.obj.Show.Streams object from the given \a ref (git ref or hash).
def GetStreamsInfo(self, ref):
# Get the stream information.
streams = None
streamsXml = self.gitRepo.raw_cmd(['git', 'show', '{hash}:streams.xml'.format(hash=ref)])
if streamsXml is not None and len(streamsXml) != 0:
streams = accurev.obj.Show.Streams.fromxmlstring(streamsXml)
else:
raise Exception("Command failed! git show {hash}:streams.xml".format(hash=ref))
return (streamsXml, streams)
# Gets the depots.xml contents and parsed accurev.obj.Show.Streams object from the given \a ref (git ref or hash).
def GetDepotsInfo(self, ref):
# Get the stream information.
depots = None
depotsXml = self.gitRepo.raw_cmd(['git', 'show', '{hash}:depots.xml'.format(hash=ref)])
if depotsXml is not None and len(depotsXml) != 0:
depots = accurev.obj.Show.Depots.fromxmlstring(depotsXml)
else:
raise Exception("Command failed! git show {hash}:depots.xml".format(hash=ref))
return (depotsXml, depots)
def RetrieveStreamInfo(self, depot, stream, stateRef, startTransaction, endTransaction):
logger.info( "Processing Accurev state for {0} : {1} - {2}".format(stream.name, startTransaction, endTransaction) )
# Check if the ref exists!
stateRefObj = self.gitRepo.raw_cmd(['git', 'show-ref', stateRef])
assert stateRefObj is None or len(stateRefObj) != 0, "Invariant error! Expected non-empty string returned by git show-ref, but got '{s}'".format(s=stateRefObj)
# Either checkout last state or make the initial commit for a new stateRef.
tr = None
commitHash = None
doInitialCheckout = False
if stateRefObj is not None:
# This means that the ref already exists so we should switch to it.
doInitialCheckout = True
histXml, hist = self.GetHistInfo(ref=stateRef)
tr = hist.transactions[0]
else:
logger.debug( "Ref '{br}' doesn't exist.".format(br=stateRef) )
# We are tracking a new stream
firstHist, firstHistXml = self.GetFirstTransaction(depot=depot, streamName=stream.name, startTransaction=startTransaction, endTransaction=endTransaction)
if firstHist is not None and len(firstHist.transactions) > 0:
tr = firstHist.transactions[0]
try:
destStream = self.GetDestinationStreamName(history=hist, depot=None)
except:
destStream = None
# Delete everything in the index and working directory.
self.gitRepo.rm(fileList=['.'], force=True, recursive=True)
self.ClearGitRepo()
self.WriteInfoFiles(path=self.gitRepo.path, depot=depot, streamName=stream.name, transaction=tr.id, useCommandCache=self.config.accurev.UseCommandCache())
commitHash = self.Commit(transaction=tr, messageOverride="transaction {trId}".format(trId=tr.id), parents=[], ref=stateRef, authorIsCommitter=True)
if commitHash is None:
logger.debug( "{0} first commit has failed. Is it an empty commit? Aborting!".format(stream.name) )
return (None, None)
else:
logger.info( "stream {streamName}: tr. #{trId} {trType} -> commit {hash} on {ref}".format(streamName=stream.name, trId=tr.id, trType=tr.Type, hash=self.ShortHash(commitHash), ref=stateRef) )
else:
logger.warning( "Failed to get the first transaction for {0} from accurev. Continuing...".format(stream.name) )
return (None, None)
# Get the end transaction.
endTrHist, endTrHistXml = self.TryHist(depot=depot, timeSpec=endTransaction)
if endTrHist is None:
logger.debug("accurev hist -p {0} -t {1}.1 failed.".format(depot, endTransaction))
return (None, None)
endTr = endTrHist.transactions[0]
logger.info("{0}: retrieving transaction range #{1} - #{2}".format(stream.name, tr.id, endTr.id))
if tr.id > endTr.id:
logger.info("{0}: nothing to do, last processed transaction {1} is greater than the end transaction {2}.".format(stream.name, tr.id, endTr.id))
return (tr, self.GetLastCommitHash(ref=stateRef))
# Iterate over all of the transactions that affect the stream we are interested in and maybe the "chstream" transactions (which affect the streams.xml).
deepHist = None
if self.config.method == "deep-hist":
ignoreTimelocks=False # The code for the timelocks is not tested fully yet. Once tested setting this to false should make the resulting set of transactions smaller
# at the cost of slightly larger number of upfront accurev commands called.
logger.debug("accurev.ext.deep_hist(depot={0}, stream={1}, timeSpec='{2}-{3}', ignoreTimelocks={4})".format(depot, stream.name, tr.id, endTr.id, ignoreTimelocks))
deepHist = accurev.ext.deep_hist(depot=depot, stream=stream.name, timeSpec="{0}-{1}".format(tr.id, endTr.id), ignoreTimelocks=ignoreTimelocks, useCache=self.config.accurev.UseCommandCache())
logger.info("Deep-hist returned {count} transactions to process.".format(count=len(deepHist)))
if deepHist is None:
raise Exception("accurev.ext.deep_hist() failed to return a result!")
elif len(deepHist) == 0:
return (None, None)
while True:
nextTr, diff = self.FindNextChangeTransaction(streamName=stream.name, startTrNumber=tr.id, endTrNumber=endTr.id, deepHist=deepHist)
if nextTr is None:
logger.debug( "FindNextChangeTransaction(streamName='{0}', startTrNumber={1}, endTrNumber={2}, deepHist={3}) failed!".format(stream.name, tr.id, endTr.id, deepHist) )
return (None, None)
logger.debug( "{0}: next transaction {1} (end tr. {2})".format(stream.name, nextTr, endTr.id) )
if nextTr <= endTr.id:
if doInitialCheckout:
# A postponed initialization of state. If there's nothing to do we should skip this checkout because
# it can be expensive. So only do it once and only when we will need to use it.
self.SafeCheckout(ref=stateRef, doReset=True, doClean=True)
doInitialCheckout = False
# Right now nextTr is an integer representation of our next transaction.
# Delete all of the files which are even mentioned in the diff so that we can do a quick populate (wouth the overwrite option)
if self.config.method == "pop":
self.ClearGitRepo()
else:
if diff is None:
return (None, None)
# The accurev hist command here must be used with the depot option since the transaction that has affected us may not
# be a promotion into the stream we are looking at but into one of its parent streams. Hence we must query the history
# of the depot and not the stream itself.
hist, histXml = self.TryHist(depot=depot, timeSpec=nextTr)
if hist is None:
logger.debug("accurev hist -p {0} -t {1}.1 failed.".format(depot, endTransaction))
return (None, None)
tr = hist.transactions[0]
stream = accurev.show.streams(depot=depot, stream=stream.streamNumber, timeSpec=tr.id, useCache=self.config.accurev.UseCommandCache()).streams[0]
self.WriteInfoFiles(path=self.gitRepo.path, depot=depot, streamName=stream.name, transaction=tr.id, useCommandCache=self.config.accurev.UseCommandCache())
# Commit
commitHash = self.Commit(transaction=tr, messageOverride="transaction {trId}".format(trId=tr.id), ref=stateRef, authorIsCommitter=True)
if commitHash is None:
if "nothing to commit" in self.gitRepo.lastStdout:
logger.info("stream {streamName}: tr. #{trId} is a no-op. Potential but unlikely error. Continuing.".format(streamName=stream.name, trId=tr.id))
else:
break # Early return from processing this stream. Restarting should clean everything up.
else:
if self.UpdateAndCheckoutRef(ref=stateRef, commitHash=commitHash) != True:
return (None, None)
logger.info( "stream {streamName}: tr. #{trId} {trType} -> commit {hash} on {ref}".format(streamName=stream.name, trId=tr.id, trType=tr.Type, hash=self.ShortHash(commitHash), ref=stateRef) )
else:
logger.info( "Reached end transaction #{trId} for {streamName} -> {ref}".format(trId=endTr.id, streamName=stream.name, ref=stateRef) )
break
return (tr, commitHash)
def GetHashForTransaction(self, ref, trNum):
# Find the commit hash on our ref that corresponds to the provided transaction number.
cmd = ['git', 'log', '--format=%H', '--grep', '^transaction {trId}$'.format(trId=trNum), ref]
lastCommitHash = self.gitRepo.raw_cmd(cmd)
if lastCommitHash is None:
raise Exception("Couldn't query {ref} for Accurev state information at transaction {trId}. {cmd}".format(ref=ref, trId=trNum, cmd=' '.join(cmd)))
lastCommitHash = lastCommitHash.strip()
if len(lastCommitHash) == 0:
logger.error( "Failed to load transaction ({trId}) from ref {ref}. '{cmd}' returned empty.".format(trId=trNum, ref=ref, cmd=' '.join(cmd)) )
return None
return lastCommitHash
def GetTransactionForRef(self, ref):
# Find the last transaction number that we processed.
lastCommitInfo = self.gitRepo.raw_cmd(['git', 'log', '--pretty=oneline', ref, '-1'])
if lastCommitInfo is None:
raise Exception("Couldn't load last transaction for ref: {ref}".format(ref=ref))
lastCommitInfo = lastCommitInfo.strip()
if len(lastCommitInfo) == 0:
raise Exception("Couldn't load last transaction for ref: {ref} (empty result)".format(ref=ref))
lastCommitInfo = lastCommitInfo.split(' ')
if len(lastCommitInfo) != 3:
raise Exception("Unexpected format for last commit message! Expected 3 space separated fields but read: {info}".format(info=' '.join(lastCommitInfo)))
return int(lastCommitInfo[2])
def GetGitLogList(self, ref, afterCommitHash=None, gitLogFormat=None):
# Get the list of new hashes that have been committed to the stateRef but we haven't processed on the ref just yet.
cmd = ['git', 'log']
if gitLogFormat is not None:
cmd.append('--format={f}'.format(f=gitLogFormat))
cmd.append(ref)
if afterCommitHash is not None:
cmd.append('^{lastHash}'.format(lastHash=afterCommitHash))
hashList = self.gitRepo.raw_cmd(cmd)
if hashList is None:
logger.debug("Couldn't get the commit hash list from the ref {ref}. '{cmd}'".format(ref=ref, cmd=' '.join(cmd)))
return None
hashList = hashList.strip()
if len(hashList) == 0:
return []
return hashList.split('\n')
# Uses the stateRef information to fetch the contents of the stream for each transaction that whose information was committed to the stateRef and commits it to the dataRef.
def RetrieveStreamData(self, stream, dataRef, stateRef):
# Check if the ref exists!
dataRefObj = self.gitRepo.raw_cmd(['git', 'show-ref', dataRef])
assert dataRefObj is None or len(dataRefObj) != 0, "Invariant error! Expected non-empty string returned by git show-ref, but got '{str}'".format(s=dataRefObj)
# Either checkout last state or make the initial commit for a new dataRef.
lastTrId = None
stateHashList = None
if dataRefObj is not None:
# Find the last transaction number that we processed on the dataRef.
lastTrId = self.GetTransactionForRef(ref=dataRef)
# Find the commit hash on our stateRef that corresponds to our last transaction number.
lastStateCommitHash = self.GetHashForTransaction(ref=stateRef, trNum=lastTrId)
if lastStateCommitHash is None:
logger.error( "{dataRef} is pointing to transaction {trId} which wasn't found on the state ref {stateRef}.".format(trId=lastTrId, dataRef=dataRef, stateRef=stateRef) )
return (None, None)
# Get the list of new hashes that have been committed to the stateRef but we haven't processed on the dataRef just yet.
stateHashList = self.GetGitLogList(ref=stateRef, afterCommitHash=lastStateCommitHash, gitLogFormat='%H')
if stateHashList is None:
logger.error("Couldn't get the commit hash list to process from the Accurev state ref {stateRef}.".format(stateRef=stateRef))
return (None, None)
elif len(stateHashList) == 0:
logger.error( "{dataRef} is upto date. Couldn't load any more transactions after tr. ({trId}) from Accurev state ref {stateRef}.".format(trId=lastTrId, dataRef=dataRef, stateRef=stateRef, lastHash=lastStateCommitHash) )
# Get the first transaction that we are about to process.
trHistXml, trHist = self.GetHistInfo(ref=lastStateCommitHash)
tr = trHist.transactions[0]
commitHash = self.GetHashForTransaction(ref=dataRef, trNum=tr.id)
return (tr, commitHash)
# This means that the ref already exists so we should switch to it.
# We shouldn't do this earlier since if there's nothing to do we can skip this expensive operation.
self.SafeCheckout(ref=dataRef, doReset=True, doClean=True)
else:
# Get all the hashes from the stateRef since we need to process them all.
stateHashList = self.GetGitLogList(ref=stateRef, gitLogFormat='%H')
if stateHashList is None:
logger.warning("Couldn't get the commit hash list to process from the Accurev state ref {stateRef}.".format(stateRef=stateRef))
return (None, None)
if len(stateHashList) == 0:
logger.error( "{dataRef} is upto date. No transactions available in Accurev state ref {stateRef}. git log {stateRef} returned empty.".format(dataRef=dataRef, stateRef=stateRef) )
return (None, None)
# Remove the first hash (last item) from the processing list and process it immediately.
stateHash = stateHashList.pop()
assert stateHash is not None and len(stateHash) != 0, "Invariant error! We shouldn't have empty strings in the stateHashList"
logger.info( "No {dr} found. Processing {h} on {sr} first.".format(dr=dataRef, h=self.ShortHash(stateHash), sr=stateRef) )
# Get the first transaction that we are about to process.
trHistXml, trHist = self.GetHistInfo(ref=stateHash)
tr = trHist.transactions[0]
lastTrId = tr.id
# Delete everything in the index and working directory.
self.gitRepo.rm(fileList=['.'], force=True, recursive=True)
self.ClearGitRepo()
# Populate the stream contents from accurev
popResult = self.TryPop(streamName=stream.name, transaction=tr, overwrite=True)
if not popResult:
logger.error( "accurev pop failed for {trId} on {dataRef}".format(trId=tr.id, dataRef=dataRef) )
return (None, None)
# Make first commit.
commitHash = self.Commit(transaction=tr, allowEmptyCommit=True, messageOverride="transaction {trId}".format(trId=tr.id), parents=[], ref=dataRef, authorIsCommitter=True)
if commitHash is None:
# The first streams mkstream transaction will be empty so we may end up with an empty commit.
logger.error( "{0} first commit has failed.".format(stream.name) )
return (None, None)
else:
if self.gitRepo.checkout(branchName=dataRef) is None:
logger.error( "{0} failed to checkout data ref {1}. Aborting!".format(stream.name, dataRef) )
return (None, None)
logger.info( "stream {streamName}: tr. #{trId} {trType} -> commit {hash} on {ref}".format(streamName=stream.name, trId=tr.id, trType=tr.Type, hash=self.ShortHash(commitHash), ref=dataRef) )
# Find the last transaction number that we processed on the dataRef.
lastStateTrId = self.GetTransactionForRef(ref=stateRef)
if lastStateTrId is None:
logger.error( "Failed to get last transaction processed on the {ref}.".format(ref=stateRef) )
return (None, None)
# Notify the user what we are processing.
logger.info( "Processing stream data for {0} : {1} - {2}".format(stream.name, lastTrId, lastStateTrId) )
# Process all the hashes in the list
for stateHash in reversed(stateHashList):
assert stateHash is not None, "Invariant error! Hashes in the stateHashList cannot be none here!"
assert len(stateHash) != 0, "Invariant error! Excess new lines returned by `git log`? Probably safe to skip but shouldn't happen."
# Get the diff information. (if any)
diffXml, diff = self.GetDiffInfo(ref=stateHash)
# Get the hist information.
histXml, hist = self.GetHistInfo(ref=stateHash)
# Get the stream information.
streamsXml, streams = self.GetStreamsInfo(ref=stateHash)
deletedPathList = None
usePopMethod = (self.config.method == "pop")
if diff is None:
logger.warning("Accurev diff is unavailable for this transaction. Fallback to `pop method`...")
usePopMethod = True
elif not usePopMethod:
try:
warning = "Error trying to delete changed elements. Fallback to `pop method`..."
deletedPathList = self.DeleteDiffItemsFromRepo(diff=diff)
# Remove all the empty directories (this includes directories which contain an empty .gitignore file since that's what we is done to preserve them)
warning = "Error trying to delete empty directories. Fallback to `pop method`..."
self.DeleteEmptyDirs()
except:
usePopMethod = True
logger.warning(warning)
# This might be ok only in the case when the files/directories were changed but not in the case when there
# was a deletion that occurred. Fallback to using the pop method just to be safe.
if usePopMethod:
self.ClearGitRepo()
tr = hist.transactions[0]
streamAtTr = streams.getStream(stream.streamNumber)
if streamAtTr is None:
raise Exception("Failed to find stream {name} ({num}) in {list}".format(name=stream.name, num=stream.streamNumber, list=[(s.name, s.streamNumber) for s in streams]))
else:
stream = streamAtTr
# Work out the source and destination streams for the promote (for the purposes of the commit message info).
destStreamName, destStreamNumber = hist.toStream()
destStream = None
if destStreamNumber is not None:
destStream = streams.getStream(destStreamNumber)
if destStream is None:
raise Exception("Failed to find stream {name} ({num}) in {list}".format(name=destStreamName, num=destStreamNumber, list=[(s.name, s.streamNumber) for s in streams]))
srcStream = None
try:
srcStreamName, srcStreamNumber = hist.fromStream()
if srcStreamNumber is not None:
srcStream = streams.getStream(srcStreamNumber)
if srcStream is None:
raise Exception("Failed to find stream {name} ({num}) in {list}".format(name=srcStreamName, num=srcStreamNumber, list=[(s.name, s.streamNumber) for s in streams]))
except:
srcStreamName, srcStreamNumber = None, None
# Populate
logger.debug( "{0} pop: {1} {2}{3}".format(stream.name, tr.Type, tr.id, " to {0}".format(destStreamName) if destStreamName is not None else "") )
popResult = self.TryPop(streamName=stream.name, transaction=tr, overwrite=usePopMethod)
if not popResult:
logger.error( "accurev pop failed for {trId} on {dataRef}".format(trId=tr.id, dataRef=dataRef) )
return (None, None)
# Make the commit. Empty commits are allowed so that we match the state ref exactly (transaction for transaction).
# Reasoning: Empty commits are cheap and since these are not intended to be seen by the user anyway so we may as well make them to have a simpler mapping.
commitHash = self.Commit(transaction=tr, allowEmptyCommit=True, messageOverride="transaction {trId}".format(trId=tr.id), ref=dataRef, authorIsCommitter=True)
if commitHash is None:
logger.error( "Commit failed for {trId} on {dataRef}".format(trId=tr.id, dataRef=dataRef) )
return (None, None)
else:
logger.info( "stream {streamName}: tr. #{trId} {trType} -> commit {hash} on {ref} (end tr. {endTrId})".format(streamName=stream.name, trId=tr.id, trType=tr.Type, hash=self.ShortHash(commitHash), ref=dataRef, endTrId=lastStateTrId) )
return (tr, commitHash)
# Retrieves all of the stream information from accurev, needed for later processing, and stores it in git using the \a dataRef and \a stateRef.
# The retrieval and processing of the accurev information is separated in order to optimize processing of subsets of streams in a depot. For example,
# if we have processed 7 streams in a depot and now wish to add an 8th we would have to start processing from the beginning because the merge points
# between branches will now most likely need to be reconsidered. If the retrieval of information from accurev is a part of the processing step then we
# have to redo a lot of the work that we have already done for the 7 streams. Instead we have the two steps decoupled so that all we need to do is
# download the 8th stream information from accurev (which we don't yet have) and do the reprocessing by only looking for information already in git.
def RetrieveStream(self, depot, stream, dataRef, stateRef, hwmRef, startTransaction, endTransaction):
prevHwm = None
if hwmRef is not None:
hwmRefText = self.ReadFileRef(ref=hwmRef)
if hwmRefText is not None and len(hwmRefText) > 0:
prevHwmMetadata = json.loads(hwmRefText)
prevHwm = prevHwmMetadata.get("high-water-mark")
startTransaction = CallOnNonNoneArgs(max, int(startTransaction), prevHwm) # make sure we start from the transaction we last processed.
logger.info( "Retrieving stream {0} info from Accurev for transaction range : {1} - {2}".format(stream.name, startTransaction, endTransaction) )
stateTr, stateHash = self.RetrieveStreamInfo(depot=depot, stream=stream, stateRef=stateRef, startTransaction=startTransaction, endTransaction=endTransaction)
logger.info( "Retrieving stream {0} data from Accurev for transaction range : {1} - {2}".format(stream.name, startTransaction if prevHwm is None else prevHwm, endTransaction) )
dataTr, dataHash = self.RetrieveStreamData(stream=stream, dataRef=dataRef, stateRef=stateRef) # Note: In case the last retrieval was interrupted, we will retrieve those transactions first.
if stateTr is not None and dataTr is not None:
newHwm = CallOnNonNoneArgs(max, dataTr.id, prevHwm)
if stateTr.id != dataTr.id:
logger.error( "Missmatch while retrieving stream {streamName} (id: streamId), the data ref ({dataRef}) is on tr. {dataTr} while the state ref ({stateRef}) is on tr. {stateTr}.".format(streamName=stream.name, streamId=stream.streamNumber, dataTr=dataTr.id, stateTr=stateTr.id, dataRef=dataRef, stateRef=stateRef) )
else:
newHwm = CallOnNonNoneArgs(max, int(endTransaction), newHwm)
# Success! Update the high water mark for the stream.
if hwmRef is not None:
metadata = { "high-water-mark": newHwm }
if self.WriteFileRef(ref=hwmRef, text=json.dumps(metadata)) != True:
logger.error( "Failed to write the high-water-mark to ref {ref}".format(ref=hwmRef) )
else:
logger.info( "Updated the high-water-mark to ref {ref} as {trId}".format(ref=hwmRef, trId=newHwm) )
elif stateTr is not None and dataTr is None:
logger.error( "Missmatch while retrieving stream {streamName} (id: {streamId}), the state ref ({stateRef}) is on tr. {stateTr} but the data ref ({dataRef}) wasn't retrieved.".format(streamName=stream.name, streamId=stream.streamNumber, stateTr=stateTr.id, dataRef=dataRef, stateRef=stateRef) )
elif stateTr is None:
logger.error( "While retrieving stream {streamName} (id: {streamId}), the state ref ({stateRef}) failed.".format(streamName=stream.name, streamId=stream.streamNumber, dataRef=dataRef, stateRef=stateRef) )
return dataTr, dataHash
def RetrieveStreams(self):
if self.config.accurev.commandCacheFilename is not None:
accurev.ext.enable_command_cache(self.config.accurev.commandCacheFilename)
streamMap = self.GetStreamMap()
depot = self.config.accurev.depot
endTrHist = accurev.hist(depot=depot, timeSpec=self.config.accurev.endTransaction)
if endTrHist is None or endTrHist.transactions is None or len(endTrHist.transactions) == 0:
logger.error( "Failed to get end transaction for depot {0}. `accurev hist -p {0} -t {1}` returned no transactions. Please make sure the depot name is spelled correctly and that the transaction number/keyword is valid.".format(depot, self.config.accurev.endTransaction) )
return
endTr = endTrHist.transactions[0]
# Retrieve stream information from Accurev and store it inside git.
for stream in streamMap:
streamInfo = None
try:
streamInfo = accurev.show.streams(depot=depot, stream=stream, useCache=self.config.accurev.UseCommandCache()).streams[0]
except IndexError:
logger.error( "Failed to get stream information. `accurev show streams -p {0} -s {1}` returned no streams".format(depot, stream) )
return
except AttributeError:
logger.error( "Failed to get stream information. `accurev show streams -p {0} -s {1}` returned None".format(depot, stream) )
return
if depot is None or len(depot) == 0:
depot = streamInfo.depotName
stateRef, dataRef, hwmRef = self.GetStreamRefs(depot=depot, streamNumber=streamInfo.streamNumber)
assert stateRef is not None and dataRef is not None and len(stateRef) != 0 and len(dataRef) != 0, "Invariant error! The state ({sr}) and data ({dr}) refs must not be None!".format(sr=stateRef, dr=dataRef)
tr, commitHash = self.RetrieveStream(depot=depot, stream=streamInfo, dataRef=dataRef, stateRef=stateRef, hwmRef=hwmRef, startTransaction=self.config.accurev.startTransaction, endTransaction=endTr.id)
if self.config.git.remoteMap is not None:
refspec = "{dataRef}:{dataRef} {stateRef}:{stateRef}".format(dataRef=dataRef, stateRef=stateRef)
for remoteName in self.config.git.remoteMap:
pushOutput = None
logger.info("Pushing '{refspec}' to '{remote}'...".format(remote=remoteName, refspec=refspec))
try:
pushCmd = "git push {remote} {refspec}".format(remote=remoteName, refspec=refspec)
pushOutput = subprocess.check_output(pushCmd.split(), stderr=subprocess.STDOUT).decode('utf-8')
logger.info("Push to '{remote}' succeeded:".format(remote=remoteName))
logger.info(pushOutput)
except subprocess.CalledProcessError as e:
logger.error("Push to '{remote}' failed!".format(remote=remoteName))
logger.error("'{cmd}', returned {returncode} and failed with:".format(cmd="' '".join(e.cmd), returncode=e.returncode))
logger.error("{output}".format(output=e.output.decode('utf-8')))
if self.config.accurev.commandCacheFilename is not None:
accurev.ext.disable_command_cache()
# Lists the .git/... directory that contains all the stream refs and returns the file list as its result
def GetAllKnownStreamRefs(self, depot):
refsPrefix = self.GetDepotRefsNamespace() # Search all depots
cmd = [ 'git', 'show-ref', '--' ]
cmdResult = self.gitRepo.raw_cmd(cmd)
if cmdResult is None:
raise Exception("Failed to execute 'git show-ref --'!")
lines = cmdResult.strip().split('\n')
if len(lines) == 0:
raise Exception("The 'git show-ref --' command output was empty!")
rv = []
for line in lines:
columns = line.split(' ')
commitHash, ref = columns[0], columns[1]
depotNumber, streamNumber, remainder = self.ParseStreamRef(ref=ref)
if None not in [ depotNumber, streamNumber ]:
rv.append(ref)
return rv
# Tries to get the stream name from the data that we have stored in git.
def GetStreamByName(self, depot, streamName):
depot = self.GetDepot(depot)
accurev_is_loggedin = False
try:
accurev_is_loggedin = accurev.ext.is_loggedin()
except:
pass
if depot is not None and accurev_is_loggedin:
try:
stream = accurev.show.streams(depot=depot.name, stream=streamName, useCache=self.config.accurev.UseCommandCache()).streams[0]
if stream is not None and stream.name is not None:
return stream
except:
logger.info("Failed to find stream '{0}' using `accurev show streams -s '{0}'`, trying to search through processed history (slow way)...".format(streamName))
else:
logger.info("Not logged into Accurev. Searching for stream '{0}' by name (the slow way)...".format(streamName))
# Without using Accurev we can search for it in our Git history but this is really slow...
streamNamesRefspec = u'{refsNS}cache/depots/{depotNumber}/stream_names'.format(refsNS=AccuRev2Git.gitRefsNamespace, depotNumber=depot.number)
streamNames = {} # This is so we cache the stream name to stream number mapping which can take about 10 seconds to compute in a large-ish repo...
streamNamesText = self.ReadFileRef(ref=streamNamesRefspec)
if streamNamesText is not None:
streamNames = json.loads(streamNamesText)
if streamName in streamNames:
commitHash = streamNames[streamName]
if commitHash is not None:
streamsXml, streams = self.GetStreamsInfo(ref=commitHash)
s = streams.getStream(streamName)
if s is not None:
logger.debug("Loaded cached stream '{name}' by name.".format(name=streamName))
return s # Found it!
logger.debug("Searching for stream '{name}' by name.".format(name=streamName))
refsPrefix = self.GetStreamRefsNamespace(depot.number)
refList = self.GetAllKnownStreamRefs(depot.number)
if refList is None:
refList = []
# The stream with the lowest number is most likely to have a transaction with a streams.xml that contains
# our stream name. Only if we are really unlucky will we have to search more than the lowest numbered stream.
# So, parse and extract the number from the ..._info refs, and remove the ..._data refs.
infoRefList = []
for ref in refList:
depotNumber, streamNumber, remainder = self.ParseStreamRef(ref=ref)
if streamNumber is not None and remainder == "info":
infoRefList.append( (streamNumber, ref) ) # The stream number is extracted and put as the first element for sorting.
infoRefList.sort()
if len(infoRefList) == 0:
logger.warning("The refs from which we search for stream information seem to be missing...")
for streamNumber, ref in infoRefList:
# Execute a `git log -S` command with the pickaxe option to find the stream name in the streams.xml
cmd = [ 'git', 'log', '--format=%H', '-Sname="{n}"'.format(n=streamName), ref, '--', 'streams.xml' ]
hashList = self.gitRepo.raw_cmd(cmd)
if hashList is not None:
hashList = hashList.strip()
if len(hashList) != 0:
hashList = hashList.split()
# If there is more than one element then the stream has probably been renamed so we will take the earliest commit in which
# the stream name appears.
commitHash = hashList[-1]
streamsXml, streams = self.GetStreamsInfo(ref=commitHash)
s = streams.getStream(streamName)
if s is not None:
streamNames[streamName] = commitHash # Write the commit hash where we found the stream name in the cache.
self.WriteFileRef(ref=streamNamesRefspec, text=json.dumps(streamNames)) # Do it for each stream since this is cheaper than searching.
return s
assert False, "Invariant error! We successfully found that the hash {h} on ref {r} mentions the stream {sn} but couldn't match it?!".format(h=commitHash, r=ref, sn=streamName)
return None
def GetRefMap(self, ref, mapType, afterCommitHash=None):
allowedMapTypes = [ "commit2tr", "tr2commit" ]
if ref is None or mapType is None:
raise Exception("None type arguments not allowed! ref: {ref}, mapType: {mapType}".format(ref=ref, mapType=mapType))
elif mapType not in allowedMapTypes:
raise Exception("mapType must be one of {types}".format(types=', '.join(allowedMapTypes)))
cmd = [ 'git', 'log', '--pretty=oneline', ref ]
if afterCommitHash is not None:
cmd.append( '^{lastHash}'.format(lastHash=afterCommitHash) )
cmdResult = self.gitRepo.raw_cmd(cmd)
strList = None
if cmdResult is not None:
cmdResult = cmdResult.strip()
if len(cmdResult) > 0:
strList = cmdResult.split('\n')
else:
logger.debug("GetRefMap(ref={ref}, mapType={t}) - command result is empty. Cmd: {cmd}".format(ref=ref, t=mapType, cmd=' '.join(cmd)))
return None
else:
logger.debug("GetRefMap(ref={ref}, mapType={t}) - command result was None. Cmd: {cmd}, Err: {err}".format(ref=ref, t=mapType, cmd=' '.join(cmd), err=self.gitRepo.lastStderr))
return None
refMap = OrderedDict()
if strList is not None:
for s in strList:
columns = s.split(' ')
if mapType == "commit2tr":
refMap[columns[0]] = int(columns[2])
elif mapType == "tr2commit":
refMap[int(columns[2])] = columns[0]
return refMap
def ShortHash(self, commitHash):
if commitHash is None:
return None
if not isinstance(commitHash, str):
return commitHash
return commitHash[:8]
def AddNote(self, transaction, commitHash, ref, note, committerName=None, committerEmail=None, committerDate=None, committerTimezone=None):
notesFilePath = None
if note is not None:
with tempfile.NamedTemporaryFile(mode='w+', prefix='ac2git_note_', encoding='utf-8', delete=False) as notesFile:
notesFilePath = notesFile.name
notesFile.write(note)
# Get the author's and committer's name, email and timezone information.
if transaction is not None:
committerName, committerEmail = self.GetGitUserFromAccuRevUser(transaction.user)
committerDate, committerTimezone = self.GetGitDatetime(accurevUsername=transaction.user, accurevDatetime=transaction.time)
if notesFilePath is not None:
rv = self.gitRepo.notes.add(messageFile=notesFilePath, obj=commitHash, ref=ref, force=True, committerName=committerName, committerEmail=committerEmail, committerDate=committerDate, committerTimezone=committerTimezone, authorName=committerName, authorEmail=committerEmail, authorDate=committerDate, authorTimezone=committerTimezone)
os.remove(notesFilePath)
if rv is not None:
logger.debug( "Added{ref} note for {hash}.".format(ref='' if ref is None else ' '+str(ref), hash=self.ShortHash(commitHash)) )
else:
logger.error( "Failed to add{ref} note for {hash}{trStr}".format(ref='' if ref is None else ' '+str(ref), hash=commitHash, trStr='' if transaction is None else ', tr. ' + str(transaction.id)) )
logger.error(self.gitRepo.lastStderr)
return rv
else:
logger.error( "Failed to create temporary file for script state note for {0}, tr. {1}".format(commitHash, transaction.id) )
return None
def ProcessStream(self, stream, branchName, startTrId=None, endTrId=None, streamMap=None):
if stream is not None:
stateRef, dataRef, hwmRef = self.GetStreamRefs(depot=stream.depotName, streamNumber=stream.streamNumber)
assert stateRef is not None and dataRef is not None and len(stateRef) != 0 and len(dataRef) != 0, "Invariant error! The state ({sr}) and data ({dr}) refs must not be None!".format(sr=stateRef, dr=dataRef)
if branchName is None:
branchName = stream.name
branchRef = 'refs/heads/{branchName}'.format(branchName=branchName)
sanitizedRef = self.SanitizeRefName(branchRef)
if branchRef != sanitizedRef:
logger.warning("Branch name '{0}' is not allowed, renamed to '{1}'.".format(branchRef[len("refs/heads/"):], sanitizedRef[len("refs/heads/"):]))
branchRef = sanitizedRef
branchList = self.gitRepo.branch_list()
if branchList is None:
return None
commitHash = None
lastDataCommitHash = None
if branchName in [ br.name if br is not None else None for br in branchList ]:
commitHash = self.GetLastCommitHash(branchName=branchName)
streamHistoryRef = self.GetStreamCommitHistoryRef(stream.depotName, stream.streamNumber)
cmd = [ u'git', u'log', u'-1', u'--format=format:%s', streamHistoryRef ]
trString = self.TryGitCommand(cmd=cmd)
if trString is None or len(trString) == 0:
logger.error("Branch {br} exists but no previous state was found for this branch. Cannot process branch, skipping...".format(br=branchName))
return None
# Here we know that the state must exist and be good!
lastTrId = int(trString.strip().split()[1])
# Find the commit hash on our dataRef that corresponds to our last transaction number.
lastDataCommitHash = self.GetHashForTransaction(ref=dataRef, trNum=lastTrId)
if lastDataCommitHash is None:
logger.error("Branch {br} exists and its last transaction was {lastTrId}. No new accurev data found, continuing...".format(br=branchName, lastTrId=lastTrId))
return None
# Get the list of new hashes that have been committed to the dataRef but we haven't processed on the dataRef just yet.
dataHashList = self.GetGitLogList(ref=dataRef, afterCommitHash=lastDataCommitHash, gitLogFormat='%H %s %T')
if dataHashList is None:
raise Exception("Couldn't get the commit hash list to process from the Accurev data ref {dataRef}.".format(dataRef=dataRef))
elif len(dataHashList) == 0:
logger.error( "{b} is upto date. Couldn't load any more transactions after tr. ({trId}).".format(trId=lastTrId, b=branchName) )
return self.GetLastCommitHash(branchName=branchName)
# Get the stateRef map of transaction numbers to commit hashes.
stateMap = self.GetRefMap(ref=stateRef, mapType="tr2commit")
assert stateMap is not None, "Invariant error! If the dataMap is not None then neither should the stateMap be!"
# Commit the new data with the correct commit messages.
for line in reversed(dataHashList):
columns = line.split(' ')
trId, treeHash = int(columns[2]), columns[3]
if endTrId is not None and endTrId < trId:
logger.debug( "ProcessStream(stream='{s}', branchName='{b}', endTrId='{endTrId}') - next tr. is {trId}, stopping.".format(s=stream.name, b=branchName, trId=trId, endTrId=endTrId) )
break
if startTrId is not None and trId < startTrId:
logger.debug( "ProcessStream(stream='{s}', branchName='{b}', startTrId='{startTrId}') - tr. {trId} is earlier than the start transaction, skipping.".format(s=stream.name, b=branchName, trId=trId, startTrId=startTrId) )
continue
# Get the transaction info.
stateHash = stateMap[trId]
if stateHash is None:
raise Exception("Failed to retrieve state information for tr. {trId}".format(trId))
trHistXml, trHist = self.GetHistInfo(ref=stateHash)
tr = trHist.transactions[0]
streamsXml, streams = self.GetStreamsInfo(ref=stateHash)
dstStreamName, dstStreamNumber = trHist.toStream()
dstStream = streams.getStream(dstStreamNumber)
srcStreamName, srcStreamNumber = trHist.fromStream()
srcStream = streams.getStream(srcStreamNumber)
parents = []
if commitHash is not None:
parents = [ commitHash ]
else:
# Invariant: The commitHash being None implies that this is
# the first commit for this stream and hence this must be a
# mkstream transaction.
# At the time the stream was created the current basis or time lock may not have
# been in effect. Hence we need the streams state at the given transaction.
streamAtTr = streams.getStream(stream.streamNumber)
# Note: if the streamMap is None the basisBranchName, basisCommitHash and streamTime will all be None and only the basisStream will be returned, hence this argument
# serves a dual purpose and can be used to control if this function attaches the processed branch to its basis. If you want an orphan branch pass in the streamMap
# as None.
basisStream, basisBranchName, basisCommitHash, streamTime = self.GetBasisCommitHash(streamAtTr.name, streamAtTr.streamNumber, streamAtTr.basisStreamNumber, streamAtTr.time, streams, streamMap, None, streamAtTr.startTime)
if basisBranchName is not None and basisCommitHash is None:
# The basis stream we found is tracked but there isn't a commit for it? This means that we are being processed first even though we should have processed the basis first...
self.ProcessStream(stream=basisStream, branchName=branchName, startTrId=startTrId, endTrId=endTrId, streamMap=streamMap)
# Try again, but this time we don't care if it fails since that must mean that we can't do anything about it.
basisStream, basisBranchName, basisCommitHash, streamTime = self.GetBasisCommitHash(streamAtTr.name, streamAtTr.streamNumber, streamAtTr.basisStreamNumber, streamAtTr.time, streams, streamMap, None, streamAtTr.startTime)
if basisCommitHash is None:
logger.info( "Creating orphan branch {branchName}.".format(branchName=branchName) )
else:
logger.info( "Creating branch {branchName} based on {basisBranchName} at {basisHash}".format(branchName=branchName, basisBranchName=basisBranchName, basisHash=basisCommitHash) )
parents = [ basisCommitHash ]
commitHash = self.CommitTransaction(tr=tr, stream=stream, parents=parents, treeHash=treeHash, branchName=branchName, srcStream=srcStream, dstStream=dstStream)
logger.info("Committed transaction {trId} to {br}. Commit {h}".format(trId=tr.id, br=branchName, h=self.ShortHash(commitHash)))
return True
return None
def ProcessStreams(self, orderByStreamNumber=False):
depot = self.config.accurev.depot
# Get the stream information for the configured streams from accurev (this is because stream names can change and accurev doesn't care about this while we do).
processingList = []
streamMap = self.GetStreamMap()
for stream in streamMap:
streamInfo = self.GetStreamByName(depot=depot, streamName=stream)
if depot is None or len(depot) == 0:
depot = streamInfo.depotName
elif depot != streamInfo.depotName:
logger.info("Stream {name} (id: {id}) is in depot {streamDepot} which is different than the configured depot {depot}. Ignoring...".format(name=streamInfo.name, id=streamInfo.streamNumber, streamDepot=streamInfo.depotName, depot=depot))
processingList.append( (streamInfo.streamNumber, streamInfo, streamMap[stream]) )
if orderByStreamNumber:
processingList.sort()
for streamNumber, stream, branchName in processingList:
oldCommitHash = self.GetLastCommitHash(branchName=branchName, retry=False)
self.ProcessStream(stream=stream, branchName=branchName)
newCommitHash = self.GetLastCommitHash(branchName=branchName)
# If a remote is configured and we have made a commit on this branch then do a push.
if self.config.git.remoteMap is not None and oldCommitHash != newCommitHash:
formatOptions = { "accurevNotes": AccuRev2Git.gitNotesRef_accurevInfo, "ac2gitNotes": AccuRev2Git.gitNotesRef_state, "branchName": branchName }
refspec = "{branchName}".format(**formatOptions)
if self.gitRepo.raw_cmd(['git', 'show-ref', '--hash', 'refs/notes/{accurevNotes}'.format(**formatOptions)]) is not None:
refspec += " refs/notes/{accurevNotes}:refs/notes/{accurevNotes}".format(**formatOptions)
if self.gitRepo.raw_cmd(['git', 'show-ref', '--hash', 'refs/notes/{ac2gitNotes}'.format(**formatOptions)]) is not None:
refspec += " refs/notes/{ac2gitNotes}:refs/notes/{ac2gitNotes}".format(**formatOptions)
for remoteName in self.config.git.remoteMap:
pushOutput = None
try:
pushCmd = "git push {remote} {refspec}".format(remote=remoteName, refspec=refspec)
pushOutput = subprocess.check_output(pushCmd.split(), stderr=subprocess.STDOUT).decode('utf-8')
logger.info("Push to '{remote}' succeeded:".format(remote=remoteName))
logger.info(pushOutput)
except subprocess.CalledProcessError as e:
logger.error("Push to '{remote}' failed!".format(remote=remoteName))
logger.debug("'{cmd}', returned {returncode} and failed with:".format(cmd="' '".join(e.cmd), returncode=e.returncode))
logger.debug("{output}".format(output=e.output.decode('utf-8')))
def AppendCommitMessageSuffixStreamInfo(self, suffixList, linePrefix, stream):
if stream is not None:
suffixList.append( ('{linePrefix}:'.format(linePrefix=linePrefix), '{name} (id: {id}; type: {Type})'.format(id=stream.streamNumber, name=stream.name, Type=stream.Type)) )
if stream.prevName is not None:
suffixList.append( ('{linePrefix}-prev-name:'.format(linePrefix=linePrefix), '{name}'.format(name=stream.prevName)) )
if stream.basis is not None:
suffixList.append( ('{linePrefix}-basis:'.format(linePrefix=linePrefix), '{name} (id: {id})'.format(name=stream.basis, id=stream.basisStreamNumber)) )
if stream.prevBasis is not None and len(stream.prevBasis) > 0:
suffixList.append( ('{linePrefix}-prev-basis:'.format(linePrefix=linePrefix), '{name} (id: {id})'.format(name=stream.prevBasis, id=stream.prevBasisStreamNumber)) )
if stream.time is not None and accurev.GetTimestamp(stream.time) != 0:
suffixList.append( ('{linePrefix}-timelock:'.format(linePrefix=linePrefix), '{time} (UTC)'.format(time=stream.time)) )
if stream.prevTime is not None and accurev.GetTimestamp(stream.prevTime) != 0:
suffixList.append( ('{linePrefix}-prev-timelock:'.format(linePrefix=linePrefix), '{prevTime} (UTC)'.format(prevTime=stream.prevTime)) )
def GenerateCommitMessageSuffix(self, transaction, stream=None, dstStream=None, srcStream=None, friendlyMessage=None):
suffixList = []
if friendlyMessage is not None:
suffixList.append(friendlyMessage)
suffixList.append( ('Accurev-transaction:', '{id} (type: {Type})'.format(id=transaction.id, Type=transaction.Type)) )
if stream is not None:
self.AppendCommitMessageSuffixStreamInfo(suffixList=suffixList, linePrefix='Accurev-stream', stream=stream)
if dstStream is not None:
self.AppendCommitMessageSuffixStreamInfo(suffixList=suffixList, linePrefix='Accurev-dst-stream', stream=dstStream)
if srcStream is not None:
self.AppendCommitMessageSuffixStreamInfo(suffixList=suffixList, linePrefix='Accurev-src-stream', stream=srcStream)
# Ensure that all the items are nicely column aligned by padding the titles with spaces after the colon.
longestSuffixTitle = 0
for suffix in suffixList:
if longestSuffixTitle < len(suffix[0]):
longestSuffixTitle = len(suffix[0])
suffixFormat = '{suffix: <' + str(longestSuffixTitle) + '} {info}'
lineList = []
for suffix in suffixList:
lineList.append(suffixFormat.format(suffix=suffix[0], info=suffix[1]))
return '\n'.join(lineList)
def GenerateCommitMessage(self, transaction, stream=None, dstStream=None, srcStream=None, title=None, friendlyMessage=None, cherryPickSrcHash=None):
messageSections = []
# The optional transaction key tag can be added to the footer or the header of the comment before anything else is done.
trComment = transaction.comment
messageKey = None
if self.config.git.messageKey is not None:
messageKey = self.config.git.messageKey.lower()
trKey = '{stream}/{transaction}'.format(stream=transaction.affectedStream()[0], transaction=transaction.id)
if trComment is None:
trComment = trKey
else:
if messageKey == "footer":
trComment = '\n\n'.join([trComment, trKey])
elif messageKey == "header":
trComment = '\n\n'.join([trKey, trComment])
else:
raise Exception("Unrecognized value '{v}' for message-key attribute of the git configuration file element.".format(v=self.config.git.messageKey))
# The messageStyle option determines additional information that is far more detailed than the simple transaction key and is processed here.
style = "notes"
if self.config.git.messageStyle is not None:
style = self.config.git.messageStyle.lower()
notes = None
if style == "clean":
messageSections.append(trComment)
elif style in [ "normal", "notes" ]:
if title is not None:
messageSections.append(title)
if trComment is not None:
messageSections.append(trComment)
suffix = self.GenerateCommitMessageSuffix(transaction=transaction, stream=stream, dstStream=dstStream, srcStream=srcStream, friendlyMessage=friendlyMessage)
if suffix is not None:
if style == "normal":
messageSections.append(suffix)
elif style == "notes":
notes = suffix
else:
raise Exception("Unrecognized git message style '{s}'".format(s=style))
if cherryPickSrcHash is not None:
if len(messageSections) > 0:
messageSections[0] = "(CP) {0}".format(messageSections[0])
messageSections.append("(cherry picked from commit {hash})".format(hash=cherryPickSrcHash))
return ('\n\n'.join(messageSections), notes)
def SanitizeRefComponent(self, component):
if component is None or len(component) == 0:
return component
# If it starts with a dot, remove the dot
if component[0] == '.':
component = component[1:]
# If it ends with .lock, remove the .lock
if component.endswith(".lock"):
component = component[:-len(".lock")]
return component
def SanitizeRefName(self, name):
if name is None or len(name) == 0:
return name
while "//" in name:
name = name.replace("//", "/")
illegalSequence = {
"..": "__",
"?": "_",
"*": "_",
"[": "_",
"\\": "/",
"@{": "_",
" ": "_"
}
for s in illegalSequence:
name = name.replace(s, illegalSequence[s])
# Remove control characters
nonControl = ""
for ch in name:
if ord(ch) <= 40:
nonControl += '_'
else:
nonControl += ch
name = nonControl
# Sanitize components
name = "/".join([self.SanitizeRefComponent(x) for x in name.split('/')])
illegalEnding = {
".": "",
"/": "/_"
}
for e in illegalEnding:
if name[-1] == e:
name = name[:-1] + illegalEnding[e]
if name == "@":
return "_"
return name
def SanitizeBranchName(self, name):
if name is None or len(name) == 0:
return name
sanitized = self.SanitizeRefName("refs/heads/{0}".format(name))
if sanitized is None or len(sanitized) == 0:
return sanitized
return sanitized[len("refs/heads/"):]
def BuildStreamTree(self, streams):
rv = {}
for s in streams:
rv[s.streamNumber] = { "parent": s.basisStreamNumber, "children": [], "self": s }
for s in streams:
if s.basisStreamNumber is None:
continue
if s.basisStreamNumber not in rv:
raise Exception("Incomplete set of streams given! Stream {s} is missing from the streams list, cannot build tree!".format(s=s.basisStreamNumber))
rv[s.basisStreamNumber]["children"].append(s.streamNumber)
return rv
def PruneStreamTree(self, streamTree, keepList):
rv = None
if streamTree is not None:
if keepList is None:
return streamTree
elif len(keepList) == 1:
return { keepList[0]: { "parent": None, "children": [], "self": streamTree[keepList[0]]["self"] } }
rv = streamTree.copy()
# Remove all the streams that are not in the keepList and take their children and add them to the parent stream.
for s in streamTree:
if s not in keepList:
# Find the next parent that we are keeping.
p = streamTree[s]["parent"]
while p is not None and p not in keepList:
p = streamTree[p]["parent"]
# If we found the parent then append our children to his/hers.
if p is not None:
c = streamTree[s]["children"]
rv[p]["children"].extend(c)
del rv[s]
# Set the parent for all the children to either None or the actual parent.
for c in streamTree[s]["children"]:
if c in rv:
rv[c]["parent"] = p
# Remove all the streams that are not in the keepList from each streams children list.
for s in rv:
children = []
for c in rv[s]["children"]:
if c in keepList:
children.append(c) # subset of children
rv[s]["children"] = children
return rv
def GetStreamCommitHistoryRef(self, depot, streamNumber):
depotObj = self.GetDepot(depot)
if depotObj is None:
raise Exception("Failed to get depot {depot}!".format(depot=depot))
depot = depotObj
if isinstance(streamNumber, int):
return u'{refsNS}state/depots/{depotNumber}/streams/{streamNumber}/commit_history'.format(refsNS=AccuRev2Git.gitRefsNamespace, depotNumber=depot.number, streamNumber=streamNumber)
return None
def LogBranchState(self, stream, tr, commitHash):
assert stream is not None and commitHash is not None and tr is not None, "LogBranchState(stream={s}, tr={t}, commitHash={h}) does not accept None arguments.".format(s=stream, t=tr, h=commitHash)
# Log this commit at this transaction in the state refs that keep track of this stream's history over time.
streamStateRefspec = self.GetStreamCommitHistoryRef(stream.depotName, stream.streamNumber)
if streamStateRefspec is None:
raise Exception("Failed to get hidden ref for stream {streamName} (id: {streamNumber}) depot {depotName}".format(streamName=stream.name, streamNumber=stream.streamNumber, depotName=stream.depotName))
# Write the empty tree to the git repository to ensure there is one.
emptyTree = self.gitRepo.empty_tree(write=True)
if emptyTree is None or len(emptyTree) == 0:
raise Exception("Failed to write empty tree to git repository!")
# Get the last known state
lastStateCommitHash = self.GetLastCommitHash(ref=streamStateRefspec)
if lastStateCommitHash is None:
# Since we will use git log --first-parent a lot we need to make sure we have a parentless commit to start off with.
lastStateCommitHash = self.Commit(transaction=tr, allowEmptyCommit=True, messageOverride='transaction {trId}'.format(trId=tr.id), parents=[], treeHash=emptyTree, ref=streamStateRefspec, checkout=False, authorIsCommitter=True)
if lastStateCommitHash is None:
raise Exception("Failed to add empty state commit for stream {streamName} (id: {streamNumber})".format(streamName=stream.name, streamNumber=stream.streamNumber))
logger.debug("Created state branch for stream {streamName} as {ref} - tr. {trType} {trId} - commit {h}".format(trType=tr.Type, trId=tr.id, streamName=stream.name, ref=streamStateRefspec, h=self.ShortHash(lastStateCommitHash)))
stateCommitHash = self.Commit(transaction=tr, allowEmptyCommit=True, messageOverride='transaction {trId}'.format(trId=tr.id), parents=[ lastStateCommitHash, commitHash ], treeHash=emptyTree, ref=streamStateRefspec, checkout=False, authorIsCommitter=True)
if stateCommitHash is None:
raise Exception("Failed to commit {Type} {tr} to hidden state ref {ref} with commit {h}".format(Type=tr.Type, tr=tr.id, ref=streamStateRefspec, h=self.ShortHash(commitHash)))
logger.debug("Committed stream state for {streamName} to {ref} - tr. {trType} {trId} - commit {h}".format(trType=tr.Type, trId=tr.id, streamName=stream.name, ref=streamStateRefspec, h=self.ShortHash(stateCommitHash)))
def TagTransaction(self, tagName, objHash, tr, stream, title=None, friendlyMessage=None, force=False):
tagMessage, notes = self.GenerateCommitMessage(transaction=tr, stream=stream, title=title, friendlyMessage=friendlyMessage)
# Create temporary file for the commit message.
messageFilePath = None
with tempfile.NamedTemporaryFile(mode='w+', prefix='ac2git_tag_', encoding='utf-8', delete=False) as messageFile:
messageFilePath = messageFile.name
emptyMessage = True
if tagMessage is not None:
if len(tagMessage) > 0:
messageFile.write(tagMessage)
emptyMessage = False
elif tr is not None and tr.comment is not None and len(tr.comment) > 0:
# In git the # at the start of the line indicate that this line is a comment inside the message and will not be added.
# So we will just add a space to the start of all the lines starting with a # in order to preserve them.
messageFile.write(tr.comment)
emptyMessage = False
if emptyMessage:
# `git commit` and `git commit-tree` commands, when given an empty file for the commit message, seem to revert to
# trying to read the commit message from the STDIN. This is annoying since we don't want to be opening a pipe to
# the spawned process all the time just to write an EOF character so instead we will just add a single space as the
# message and hope the user doesn't notice.
# For the `git commit` command it's not as bad since white-space is always stripped from commit messages. See the
# `git commit --cleanup` option for details.
messageFile.write(' ')
if messageFilePath is None:
logger.error("Failed to create temporary file for tag message. Transaction {trType} {trId}".format(trType=tr.Type, trId=tr.id))
return None
# Get the author's and committer's name, email and timezone information.
taggerName, taggerEmail, taggerDate, taggerTimezone = None, None, None, None
if tr is not None:
taggerName, taggerEmail = self.GetGitUserFromAccuRevUser(tr.user)
taggerDate, taggerTimezone = self.GetGitDatetime(accurevUsername=tr.user, accurevDatetime=tr.time)
rv = self.gitRepo.create_tag(name=tagName, obj=objHash, annotated=True, force=force, message_file=messageFilePath, tagger_name=taggerName, tagger_email=taggerEmail, tagger_date=taggerDate, tagger_tz=taggerTimezone, cleanup='whitespace')
os.remove(messageFilePath)
if rv is None:
# Depending on the version of Git we can't trust the return value of the `git tag` command.
# Hence we use `git log refs/tags/<tag name>` instead of peeling back the tag to ensure that
# it was correctly created.
commitHash = self.GetLastCommitHash(branchName="refs/tags/{0}".format(tagName), retry=True)
if commitHash != objHash:
# Note: This assumes that we are ONLY taggint commit objects. If this ever changes then
# we will need to properly peel back the tag and get the commit hash to which it points
# to so that we can directly compare it to the objHash.
logger.error("Failed to tag {trType} {trId}. Tag points to {commitHash} instead of {objHash}".format(trType=tr.Type, trId=tr.id, commitHash=commitHash, objHash=objHash))
return False
return True
def CommitTransaction(self, tr, stream, parents=None, treeHash=None, branchName=None, title=None, srcStream=None, dstStream=None, friendlyMessage=None, cherryPickSrcHash=None, refNamespace='refs/heads/'):
assert branchName is not None, "Error: CommitTransaction() is a helper for ProcessTransaction() and doesn't accept branchName as None."
branchRef = None
branchRef = '{ns}{branch}'.format(ns=refNamespace, branch=branchName)
checkout = (branchName is None)
commitMessage, notes = self.GenerateCommitMessage(transaction=tr, stream=stream, title=title, friendlyMessage=friendlyMessage, srcStream=srcStream, dstStream=dstStream, cherryPickSrcHash=cherryPickSrcHash)
commitHash = self.Commit(transaction=tr, allowEmptyCommit=True, messageOverride=commitMessage, parents=parents, treeHash=treeHash, ref=branchRef, checkout=checkout)
if commitHash is None:
raise Exception("Failed to commit {Type} {tr}".format(Type=tr.Type, tr=tr.id))
if notes is not None and self.AddNote(transaction=tr, commitHash=commitHash, ref=AccuRev2Git.gitNotesRef_accurevInfo, note=notes) is None:
raise Exception("Failed to add note for commit {h} (transaction {trId}) to {br}.".format(trId=tr.id, br=branchName, h=commitHash))
assert stream is not None, "Error: CommitTransaction() is a helper for ProcessTransaction() and doesn't accept stream as None."
self.LogBranchState(stream=stream, tr=tr, commitHash=commitHash)
return commitHash
def GitRevParse(self, ref):
if ref is not None:
commitHash = self.gitRepo.rev_parse(args=[str(ref)], verify=True)
if commitHash is None:
raise Exception("Failed to parse git revision {ref}. Err: {err}.".format(ref=ref, err=self.gitRepo.lastStderr))
return commitHash.strip()
return None
def GitDiff(self, ref1, ref2):
# The `git diff --stat` and `git diff` commands have different behavior w.r.t. .git/info/attributes file:
# http://stackoverflow.com/questions/10415100/want-to-exclude-file-from-git-diff#comment29471399_10421385
# therefore ensure not to use the `--stat` flag.
diff = self.gitRepo.diff(refs=[ref1, ref2], stat=False)
if diff is None:
raise Exception("Failed to diff {r1} to {r2}! Cmd: {cmd}, Err: {err}".format(r1=ref1, r2=ref2, cmd=' '.join(cmd), err=self.gitRepo.lastStderr))
return diff.strip()
def GitMergeBase(self, refs=[], isAncestor=False):
assert None not in refs, "None is not an accepted value for a ref. Given refs are {refs}".format(refs=refs)
hashes = []
for ref in refs:
hashes.append(self.GitRevParse(ref))
return self.gitRepo.merge_base(commits=hashes, is_ancestor=isAncestor)
def MergeIntoChildren(self, tr, streamTree, streamMap, affectedStreamMap, streams, streamNumber=None):
srcStream, dstStream = None, None
dstStreamName, dstStreamNumber = tr.affectedStream()
if dstStreamNumber is not None:
dstStream = streams.getStream(dstStreamNumber)
srcStreamName, srcStreamNumber = tr.fromStream()
if srcStreamNumber is not None:
srcStream = streams.getStream(srcStreamNumber)
if streamNumber is None:
for sn in streamTree:
if streamTree[sn]["parent"] is None:
stream, branchName, streamData, treeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=sn)
parents = None # If left as none it will become a cherry-pick.
if stream is None:
raise Exception("Couldn't get the stream from its number {n}".format(n=sn))
elif treeHash is None:
raise Exception("Couldn't get tree hash from stream {s} (branch {b}). tr {trId} {trType}".format(s=stream.name, b=branchName, trId=tr.id, trType=tr.Type))
commitHash = self.CommitTransaction(tr=tr, stream=stream, parents=parents, treeHash=treeHash, branchName=branchName, srcStream=srcStream, dstStream=dstStream, cherryPickSrcHash=None)
logger.info("{Type} {trId}. cherry-picked to {branch} {h}. Untracked parent stream {ps}.".format(Type=tr.Type, trId=tr.id, branch=branchName, h=self.ShortHash(commitHash), ps=dstStreamName))
# Recurse down into children.
self.MergeIntoChildren(tr=tr, streamTree=streamTree, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streams=streams, streamNumber=sn)
else:
stream, branchName, streamData, treeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=streamNumber)
if stream is None:
raise Exception("Couldn't get the stream from its number {n}".format(n=sn))
elif streamNumber not in streamTree:
raise Exception("Requested stream {s} (branch {b}) is not in the supplied tree {tree}. tr {trId} {trType}".format(s=stream.name, b=branchName, trId=tr.id, trType=tr.Type, tree=streamTree))
lastCommitHash = self.GetLastCommitHash(branchName=branchName)
s = streamTree[streamNumber]
for c in s["children"]:
assert c is not None, "Invariant error! Invalid dictionary structure. Data: {d1}, from: {d2}".format(d1=s, d2=streamTree)
childStream, childBranchName, childStreamData, childTreeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=c)
if childStream is None:
raise Exception("Couldn't get the stream from its number {n}".format(n=c))
elif childTreeHash is None:
raise Exception("Couldn't get tree hash from stream {s}".format(s=childStream.name))
if childStream.time is not None and accurev.GetTimestamp(childStream.time) != 0:
logger.info("{trType} {trId}. Child stream {s} is timelocked to {t}. Skipping affected child stream.".format(trType=tr.Type, trId=tr.id, s=childBranchName, t=childStream.time))
continue
lastChildCommitHash = self.GetLastCommitHash(branchName=childBranchName)
if lastChildCommitHash is None:
lastChildCommitHash = lastCommitHash
assert lastChildCommitHash is not None, "No last commit hash for branch {br}".format(br=childBranchName)
# Do a diff
parents = None # Used to decide if we need to perform the commit. If None, don't commit, otherwise we manually set the parent chain.
diff = self.GitDiff(lastCommitHash, childStreamData["data_hash"])
if len(diff) == 0:
if self.GitMergeBase(refs=[ lastChildCommitHash, lastCommitHash ], isAncestor=True):
# Fast-forward the child branch to here.
if self.UpdateAndCheckoutRef(ref='refs/heads/{branch}'.format(branch=childBranchName), commitHash=lastCommitHash, checkout=False) != True:
raise Exception("Failed to fast-forward {branch} to {hash} (latest commit on {parentBranch}.".format(branch=childBranchName, hash=self.ShortHash(lastCommitHash), parentBranch=branchName))
logger.info("{trType} {trId}. Fast-forward {b} to {dst} {h} (affected child stream). Was at {ch}.".format(trType=tr.Type, trId=tr.id, b=childBranchName, dst=branchName, h=self.ShortHash(lastCommitHash), ch=self.ShortHash(lastChildCommitHash)))
self.LogBranchState(stream=childStream, tr=tr, commitHash=lastCommitHash) # Since we are not committing we need to manually store the ref state at this time.
else:
if self.config.git.emptyChildStreamAction == "merge":
# Merge by specifying the parent commits.
parents = [ lastChildCommitHash , lastCommitHash ] # Make this commit a merge of the parent stream into the child stream.
logger.info("{trType} {trId}. Merge {dst} into {b} {h} (affected child stream). {ch} was not an ancestor of {h}.".format(trType=tr.Type, trId=tr.id, b=childBranchName, dst=branchName, h=self.ShortHash(lastCommitHash), ch=self.ShortHash(lastChildCommitHash)))
elif self.config.git.emptyChildStreamAction == "cherry-pick":
parents = [ lastChildCommitHash ] # Make this commit a cherry-pick of the parent stream into the child stream.
logger.info("{trType} {trId}. Cherry pick {dst} into {b} {h} (affected child stream). {ch} was not an ancestor of {h}.".format(trType=tr.Type, trId=tr.id, b=childBranchName, dst=branchName, h=self.ShortHash(lastCommitHash), ch=self.ShortHash(lastChildCommitHash)))
else:
raise Exception("Unhandled option for self.config.git.emptyChildStreamAction. Option was set to: {0}".format(self.config.git.emptyChildStreamAction))
else:
parents = [ lastChildCommitHash ] # Make this commit a cherry-pick with no relationship to the parent stream.
logger.info("{trType} {trId}. Cherry pick {dst} {dstHash} into {b} - diff between {h1} and {dstHash} was not empty! (affected child stream)".format(trType=tr.Type, trId=tr.id, b=childBranchName, dst=branchName, dstHash=self.ShortHash(lastCommitHash), h1=self.ShortHash(childStreamData["data_hash"])))
if parents is not None:
assert None not in parents, "Invariant error! Either the source hash {sh} or the destination hash {dh} was none!".format(sh=parents[1], dh=parents[0])
cherryPickSrcHash = None
if len(parents) == 1 and parents[0] == lastChildCommitHash:
cherryPickSrcHash = lastCommitHash
commitHash = self.CommitTransaction(tr=tr, stream=childStream, treeHash=childTreeHash, parents=parents, branchName=childBranchName, srcStream=srcStream, dstStream=dstStream, cherryPickSrcHash=cherryPickSrcHash)
if commitHash is None:
raise Exception("Failed to commit transaction {trId} to branch {branchName}.".format(trId=tr.id, branchName=childBranchName))
# Recurse into each child and do the same for its children.
self.MergeIntoChildren(tr=tr, streamTree=streamTree, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streams=streams, streamNumber=c)
def UnpackStreamDetails(self, streams, streamMap, affectedStreamMap, streamNumber):
# Get the information for the stream on which this transaction had occurred.
stream, branchName, streamData, treeHash = None, None, None, None
if streamNumber is not None:
if not isinstance(streamNumber, int):
streamNumber = int(streamNumber)
# Check if the destination stream is a part of our processing.
if streamMap is not None and str(streamNumber) in streamMap:
branchName = streamMap[str(streamNumber)]["branch"]
if affectedStreamMap is not None and streamNumber in affectedStreamMap:
streamData = affectedStreamMap[streamNumber]
treeHash = streamData["data_tree_hash"]
if treeHash is None:
raise Exception("Couldn't get tree hash from stream {s}".format(s=streamName))
# Get the deserialized stream object.
stream = streams.getStream(streamNumber)
return stream, branchName, streamData, treeHash
def GetTimestampForCommit(self, commitHash):
cmd = [u'git', u'log', u'-1', u'--format=format:%at', commitHash]
timestamp = self.TryGitCommand(cmd=cmd)
if timestamp is not None:
return int(timestamp)
return None
def GetOrphanCommit(self, ref, customFormat='%H'):
cmd = [u'git', u'log', u'-1', u'--format=format:{format}'.format(format=customFormat), u'--first-parent', u'--max-parents=0', ref]
return self.TryGitCommand(cmd=cmd)
def GetBasisCommitHash(self, streamName, streamNumber, streamBasisNumber, streamTime, streams, streamMap, affectedStreamMap, streamCreationTime):
# Get the current/new basis stream
basisStream, basisBranchName, basisStreamData, basisTreeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=streamBasisNumber)
minTimestamp = None if streamTime is None or accurev.GetTimestamp(streamTime) == 0 else accurev.GetTimestamp(streamTime)
while basisStream is not None and basisBranchName is None: # Find the first tracked basis stream.
# Since this is an untracked basis take the earlier timestamp between ours and its timestamp.
if basisStream.time is not None:
basisTimestamp = accurev.GetTimestamp(basisStream.time)
if basisTimestamp != 0:
minTimestamp = CallOnNonNoneArgs(min, minTimestamp, basisTimestamp)
# Make the basis streams basis our basis and try again...
basisStream, basisBranchName, basisStreamData, basisTreeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=basisStream.basisStreamNumber)
# Update the stream time to be what we expect.
minTime = accurev.UTCDateTimeOrNone(minTimestamp)
if minTimestamp is not None and minTime != streamTime:
logger.debug("GetBasisCommitHash: One of the parent streams had an earlier timelock.")
logger.debug(" - Stream timelock: {ts} ({t})".format(ts=accurev.GetTimestamp(streamTime), t=streamTime))
logger.debug(" - Earliest parent timelock: {ts} ({t})".format(ts=minTimestamp, t=accurev.UTCDateTimeOrNone(minTimestamp)))
if streamCreationTime is not None:
streamCreationTimestamp = accurev.GetTimestamp(streamCreationTime)
if minTimestamp is None:
logger.debug("GetBasisCommitHash: streamCreationTime specified but no timelock found.")
logger.debug(" - streamCreationTime will replace the timelock in further processing.")
minTime, minTimestamp = streamCreationTime, streamCreationTimestamp
elif streamCreationTimestamp < minTimestamp:
logger.warning("GetBasisCommitHash: streamCreationTime is earlier than the timelock!")
logger.warning(" - streamCreationTime will replace the timelock in further processing.")
minTime, minTimestamp = streamCreationTime, streamCreationTimestamp
if basisBranchName is not None:
basisBranchHistoryRef = self.GetStreamCommitHistoryRef(basisStream.depotName, basisStream.streamNumber)
timelockMessage = ''
timelockISO8601Str = None
if minTime is not None and accurev.GetTimestamp(minTime) != 0: # A timestamp of 0 indicates that a timelock was removed.
timelockISO8601Str = "{datetime}Z".format(datetime=minTime.isoformat('T')) # The time is in UTC and ISO8601 requires us to specify Z for UTC.
timelockMessage = ", before {s}".format(s=timelockISO8601Str)
parentHashes = None
earliestAllowedTimestamp = self.GetOrphanCommit(ref=basisBranchHistoryRef, customFormat='%at')
if earliestAllowedTimestamp is None:
logger.error("Failed to retrieve first commit hash for {ref}".format(ref=basisBranchHistoryRef))
return None, None, None, None
cmd = []
if minTime is not None and (accurev.GetTimestamp(minTime) < int(earliestAllowedTimestamp)):
# The timelock has been created before the creation date of the stream. We cannot return its
# state before this time so we must return its first known/possible state.
cmd = [u'git', u'log', u'-1', u'--format=format:%P', u'--reverse', u'--min-parents=1', u'--first-parent', basisBranchHistoryRef]
parentHashes = self.TryGitCommand(cmd=cmd)
logger.warning("Currently processed transaction requested its basis commit hash before its basis existed.")
logger.warning(" - Earliest time available: {t}.".format(t=accurev.UTCDateTimeOrNone(earliestAllowedTimestamp)))
logger.warning(" - Time requested: {t}.".format(t=minTime))
logger.warning(" Returning the earliest time available instead. TODO: What does Accurev actually do here? Should we look at the next basis in the chain?")
else:
cmd = [u'git', u'log', u'-1', u'--format=format:%P', u'--first-parent']
if timelockISO8601Str is not None:
cmd.append(u'--before={before}'.format(before=timelockISO8601Str))
cmd.append(basisBranchHistoryRef)
parentHashes = self.TryGitCommand(cmd=cmd)
if parentHashes is None:
logger.error("Failed to retrieve last git commit hash. Command `{0}` failed.".format(' '.join(cmd)))
return None, None, None, None
parents = parentHashes.split()
logger.debug("GetBasisCommitHash: Basis stream {basisName} (id: {basisSN}) at commit hash {h} is the basis for stream {name} (id: {sn}){timelockMsg}. (Retrieved from {ref})".format(name=streamName, sn=streamNumber, basisName=basisStream.name, basisSN=basisStream.streamNumber, ref=basisBranchHistoryRef, timelockMsg=timelockMessage, h=self.ShortHash(parents[1])))
logger.debug("GetBasisCommitHash: Commit hash for stream {name} (id: {sn}) was not found.".format(name=streamName, sn=streamNumber))
return basisStream, basisBranchName, parents[1], minTime
logger.debug("GetBasisCommitHash: Commit hash for stream {name} (id: {sn}) was not found.".format(name=streamName, sn=streamNumber))
return None, None, None, None
def TryInferSourceStream(self, streams, streamMap, affectedStreamMap, dstStreamNumber):
if dstStreamNumber is None:
# If we don't know where this was promoted to then we can't figure out where it was promoted from...
return None, None
# Convert the stream information into a tree containing only streams that were not affected by the transaction.
streamTree = self.BuildStreamTree(streams=streams.streams)
affectedSet = set([ sn for sn in affectedStreamMap ])
# Get the destination stream details.
stream, branchName, streamData, treeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=dstStreamNumber)
if stream is None:
raise Exception("Couldn't get the stream from its number {n}".format(n=sn))
# Diff the destination stream to its last commit.
if branchName is None:
#logger.debug("TryInferSourceStream: Can't infer source stream as the destination stream {s} (id: {id}) has no branch name.".format(s=stream.name, id=stream.streamNumber))
return None, None
lastCommitHash = self.GetLastCommitHash(branchName=branchName)
diff = self.GitDiff(lastCommitHash, streamData["data_hash"])
if len(diff) == 0:
# The diff is empty, meaning that this transaction didn't change the destination stream.
# It is impossible to determine the source stream for the promote in this case. Bail out!
logger.debug("TryInferSourceStream: Can't infer source stream as the destination stream {s} (id: {id}) has not changed.".format(s=stream.name, id=stream.streamNumber))
return None, None
possibleSrcStream = []
s = streamTree[dstStreamNumber]
for c in s["children"]:
assert c is not None, "Invariant error! Invalid dictionary structure. Data: {d1}, from: {d2}".format(d1=s, d2=streamTree)
if c in affectedSet:
# If this child stream was the source of the promote it wouldn't have been changed (affected) by the promote.
continue
childStream, childBranchName, childStreamData, childTreeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=c)
if childStream is None:
raise Exception("Couldn't get the stream from its number {n}".format(n=c))
assert childTreeHash is None, "Invariant: As an unaffected stream there should be no data for this transaction."
if childStream.time is not None and accurev.GetTimestamp(childStream.time) != 0:
logger.debug("TryInferSourceStream: Child stream {s} is timelocked to {t}. It can't be used for determining the source stream.".format(s=childBranchName, t=childStream.time))
continue
if childBranchName is None:
logger.debug("TryInferSourceStream: Child stream {s} (id: {id}) has no branch name, skipping.".format(s=childStream.name, id=childStream.streamNumber))
continue
lastChildCommitHash = self.GetLastCommitHash(branchName=childBranchName)
if lastChildCommitHash is None:
# If the child stream has no last commit hash then it is excluded from the candidacy.
continue
# Do a diff between the last commit on the child stream and the tree in the destination stream.
# If they are the same then we know that it is highly likely that this is the source of the promote.
diff = self.GitDiff(lastChildCommitHash, streamData["data_hash"])
if len(diff) == 0:
possibleSrcStream.append( (childStream.name, childStream.streamNumber) )
# If we have only a single stream that wasn't changed by this transaction, and its contents is the same as the
# destination stream's (i.e. it has become an empty stream), then it is most likely the source of the promote.
if len(possibleSrcStream) == 1:
return possibleSrcStream[0]
# No suitable source stream found.
return None, None
# Processes a single transaction whose id is the trId (int) and which has been recorded against the streams outlined in the affectedStreamMap.
# affectedStreamMap is a dictionary with the following format { <key:stream_num_str>: { "state_hash": <val:state_ref_commit_hash>, "data_hash": <val:data_ref_commit_hash> } }
# The streamMap is used so that we can translate streams and their basis into branch names { <key:stream_num_str>: { "stream": <val:config_strem_name>, "branch": <val:config_branch_name> } }
def ProcessTransaction(self, streamMap, trId, affectedStreamMap, prevAffectedStreamMap):
# For all affected streams the streams.xml and hist.xml contents should be the same for the same transaction id so get it from any one of them.
arbitraryStreamNumberStr = next(iter(affectedStreamMap))
arbitraryStreamData = affectedStreamMap[arbitraryStreamNumberStr]
streamsXml, streams = self.GetStreamsInfo(ref=arbitraryStreamData["state_hash"])
if streams is None:
raise Exception("Couldn't get streams for transaction {tr}. Aborting!".format(tr=trId))
# Get the transaction information.
trHistXml, trHist = self.GetHistInfo(ref=arbitraryStreamData["state_hash"])
if trHist is None or len(trHist.transactions) == 0 is None:
raise Exception("Couldn't get history for transaction {tr}. Aborting!".format(tr=trId))
tr = trHist.transactions[0]
# Get the name and number of the stream on which this transaction had occurred.
streamName, streamNumber = tr.affectedStream()
logger.debug( "Transaction #{tr} - {Type} by {user} to {stream} at {localTime} local time ({utcTime} UTC)".format(tr=tr.id, Type=tr.Type, utcTime=tr.time, localTime=utc2local(tr.time), user=tr.user, stream=streamName) )
# Get the information for the stream on which this transaction had occurred.
stream, branchName, streamData, treeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=streamNumber)
# Process the transaction based on type.
if tr.Type in ignored_transaction_types: # Ignored transactions.
logger.info("Ignoring transaction #{id} - {Type} (transaction type is in ignored_transaction_types list)".format(id=tr.id, Type=tr.Type))
elif tr.Type in [ "mkstream", "chstream" ]:
parents = None
lastCommitHash = None
title = None
targetStreams = []
prevBasisCommitHash = None
if tr.Type == "mkstream":
# Old versions of accurev don't tell you the name of the stream that was created in the mkstream transaction.
# The only way to find out what stream was created is to diff the output of the `accurev show streams` command
# between the mkstream transaction and the one that preceedes it. However, the mkstream transaction will only
# affect one stream so by the virtue of our datastructure the arbitraryStreamData should be the onlyone in our list
# and we already have its "streamNumber".
assert len(affectedStreamMap) == 1, "Invariant error! There is no way to know for what stream this mkstream transaction was made!"
stream, branchName, streamData, treeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=int(arbitraryStreamNumberStr))
parents = [] # First, orphaned, commit is denoted with an empty parents list.
title = 'Created {name}'.format(name=branchName)
targetStreams.append( (stream, branchName, streamData, treeHash, parents) )
elif tr.Type == "chstream":
assert tr.stream is not None, "Invariant error! Can't handle not having a stream in the accurev hist XML output for a chstream transaction..."
stream, branchName, streamData, treeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=tr.stream.streamNumber)
stream = tr.stream
if branchName is not None: # Only if this stream is being processed should we look it up.
title = 'Rebased {name}'.format(name=branchName)
lastCommitHash = self.GetLastCommitHash(branchName=branchName)
if lastCommitHash is None:
raise Exception("Error! Failed to get the last commit hash for branch {b} (stream: {s}), transaction {trType} {trId}!".format(trType=tr.Type, trId=tr.id, b=branchName, s=stream.name))
parents = [ lastCommitHash ]
targetStreams.append( (stream, branchName, streamData, treeHash, parents) )
else:
allStreamTree = self.BuildStreamTree(streams=streams.streams)
keepList = list(set([ sn for sn in affectedStreamMap ]))
assert tr.stream.streamNumber not in keepList, "The stream must be tracked otherwise we would be in the if clause."
keepList.append(tr.stream.streamNumber)
affectedStreamTree = self.PruneStreamTree(streamTree=allStreamTree, keepList=keepList)
streamNode = affectedStreamTree[stream.streamNumber]
for sn in streamNode["children"]:
stream, branchName, streamData, treeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=sn)
lastCommitHash = self.GetLastCommitHash(branchName=branchName)
if lastCommitHash is None:
raise Exception("Error! Failed to get the last commit hash for branch {b} (stream: {s}), transaction {trType} {trId}!".format(trType=tr.Type, trId=tr.id, b=branchName, s=stream.name))
parents = [ lastCommitHash ]
targetStreams.append( (stream, branchName, streamData, treeHash, parents) )
# Get the previous commit hash off which we would have been based at the time of the previous processed transaction.
prevArbitraryStreamNumberStr = next(iter(prevAffectedStreamMap))
prevArbitraryStreamData = prevAffectedStreamMap[prevArbitraryStreamNumberStr]
prevStreamsXml, prevStreams = self.GetStreamsInfo(ref=prevArbitraryStreamData["state_hash"])
if prevStreams is None:
raise Exception("Couldn't get streams for previous transaction (current transaction {tr}). Aborting!".format(tr=trId))
prevBasisStreamNumber = tr.stream.prevBasisStreamNumber
if prevBasisStreamNumber is None:
prevBasisStreamNumber = tr.stream.basisStreamNumber
prevTime = tr.stream.prevTime
if prevTime is None:
prevTime = tr.stream.time
prevBasisStream, prevBasisBranchName, prevBasisCommitHash, prevStreamTime = self.GetBasisCommitHash(tr.stream.name, tr.stream.streamNumber, prevBasisStreamNumber, prevTime, prevStreams, streamMap, affectedStreamMap, None)
if prevBasisBranchName is not None and prevBasisCommitHash is None:
raise Exception("Couldn't determine the last basis commit hash.")
# Stream renames can be looked up in the stream.prevName value here.
if tr.stream.prevName is not None and len(tr.stream.prevName.strip()) > 0:
# if the stream has been renamed, use its new name from now on.
logger.info("Stream renamed from {oldName} to {newName}. Branch name is {branch}, ignoring.".format(oldName=tr.stream.prevName, newName=tr.stream.name, branch=branchName))
else:
raise Exception("Not yet implemented! {trId} {trType}, unrecognized transaction type.".format(trId=tr.id, trType=tr.Type))
# Get the commit hash off which we should be based off from this chstream transaction forward.
basisStream, basisBranchName, basisCommitHash, streamTime = self.GetBasisCommitHash(stream.name, stream.streamNumber, stream.basisStreamNumber, stream.time, streams, streamMap, affectedStreamMap, None)
if basisCommitHash is None:
title = "{title} orphaned branch.".format(title=title)
else:
title = "{title} based on {basisBranchName} at {h}".format(title=title, basisBranchName=basisBranchName, h=self.ShortHash(basisCommitHash))
assert len(targetStreams) != 0, "Invariant error! There should be at least one targetStreams item in the list!"
for stream, branchName, streamData, treeHash, parents in targetStreams:
assert branchName is not None, "Invariant error! branchName cannot be None here!"
createTag = False
refNamespace = 'refs/heads/'
if stream.Type == "snapshot":
refNamespace = 'refs/tags/'
createTag = True
if prevBasisCommitHash != basisCommitHash:
amMergedIntoPrevBasis = ( len(parents) > 0 and prevBasisCommitHash is not None and self.GitMergeBase(refs=[ parents[0], prevBasisCommitHash ], isAncestor=True) )
if None in [ amMergedIntoPrevBasis ]:
raise Exception("Error! The git merge-base command failed!")
elif amMergedIntoPrevBasis:
# Fast-forward the timelocked or snapshot stream branch to the correct commit.
if createTag:
# Note: This is unlikely to execute as we tend to compare hashes from refs directly and annotated tags have a hash that is different from the commit to which
# they point. This is here in case we decide to peel the tags back to expose the commits in the core git processing logic.
logger.warning("{trInfo} Re-creating tag {dst} on {b} at {h}".format(trInfo=trInfoMsg, b=basisBranchName, h=self.ShortHash(basisCommitHash), dst=branchName))
if self.TagTransaction(tagName=branchName, objHash=basisCommitHash, tr=tr, stream=stream, title=title, force=True) != True:
raise Exception("Failed to re-create tag {branch} at {hash} (latest commit on {parentBranch}).".format(branch=branchName, hash=self.ShortHash(basisCommitHash), parentBranch=basisBranchName))
elif self.UpdateAndCheckoutRef(ref='{ns}{branch}'.format(ns=refNamespace, branch=branchName), commitHash=basisCommitHash, checkout=False) != True:
raise Exception("Failed to fast-forward {branch} to {hash} (latest commit on {parentBranch}). Old basis {oldHash} on {oldParentBranch}. Title: {title}".format(branch=branchName, hash=self.ShortHash(basisCommitHash), parentBranch=basisBranchName, oldHash=self.ShortHash(prevBasisCommitHash), oldParentBranch=prevBasisBranchName, title=title))
else:
logger.info("{trType} {trId}. Fast-forward {dst} to {b} {h}.".format(trType=tr.Type, trId=tr.id, b=basisBranchName, h=self.ShortHash(basisCommitHash), dst=branchName))
parents = None # Do not commit!
self.LogBranchState(stream=stream, tr=tr, commitHash=basisCommitHash) # Since we are not committing we need to manually store the ref state at this time.
else:
# Merge by specifying the parent commits.
if self.config.git.newBasisIsFirstParent:
parents.insert(0, basisCommitHash) # Make this commit a merge of the parent stream into the child stream.
else:
parents.append(basisCommitHash)
assert None not in parents, "Invariant error! Either the source hash or the destination hash in {p} was none!".format(p=parents)
trInfoMsg="{trType} {trId}.".format(trType=tr.Type, trId=tr.id)
if len(parents) == 1:
basisTreeHash = self.GetTreeFromRef(ref=basisCommitHash)
if basisTreeHash == treeHash:
# Fast-forward the created stream branch to the correct commit.
if createTag:
if self.TagTransaction(tagName=branchName, objHash=basisCommitHash, tr=tr, stream=stream, title=title) != True:
if branchName in self.gitRepo.tag_list():
# Snapshot streams do have `chstream` transactions. For what reason, I can't say, but I suspect that their creation used to be a 2 step process. Either way,
# we need to be able to handle that case. Simply recreate the tag at the right point and ignore the history. It's supposed to be immutable anyway...
logger.warning("{trInfo} Tag {dst} exists, re-creating on {b} at {h}".format(trInfo=trInfoMsg, b=basisBranchName, h=self.ShortHash(basisCommitHash), dst=branchName))
if self.TagTransaction(tagName=branchName, objHash=basisCommitHash, tr=tr, stream=stream, title=title, force=True) != True:
raise Exception("Failed to create tag {branch} at {hash} (latest commit on {parentBranch}).".format(branch=branchName, hash=self.ShortHash(basisCommitHash), parentBranch=basisBranchName))
elif self.UpdateAndCheckoutRef(ref='{ns}{branch}'.format(ns=refNamespace, branch=branchName), commitHash=basisCommitHash, checkout=False) != True:
raise Exception("Failed to fast-forward {branch} to {hash} (latest commit on {parentBranch}).".format(branch=branchName, hash=self.ShortHash(basisCommitHash), parentBranch=basisBranchName))
parents = None # Don't commit this mkstream since it doesn't introduce anything new.
logger.info("{trInfo} Created {tagMsg}{dst} on {b} at {h}".format(trInfo=trInfoMsg, b=basisBranchName, h=self.ShortHash(basisCommitHash), dst=branchName, tagMsg="tag " if createTag else ""))
self.LogBranchState(stream=stream, tr=tr, commitHash=basisCommitHash) # Since we are not committing we need to manually store the ref state at this time.
else:
logger.info("{trInfo} Created {dst} based on {b} at {h} (tree was not the same)".format(trInfo=trInfoMsg, b=basisBranchName, h=self.ShortHash(basisCommitHash), dst=branchName))
else:
logger.info("{trInfo} Merging {b} {h} as first parent into {dst}.".format(trInfo=trInfoMsg, b=basisBranchName, h=self.ShortHash(basisCommitHash), dst=branchName))
if parents is not None:
if treeHash is None:
if branchName is None:
raise Exception("Couldn't get tree for {trType} {trId} on untracked stream {s}. Message: {m}".format(trType=tr.Type, trId=tr.id, s=stream.name, m=title))
logger.warning("No associated data commit found! Assumption: The {trType} {trId} didn't actually change the stream. An empty commit will be generated on branch {b}. Continuing...".format(trType=tr.Type, trId=tr.id, b=branchName))
treeHash = self.GetTreeFromRef(ref=branchName)
if treeHash is None:
raise Exception("Couldn't get last tree for {trType} {trId} on untracked stream {s}. Message: {m}".format(trType=tr.Type, trId=tr.id, s=stream.name, m=title))
commitHash = self.CommitTransaction(tr=tr, stream=stream, treeHash=treeHash, parents=parents, branchName=branchName, title=title, refNamespace=refNamespace)
if commitHash is None:
raise Exception("Failed to commit chstream {trId}".format(trId=tr.id))
logger.info("{Type} {tr}. committed to {branch} {h}. {title}".format(Type=tr.Type, tr=tr.id, branch=branchName, h=self.ShortHash(commitHash), title=title))
if createTag:
if self.TagTransaction(tagName=branchName, objHash=commitHash, tr=tr, stream=stream, title=title) != True:
raise Exception("Failed to create tag {branch} at {hash}.".format(branch=branchName, hash=self.ShortHash(commitHash)))
logger.warning("{Type} {tr}. creating tag {branch} for branch {branch} at {h}. {title}".format(Type=tr.Type, tr=tr.id, branch=branchName, h=self.ShortHash(commitHash), title=title))
else:
logger.debug("{Type} {tr}. skiping commit to {branch}. (fast-forwarded to {h}) {title}".format(Type=tr.Type, tr=tr.id, branch=branchName, h=self.ShortHash(basisCommitHash), title=title))
# Process all affected streams.
allStreamTree = self.BuildStreamTree(streams=streams.streams)
keepList = [ sn for sn in affectedStreamMap ]
if branchName is not None:
keepList.append(stream.streamNumber) # The stream on which the chstream transaction occurred will never be affected so we have to keep it in there explicitly for the MergeIntoChildren() algorithm (provided it is being processed).
keepList = list(set(keepList)) # Keep only unique values
affectedStreamTree = self.PruneStreamTree(streamTree=allStreamTree, keepList=keepList)
self.MergeIntoChildren(tr=tr, streamTree=affectedStreamTree, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streams=streams, streamNumber=stream.streamNumber if branchName is not None else None)
else:
if branchName is not None and treeHash is None:
raise Exception("Couldn't retrieve data for {trType} {trId} from stream {s}, branch {b}".format(trType=tr.Type, trId=tr.id, s=stream.name, b=branchName))
# The rest of the transactions can be processed by stream type. Normal streams that have children need to try and merge down while workspaces which don't have children can skip this step.
if stream.Type in [ "workspace" ]:
# Workspaces don't have child streams
if tr.Type not in [ "add", "keep", "co", "move" ]:
logger.warning("Unexpected transaction {Type} {tr}. occurred in workspace {w}.".format(Type=tr.Type, tr=tr.id, w=stream.name))
if branchName is not None:
commitHash = self.CommitTransaction(tr=tr, stream=stream, treeHash=treeHash, branchName=branchName)
logger.info("{Type} {tr}. committed to {branch} {h}.".format(Type=tr.Type, tr=tr.id, branch=branchName, h=self.ShortHash(commitHash)))
elif stream.Type in [ "normal" ]:
if tr.Type not in [ "promote", "defunct", "purge" ]:
logger.warning("Unexpected transaction {Type} {tr}. occurred in stream {s} of type {sType}.".format(Type=tr.Type, tr=tr.id, s=stream.name, sType=stream.Type))
# Promotes can be thought of as merges or cherry-picks in git and deciding which one we are dealing with
# is the key to having a good conversion.
# There are 4 situations that we should consider:
# 1. A promote from a child stream to a parent stream that promotes everything from that stream.
# This trivial case is the easiest to reason about and is obviously a merge.
# 2. A promote from a child stream to a parent stream that promotes only some of the things from that
# stream. (i.e. one of 2 transactions is promoted up, or a subset of files).
# This is slightly trickier to reason about since the transactions could have been promoted in order
# (from earliest to latest) in which case it is a sequence of merges or in any other case it should be
# a cherry-pick.
# 3. A promote from either an indirect descendant stream to this stream (a.k.a. cross-promote).
# This case can be considered as either a merge or a cherry-pick, but we will endevour to make it a merge.
# 4. A promote from either a non-descendant stream to this stream (a.k.a. cross-promote).
# This case is most obviously a cherry-pick.
if streamNumber is not None:
assert stream is not None, "Invariant error! How is it possible that at a promote transaction we don't have the destination stream? streams.xml must be invalid or incomplete!"
else:
raise Exception("Error! Could not determine the destination stream for promote {tr}.".format(tr=tr.id))
# Determine the stream from which the files in this this transaction were promoted.
srcStreamName, srcStreamNumber = trHist.fromStream()
if srcStreamNumber is None and tr.Type == 'promote':
if self.config.git.sourceStreamInferrence:
srcStreamName, srcStreamNumber = self.TryInferSourceStream(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, dstStreamNumber=streamNumber)
if srcStreamNumber is not None:
logger.info("{trType} {tr}. Source stream unavailable. Inferred source stream: {s} (id: {id})".format(tr=tr.id, trType=tr.Type, s=srcStreamName, id=srcStreamNumber))
else:
logger.debug("{trType} {tr}. Source stream unavailable. Failed to infer source stream.".format(tr=tr.id, trType=tr.Type))
else:
logger.debug("{trType} {tr}. Source stream unavailable.".format(tr=tr.id, trType=tr.Type))
srcStream, srcBranchName, srcStreamData, srcTreeHash = self.UnpackStreamDetails(streams=streams, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streamNumber=srcStreamNumber)
lastSrcBranchHash = None
if srcBranchName is not None:
lastSrcBranchHash = self.GetLastCommitHash(branchName=srcBranchName)
commitHash = None
if srcBranchName is not None and branchName is not None:
# Do a git diff between the two data commits that we will be merging.
diff = self.GitDiff(streamData["data_hash"], lastSrcBranchHash)
if len(diff) == 0:
parents = [ self.GetLastCommitHash(branchName=branchName) ]
isAncestor = self.GitMergeBase(refs=[ lastSrcBranchHash, parents[0] ], isAncestor=True)
assert isAncestor is not None, "Invariant error! Failed to determine merge base between {c1} and {c2}!".format(c1=lastSrcBranchHash, c2=parents[0])
if not isAncestor:
parents.append(lastSrcBranchHash) # Make this commit a merge of the last commit on the srcStreamBranch into the branchName.
assert None not in parents, "Invariant error! Either the source hash {sh} or the destination hash {dh} was none!".format(sh=parents[1], dh=parents[0])
commitHash = self.CommitTransaction(tr=tr, stream=stream, parents=parents, treeHash=treeHash, branchName=branchName, srcStream=srcStream, dstStream=stream)
infoMessage = "{trType} {tr}. Merged {src} {srcHash} into {dst} {dstHash}.".format(tr=tr.id, trType=tr.Type, src=srcBranchName, dst=branchName, srcHash=self.ShortHash(lastSrcBranchHash), dstHash=self.ShortHash(commitHash))
if self.config.git.sourceStreamFastForward:
# This is a manual merge and the srcBranchName should be fastforwarded to this commit since its contents now matches the parent stream.
if self.UpdateAndCheckoutRef(ref='refs/heads/{branch}'.format(branch=srcBranchName), commitHash=commitHash, checkout=False) != True:
raise Exception("Failed to update source {branch} to {hash} latest commit.".format(branch=srcBranchName, hash=self.ShortHash(commitHash)))
infoMessage = "{msg} Fast-forward {src} from {srcHash} to {dst} at {dstHash}.".format(msg=infoMessage, src=srcBranchName, dst=branchName, srcHash=self.ShortHash(lastSrcBranchHash), dstHash=self.ShortHash(commitHash))
self.LogBranchState(stream=srcStream, tr=tr, commitHash=commitHash) # Since we are not committing we need to manually store the ref state at this time.
logger.info(infoMessage)
else:
commitHash = self.CommitTransaction(tr=tr, stream=stream, treeHash=treeHash, branchName=branchName, srcStream=None, dstStream=stream, cherryPickSrcHash=lastSrcBranchHash)
msg = "{trType} {tr}. Cherry pick {src} {srcHash} into {dst} {dstHash}. Diff {dataHash} to {srcHash} was not empty.".format(tr=tr.id, trType=tr.Type, src=srcBranchName, dst=branchName, dataHash=self.ShortHash(streamData["data_hash"]), srcHash=self.ShortHash(lastSrcBranchHash), dstHash=self.ShortHash(commitHash))
logger.info(msg)
elif branchName is not None:
# Cherry pick onto destination and merge into all the children.
commitHash = self.CommitTransaction(tr=tr, stream=stream, treeHash=treeHash, branchName=branchName, srcStream=None, dstStream=stream)
msgSuffix = ''
if srcStreamNumber is None:
msgSuffix = "Accurev 'from stream' information missing."
else:
msgSuffix = "Source stream {name} (id: {number}) is not tracked.".format(name=srcStreamName, number=srcStreamNumber)
logger.info("{trType} {tr}. Cherry pick into {dst} {dstHash}. {suffix}".format(trType=tr.Type, tr=tr.id, dst=branchName, dstHash=self.ShortHash(commitHash), suffix=msgSuffix))
else:
logger.info("{trType} {tr}. destination stream {dst} (id: {num}) is not tracked.".format(trType=tr.Type, tr=tr.id, dst=streamName, num=streamNumber))
# TODO: Fix issue '#51 - Make purges into merges' here
# ====================================================
# In the case that this transaction/commit makes the child stream have the same contents as the parent stream (i.e. after a `purge` transaction that deleted everything from the stream)
# We should either merge this stream into its parent or rebase it onto its parent (neither of which is ideal).
# - If we make a merge commit on the parent we are effectively lying, since no transaction actually occurred on the parent stream. Additionally, we probably have to propagate it to all
# of our sibling streams that are direct ancestors of our parent and don't have timelocks.
# - If we rebase the branch onto the parent we will need to label the commit so that we don't lose that history and are at the same time making the life of anyone who is
# trying to figure out what happened in the past more difficult. Accurev shows the purge as a transaction in the stream and so should we.
#
# Aside: The more I think about it the more Accurev seems to follow the cherry-picking workflow which might explain why it feels so broken. Trying to infer merges from it is also
# quite tricky...
# ----------------------------------------------------
# Process all affected streams (which are generally the child streams of this stream).
allStreamTree = self.BuildStreamTree(streams=streams.streams)
keepList = list(set([ sn for sn in affectedStreamMap ]))
if srcStreamNumber is not None and srcStreamNumber in keepList:
keepList.remove(srcStreamNumber) # The source stream should never be in the affected streams list.
logger.warning("{trType} {tr}. dst stream {dst}, src stream {src}. The src stream was found in the affected child streams list which shouldn't be possible. Removing from affected child streams.".format(trType=tr.Type, tr=tr.id, dst=streamName, src=srcStreamName))
affectedStreamTree = self.PruneStreamTree(streamTree=allStreamTree, keepList=keepList)
self.MergeIntoChildren(tr=tr, streamTree=affectedStreamTree, streamMap=streamMap, affectedStreamMap=affectedStreamMap, streams=streams, streamNumber=(None if commitHash is None else streamNumber))
else:
raise Exception("Not yet implemented! Unrecognized stream type {Type}. Stream {name}".format(Type=stream.Type, name=stream.name))
def ReadFileRef(self, ref):
rv = None
for i in range(0, AccuRev2Git.commandFailureRetryCount):
rv = self.gitRepo.raw_cmd([u'git', u'show', ref])
if rv is None:
return None # Non-zero return code means that the ref likely doesn't exist.
elif len(rv) > 0: # The processes sometimes return empty strings via Popen.communicate()... Need to retry.
return rv
return rv
def WriteFileRef(self, ref, text):
if ref is not None and text is not None and len(text) > 0:
filePath = None
with tempfile.NamedTemporaryFile(mode='w+', prefix='ac2git_ref_file_', encoding='utf-8', delete=False) as f:
filePath = f.name
f.write(text)
if filePath is not None:
cmd = [ u'git', u'hash-object', u'-w', u'{0}'.format(filePath) ]
objHash = ''
tryCount = 0
while objHash is not None and len(objHash) == 0 and tryCount < AccuRev2Git.commandFailureRetryCount:
objHash = self.gitRepo.raw_cmd(cmd)
objHash = objHash.strip()
tryCount += 1
os.remove(filePath)
updateRefRetr = None
if objHash is not None:
cmd = [ u'git', u'update-ref', ref, objHash ]
updateRefRetr = self.gitRepo.raw_cmd(cmd)
if objHash is None or updateRefRetr is None:
logger.debug("Error! Command {cmd}".format(cmd=' '.join(str(x) for x in cmd)))
logger.debug(" Failed with: {err}".format(err=self.gitRepo.lastStderr))
logger.error("Failed to record text for ref {r}, aborting!".format(r=ref))
raise Exception("Error! Failed to record text for ref {r}, aborting!".format(r=ref))
else:
logger.error("Failed to create temporary file for writing text to {r}".format(r=ref))
raise Exception("Error! Failed to record current state, aborting!")
return True
return False
def GetDepotHighWaterMark(self, depot):
streamRefs = self.GetAllKnownStreamRefs(depot)
lowestHwm = None
for sRef in streamRefs:
depotNumber, streamNumber, remainder = self.ParseStreamRef(sRef)
if streamNumber is not None and remainder == "hwm":
text = self.ReadFileRef(ref=sRef)
if text is None:
logger.error("Failed to read ref {r}!".format(r=sRef))
hwm = json.loads(text)
if lowestHwm is None or hwm["high-water-mark"] < lowestHwm:
lowestHwm = hwm["high-water-mark"]
return lowestHwm
def ProcessTransactions(self):
depot = self.GetDepot(self.config.accurev.depot)
if depot is None:
raise Exception("Failed to get depot {depot}!".format(depot=self.config.accurev.depot))
# Git refspec for the state ref in which we will store a blob.
stateRefspec = u'{refsNS}state/depots/{depotNumber}/last'.format(refsNS=AccuRev2Git.gitRefsNamespace, depotNumber=depot.number)
# Load the streamMap from the current configuration file.
streamMap = OrderedDict()
configStreamMap = self.GetStreamMap()
for configStream in configStreamMap:
branchName = configStreamMap[configStream]
logger.info("Getting stream information for stream '{name}' which will be committed to branch '{branch}'.".format(name=configStream, branch=branchName))
stream = self.GetStreamByName(depot.number, configStream)
if stream is None:
raise Exception("Failed to get stream information for {s}".format(s=configStream))
# Since we will be storing this state in JSON we need to make sure that we don't have
# numeric indices for dictionaries...
streamMap[str(stream.streamNumber)] = { "stream": configStream, "branch": branchName }
# Load the last known state of the conversion repository.
stateText = self.ReadFileRef(ref=stateRefspec)
if stateText is not None:
state = json.loads(stateText)
# Restore the last known git repository state. We could have been interrupted in the middle of merges or other things so we need to be
# able to restore all branches.
if state["branch_list"] is not None and len(state["branch_list"]) > 0:
# Restore all branches to the last saved state but do the branch that was current at the time last.
currentBranch = None
for br in state["branch_list"]:
if not br["is_current"]:
logger.debug( "Restore branch {branchName} at commit {commit}".format(branchName=br["name"], commit=br["commit"]) )
if self.UpdateAndCheckoutRef(ref='refs/heads/{branch}'.format(branch=br["name"]), commitHash=br["commit"], checkout=False) != True:
raise Exception("Failed to restore last state for branch {br} at {c}.".format(br=br["name"], c=br["commit"]))
else:
currentBranch = br
if currentBranch is not None:
logger.debug( "Checkout last processed transaction #{tr} on branch {branchName} at commit {commit}".format(tr=state["last_transaction"], branchName=currentBranch["name"], commit=currentBranch["commit"]) )
result = self.gitRepo.raw_cmd([u'git', u'checkout', u'-B', currentBranch["name"], currentBranch["commit"]])
if result is None:
raise Exception("Failed to restore last state. git checkout -B {br} {c}; failed.".format(br=currentBranch["name"], c=currentBranch["commit"]))
# Check for branches that exist in the git repository but that we will be creating later.
streamBranchList = [ state["stream_map"][s]["branch"] for s in state["stream_map"] ] # Get the list of all branches that we will create.
loadedBranchList = [ b["name"] for b in state["branch_list"] ] # Get the list of all branches that we will create.
branchList = self.gitRepo.branch_list()
branchNameSet = set([ b.name for b in branchList ])
for b in branchList:
if b.name in streamBranchList and (state["branch_list"] is None or b.name not in loadedBranchList): # state["branch_list"] is a list of the branches that we have already created.
logger.warning("Branch {branch} exists in the repo but will need to be created later.".format(branch=b.name))
backupNumber = 1
while self.gitRepo.raw_cmd(['git', 'checkout', '-b', 'backup/{branch}_{number}'.format(branch=b.name, number=backupNumber)]) is None:
# Make a backup of the branch.
backupNumber += 1
if self.gitRepo.raw_cmd(['git', 'branch', '-D', b.name]) is None: # Delete the branch even if not merged.
raise Exception("Failed to delete branch {branch}!".format(branch=b.name))
logger.warning("Branch {branch} has been renamed to backup/{branch}_{number}.".format(branch=b.name, number=backupNumber))
for missingBranch in (set(loadedBranchList) - branchNameSet):
logger.warning("Branch {branch} is missing from the repo!".format(branch=missingBranch))
# Check for added/deleted/renamed streams w.r.t. the new config file and last known conversion reporitory state.
newSet = set([x for x in streamMap])
oldSet = set([x for x in state["stream_map"]])
removedSet = oldSet - newSet
addedSet = newSet - oldSet
changedSet = newSet & oldSet # intersect
# Rename each of the branches that have changed names.
for streamNumberStr in changedSet:
newBranchName = streamMap[streamNumberStr]["branch"]
oldBranchName = state["stream_map"][streamNumberStr]["branch"]
if newBranchName != oldBranchName:
msg = "(no-op)"
if oldBranchName in branchNameSet:
cmd = [ u'git', u'branch', u'-m', oldBranchName, newBranchName ]
if self.TryGitCommand(cmd, allowEmptyString=True) is None:
raise Exception("Failed to rename branch {old} to {new}.\nErr: {err}".format(old=oldBranchName, new=newBranchName, err=self.gitRepo.lastStderr))
msg = "(done)"
logger.info("renamed: {streamName} (id: {streamNumber}) -> {branchName} (from {oldBranchName}) {msg}".format(streamNumber=streamNumberStr, streamName=streamMap[streamNumberStr]["stream"], branchName=newBranchName, oldBranchName=oldBranchName, msg=msg))
# Delete the branches for the streams that were removed from the config file.
for streamNumberStr in removedSet:
branchName = state["stream_map"][streamNumberStr]["branch"]
msg = "(no-op)"
if branchName in branchNameSet:
cmd = [ u'git', u'branch', u'-D', branchName ]
if self.TryGitCommand(cmd) is None:
raise Exception("Failed to delete branch {br}".format(br=branchName))
msg = "(done)"
logger.info("removed: {streamName} (id: {streamNumber}) -> {branchName}".format(streamNumber=streamNumberStr, streamName=state["stream_map"][streamNumberStr]["stream"], branchName=branchName, msg=msg))
# Cherry pick all transactions from the creation of the stream until the last processed transaction for each added stream.
for streamNumberStr in addedSet:
streamName = streamMap[streamNumberStr]["stream"]
branchName = streamMap[streamNumberStr]["branch"]
logger.info("adding: {streamName} (id: {streamNumber}) -> {branchName}".format(streamNumber=streamNumberStr, streamName=streamName, branchName=branchName))
stream = self.GetStreamByName(depot.number, streamName)
if self.ProcessStream(stream=stream, branchName=branchName, startTrId=int(self.config.accurev.startTransaction), endTrId=state["last_transaction"], streamMap=streamMap) is not None:
logger.warning("Merge information prior to transaction {trId} will not be available for the newly added stream {streamName} (id: {streamNumber}) tracked by branch {branchName}.".format(trId=state["last_transaction"], streamNumber=streamNumberStr, streamName=streamName, branchName=branchName))
logger.info("added: {streamName} (id: {streamNumber}) -> {branchName}".format(streamNumber=streamNumberStr, streamName=streamName, branchName=branchName))
# After all of the added/removed/renamed branches are handled we can continue with the new stream map.
state["stream_map"] = streamMap
else:
logger.info("No last state in {ref}, starting new conversion.".format(ref=stateRefspec))
# Default state
state = { "depot_number": depot.number,
"stream_map": streamMap,
"last_transaction": (int(self.config.accurev.startTransaction) - 1),
"branch_list": None }
# Get the list of transactions that we are processing, and build a list of known branch names for maintaining their states between processing stages.
transactionsMap = {} # is a dictionary with the following format { <key:tr_num>: { <key:stream_num>: { "state_hash": <val:commit_hash>, "data_hash": <val:data_hash> } } }
for streamNumberStr in state["stream_map"]:
streamNumber = int(streamNumberStr)
# Initialize the state that we load every time.
stateRef, dataRef, hwmRef = self.GetStreamRefs(depot=state["depot_number"], streamNumber=streamNumber)
# Get the state ref's known transactions list.
logger.info("Getting transaction to info commit mapping for stream number {s}. Ref: {ref}".format(s=streamNumber, ref=stateRef))
stateMap = self.GetRefMap(ref=stateRef, mapType="tr2commit")
if stateMap is None:
raise Exception("Failed to retrieve the state map for stream {s} (id: {id}).".format(s=state["stream_map"][streamNumberStr]["stream"], id=streamNumber))
logger.info("Merging transaction to info commit mapping for stream number {s} with previous mappings. Ref: {ref}".format(s=streamNumber, ref=stateRef))
for tr in reversed(stateMap):
if tr not in transactionsMap:
transactionsMap[tr] = {}
assert streamNumber not in transactionsMap[tr], "Invariant error! This should be the first time we are adding the stream {s} (id: {id})!".format(s=state["stream_map"][streamNumberStr]["stream"], id=streamNumber)
transactionsMap[tr][streamNumber] = { "state_hash": stateMap[tr] }
del stateMap # Make sure we free this, it could get big...
# Get the data ref's known transactions list.
logger.info("Getting transaction to data commit mapping for stream number {s}. Ref: {ref}".format(s=streamNumber, ref=stateRef))
dataMap = None
dataHashList = self.GetGitLogList(ref=dataRef, gitLogFormat='%H %s %T')
if dataHashList is None:
raise Exception("Couldn't get the commit hash list to process from the Accurev data ref {dataRef}.".format(dataRef=dataRef))
else:
dataMap = OrderedDict()
for line in reversed(dataHashList):
columns = line.split(' ')
trId, commitHash, treeHash = int(columns[2]), columns[0], columns[3]
dataMap[trId] = { "data_hash": commitHash, "data_tree_hash": treeHash }
if dataMap is None:
raise Exception("Failed to retrieve the data map for stream {s} (id: {id}).".format(s=state["stream_map"][streamNumberStr], id=streamNumber))
logger.info("Merging transaction to data commit mapping for stream number {s} with previous mappings. Ref: {ref}".format(s=streamNumber, ref=stateRef))
for tr in reversed(dataMap):
assert tr in transactionsMap and streamNumber in transactionsMap[tr], "Invariant error! The data ref should contain a subset of the state ref information, not a superset!"
transactionsMap[tr][streamNumber]["data_hash"] = dataMap[tr]["data_hash"]
transactionsMap[tr][streamNumber]["data_tree_hash"] = dataMap[tr]["data_tree_hash"]
del dataMap # Make sure we free this, it could get big...
# Other state variables
endTransaction = self.GetDepotHighWaterMark(self.config.accurev.depot)
logger.info("{depot} depot high-water mark is {hwm}.".format(depot=self.config.accurev.depot, hwm=endTransaction))
try:
endTransaction = min(int(endTransaction), int(self.config.accurev.endTransaction))
except:
pass # keywords highest, now or date time are ignored. We only read the config in case
# that the configured end transaction is lower than the lowest high-water-mark we
# have for the depot.
logger.info("Processing transactions for {depot} depot.".format(depot=self.config.accurev.depot))
knownBranchSet = set([ state["stream_map"][x]["branch"] for x in state["stream_map"] ]) # Get the list of all branches that we will create.
prevAffectedStreamMap = None
for tr in sorted(transactionsMap):
if tr <= state["last_transaction"]:
prevAffectedStreamMap = transactionsMap[tr]
del transactionsMap[tr] # ok since sorted returns a sorted list by copy.
continue
elif tr > endTransaction:
break
# Process the transaction!
self.ProcessTransaction(streamMap=state["stream_map"], trId=tr, affectedStreamMap=transactionsMap[tr], prevAffectedStreamMap=prevAffectedStreamMap)
# Store the state of the branches in the repo at this point in time so that we can restore it on next restart.
state["branch_list"] = []
for br in self.gitRepo.branch_list():
if br is None:
logger.error("Error: git.py failed to parse a branch name! Please ensure that the git.repo.branch_list() returns a list with no None items. Non-fatal, continuing.")
continue
elif br.name in knownBranchSet:
# We only care about the branches that we are processing, i.e. the branches that are in the streamMap.
brHash = OrderedDict()
brHash["name"] = br.name
brHash["commit"] = br.shortHash
brHash["is_current"] = br.isCurrent
state["branch_list"].append(brHash)
state["last_transaction"] = tr
if self.WriteFileRef(ref=stateRefspec, text=json.dumps(state)) != True:
raise Exception("Failed to write state to {ref}.".format(ref=stateRefspec))
prevAffectedStreamMap = transactionsMap[tr]
return True
def InitGitRepo(self, gitRepoPath):
gitRootDir, gitRepoDir = os.path.split(gitRepoPath)
if len(gitRepoDir) == 0 and len(gitRootDir) > 0: # The path was slash terminated.
gitRootDir, gitRepoDir = os.path.split(gitRootDir)
if len(gitRootDir) == 0 or os.path.isdir(gitRootDir):
if git.isRepo(gitRepoPath):
# Found an existing repo, just use that.
logger.info( "Using existing git repository." )
return True
logger.info( "Creating new git repository." )
# Create an empty first commit so that we can create branches as we please.
if git.init(path=gitRepoPath) is not None:
logger.info( "Created a new git repository." )
else:
logger.error( "Failed to create a new git repository." )
logger.error( "Try to create it manually using git and then restart:" )
logger.error( " git init {0}".format(gitRepoPath) )
sys.exit(1)
return True
else:
logger.error("{0} not found.\n".format(gitRootDir))
return False
# Returns a string representing the name of the stream on which a transaction was performed.
# If the history (an accurev.obj.History object) is given then it is attempted to retrieve it from the stream list first and
# should this fail then the history object's transaction's virtual version specs are used.
# If the transaction (an accurev.obj.Transaction object) is given it is attempted to retrieve the name of the stream from the
# virtual version spec.
# The `depot` argument is used both for the accurev.show.streams() command and to control its use. If it is None then the
# command isn't used at all which could mean a quicker conversion. When specified it indicates that the name of the stream
# from the time of the transaction should be retrieved. Otherwise the current name of the stream is returned (assumint it was
# renamed at some point).
def GetDestinationStreamName(self, history=None, transaction=None, depot=None):
# depot given as None indicates that accurev.show.streams() command is not to be run.
if history is not None:
if depot is None and len(history.streams) == 1:
return history.streams[0].name
elif len(history.transactions) > 0:
rv = self.GetDestinationStreamName(history=None, transaction=history.transactions[0], depot=depot)
if rv is not None:
return rv
if transaction is not None:
streamName, streamNumber = transaction.affectedStream()
if streamNumber is not None and depot is not None:
try:
stream = accurev.show.streams(depot=depot, stream=streamNumber, timeSpec=transaction.id, useCache=self.config.accurev.UseCommandCache()).streams[0] # could be expensive
if stream is not None and stream.name is not None:
return stream.name
except:
pass
return streamName
return None
# Start
# Begins a new AccuRev to Git conversion process discarding the old repository (if any).
def Start(self, isRestart=False, isSoftRestart=False):
global maxTransactions
if not os.path.exists(self.config.git.repoPath):
logger.error( "git repository directory '{0}' doesn't exist.".format(self.config.git.repoPath) )
logger.error( "Please create the directory and re-run the script.".format(self.config.git.repoPath) )
return 1
if isRestart:
logger.info( "Restarting the conversion operation." )
logger.info( "Deleting old git repository." )
git.delete(self.config.git.repoPath)
# From here on we will operate from the git repository.
if self.config.accurev.commandCacheFilename is not None:
self.config.accurev.commandCacheFilename = os.path.abspath(self.config.accurev.commandCacheFilename)
self.cwd = os.getcwd()
os.chdir(self.config.git.repoPath)
# This try/catch/finally block is here to ensure that we change directory back to self.cwd in order
# to allow other scripts to safely call into this method.
if self.InitGitRepo(self.config.git.repoPath):
self.gitRepo = git.open(self.config.git.repoPath)
status = self.gitRepo.status()
if status is None:
raise Exception("git state failed. Aborting! err: {err}".format(err=self.gitRepo.lastStderr))
elif status.initial_commit:
logger.debug( "New git repository. Initial commit on branch {br}".format(br=status.branch) )
else:
logger.debug( "Opened git repository on branch {br}".format(br=status.branch) )
# Configure the remotes
if self.config.git.remoteMap is not None and len(self.config.git.remoteMap) > 0:
remoteList = self.gitRepo.remote_list()
remoteAddList = [x for x in self.config.git.remoteMap.keys()]
for remote in remoteList:
if remote.name in self.config.git.remoteMap:
r = self.config.git.remoteMap[remote.name]
pushUrl1 = r.url if r.pushUrl is None else r.pushUrl
pushUrl2 = remote.url if remote.pushUrl is None else remote.pushUrl
if r.url != remote.url or pushUrl1 != pushUrl2:
raise Exception("Configured remote {r}'s urls don't match.\nExpected:\n{r1}\nGot:\n{r2}".format(r=remote.name, r1=r, r2=remote))
remoteAddList.remove(remote.name)
else:
logger.debug( "Unspecified remote {remote} ({url}) found. Ignoring...".format(remote=remote.name, url=remote.url) )
for remote in remoteAddList:
r = self.config.git.remoteMap[remote]
if self.gitRepo.remote_add(name=r.name, url=r.url) is None:
raise Exception("Failed to add remote {remote} ({url})!".format(remote=r.name, url=r.url))
logger.info( "Added remote: {remote} ({url}).".format(remote=r.name, url=r.url) )
if r.pushUrl is not None and r.url != r.pushUrl:
if self.gitRepo.remote_set_url(name=r.name, url=r.pushUrl, isPushUrl=True) is None:
raise Exception("Failed to set push url {url} for {remote}!".format(url=r.pushUrl, remote=r.name))
logger.info( "Added push url: {remote} ({url}).".format(remote=r.name, url=r.pushUrl) )
doLogout = False
if self.config.method != 'skip':
acInfo = accurev.info()
isLoggedIn = False
if self.config.accurev.username is None:
# When a username isn't specified we will use any logged in user for the conversion.
isLoggedIn = accurev.ext.is_loggedin(infoObj=acInfo)
else:
# When a username is specified that specific user must be logged in.
isLoggedIn = (acInfo.principal == self.config.accurev.username)
if not isLoggedIn:
# Login the requested user
if accurev.ext.is_loggedin(infoObj=acInfo):
# Different username, logout the other user first.
logoutSuccess = accurev.logout()
logger.info("Accurev logout for '{0}' {1}".format(acInfo.principal, 'succeeded' if logoutSuccess else 'failed'))
loginResult = accurev.login(self.config.accurev.username, self.config.accurev.password)
if loginResult:
logger.info("Accurev login for '{0}' succeeded.".format(self.config.accurev.username))
else:
logger.error("AccuRev login for '{0}' failed.\n".format(self.config.accurev.username))
logger.error("AccuRev message:\n{0}".format(loginResult.errorMessage))
return 1
doLogout = True
else:
logger.info("Accurev user '{0}', already logged in.".format(acInfo.principal))
# If this script is being run on a replica then ensure that it is up-to-date before processing the streams.
accurev.replica.sync()
self.gitRepo.raw_cmd([u'git', u'config', u'--local', u'gc.auto', u'0'])
if self.config.method in [ "deep-hist", "diff", "pop" ]:
logger.info("Retrieveing stream information from Accurev into hidden refs.")
self.RetrieveStreams()
elif self.config.method in [ "skip" ]:
logger.info("Skipping retrieval of stream information from Accurev.")
else:
raise Exception("Unrecognized method '{method}'".format(method=self.config.method))
if not isRestart and isSoftRestart:
logger.info( "Restarting the processing operation." )
if self.gitRepo.raw_cmd([ u'git', u'checkout', u'--orphan', u'__ac2git_temp__' ]) is None:
raise Exception("Failed to checkout empty branch.")
if self.gitRepo.raw_cmd([ u'git', u'read-tree', u'--empty' ]) is None:
raise Exception("Failed to clear the index.")
if self.gitRepo.raw_cmd([ u'git', u'clean', u'-dfx' ]) is None:
raise Exception("Failed to remove untracked files.")
refOutput = self.gitRepo.raw_cmd([ u'git', u'show-ref' ])
if refOutput is None:
raise Exception("Failed to retrieve refs.")
# Delete all the branches and refs that we won't need any more.
streamMap = self.GetStreamMap()
branchList = [streamMap[x] for x in streamMap]
deleteCmd = [ u'git', u'update-ref', u'-d' ]
for refEntry in refOutput.strip().split('\n'):
refEntry = refEntry.strip()
ref = refEntry.strip().split()[1]
delete = False
if ref.startswith('refs/heads/'):
# Find out if it is a tracked branch that we should delete.
branchName = ref[len('refs/heads/'):]
if branchName in branchList:
delete = True
elif ref.startswith('refs/ac2git/state/') or ref in [ 'refs/notes/ac2git', 'refs/notes/accurev' ]:
delete = True
if delete:
if self.gitRepo.raw_cmd( deleteCmd + [ ref ] ) is None:
raise Exception("Failed to delete ref {r}.".format(ref))
logger.debug("Deleting ref {r}".format(r=ref))
else:
#logger.debug("Skipping ref {r}".format(r=ref))
pass
# Checkout the master branch or an empty master branch if it doesn't exist.
if self.gitRepo.raw_cmd([ u'git', u'checkout', u'--orphan', u'master' ]) is None:
if self.gitRepo.raw_cmd([ u'git', u'checkout', u'master' ]) is None:
raise Exception("Failed to checkout master branch.")
if self.config.mergeStrategy in [ "normal" ]:
logger.info("Processing transactions from hidden refs. Merge strategy '{strategy}'.".format(strategy=self.config.mergeStrategy))
self.ProcessTransactions()
elif self.config.mergeStrategy in [ "orphanage" ]:
logger.info("Processing streams from hidden refs. Merge strategy '{strategy}'.".format(strategy=self.config.mergeStrategy))
self.ProcessStreams(orderByStreamNumber=False)
elif self.config.mergeStrategy in [ "skip", None ]:
logger.info("Skipping processing of Accurev data. No git branches will be generated/updated. Merge strategy '{strategy}'.".format(strategy=self.config.mergeStrategy))
pass # Skip the merge step.
else:
raise Exception("Unrecognized merge strategy '{strategy}'".format(strategy=self.config.mergeStrategy))
self.gitRepo.raw_cmd([u'git', u'config', u'--local', u'--unset-all', u'gc.auto'])
if doLogout:
if accurev.logout():
logger.info( "Accurev logout successful." )
else:
logger.error("Accurev logout failed.\n")
return 1
else:
logger.error( "Could not create git repository." )
# Restore the working directory.
os.chdir(self.cwd)
return 0
# ################################################################################################ #
# Script Functions #
# ################################################################################################ #
def DumpExampleConfigFile(outputFilename):
with codecs.open(outputFilename, 'w') as file:
file.write("""<accurev2git>
<!-- AccuRev details:
username: The username that will be used to log into AccuRev and retrieve and populate the history. This is optional and if it isn't provided you will need to login before running this script.
password: The password for the given username. Note that you can pass this in as an argument which is safer and preferred! This too is optional. You can login before running this script and it will work.
depot: The depot in which the stream/s we are converting are located
start-transaction: The conversion will start at this transaction. If interrupted the next time it starts it will continue from where it stopped.
end-transaction: Stop at this transaction. This can be the keword "now" if you want it to convert the repo up to the latest transaction.
command-cache-filename: The filename which will be given to the accurev.py script to use as a local command result cache for the accurev hist, accurev diff and accurev show streams commands.
-->
<accurev
username="joe_bloggs"
password="joanna"
depot="Trunk"
start-transaction="1"
end-transaction="now"
command-cache-filename="command_cache.sqlite3" >
<!-- The stream-list is optional. If not given all streams are processed
exclude-types: A comma separated list of stream types that are to be excluded from being automatically added. Doesn't apply to streams that were explicitly specified.
The stream types have to match the stream types returned by Accurev in its command line client's XML output and a special keyword "hidden" for excluding
hidden streams. Known Accurev stream types are "normal", "workspace", "snapshot", "passthrough".
Example list: "normal, workspace, snapshot, hidden".
-->
<!-- The branch-name attribute is also optional for each stream element. If provided it specifies the git branch name to which the stream will be mapped. -->
<stream-list exclude-types="workspace">
<stream branch-name="some_branch">some_stream</stream>
<stream>some_other_stream</stream>
</stream-list>
</accurev>
<!-- Git details:
repo-path: The system path where you want the git repo to be populated. Note: this folder should already exist.
message-style: [ "normal", "clean", "notes" ] - When set to "normal" accurev transaction information is included
at the end (in the footer) of each commit message. When set to "clean" the transaction comment is the commit message without any
additional information. When set to "notes" a note is added to each commit in the "accurev" namespace (to show them use `git log -notes=accurev`),
with the same accurev information that would have been shown in the commit message footer in the "normal" mode.
message-key: [ "footer", "header" ] - can be either "footer", "header" or omitted and adds a simple key with the destination-stream/transaction-number format either
before (header) or after (footer) the commit message which can be used to quickly go back to accurev and figure out where this transaction came from.
author-is-committer: [ "true", "false" ] - controls if the configured git user or the transaction user is used as the committer for the conversion. Setting
it to "false" makes the conversion use the configured git user to perform the commits while the author information will be taken from the Accurev transaction.
Setting it to "true" will make the conversion set the Accurev transaction user as both the author and the committer.
empty-child-stream-action: [ "merge", "cherry-pick" ] - controls whether the child streams that are affected by a transaction to its parent stream make a "merge" commit (merging the
parent branch into the child branch) or a "cherry-pick" commit that does not contain that linkage. The "merge" commit is only made if the child stream's
contents at this transaction matches the parent streams contents exactly (git diff between the two branches at this transaction would be the same).
source-stream-fast-forward: [ "true", "false" ] - when a promote is made and both the source and destination streams are known a merge commit is made on the destination stream. If
this option is set to "true" then the source stream's branch is moved up to the destination branch after the commit is made, otherwise it is left where
it was before.
source-stream-inferrence: [ "true", "false" ] - Experimental: when the promote transaction has no source stream in formation in its extended XML, which can be the case for older Accurev depots,
try and infer the source of the promote. If a child stream was not changed by the promote but the basis stream (destination of the promote)
ended up being the same as the child stream it is highly likely that this child stream is the source of the promote.
new-basis-is-first-parent: [ "true", "false" ] - If set to true, for a chstream transaction, the new basis transaction will be made the corresponding commit's first parent, while
the previous transaction made in the stream will be the second parent. If set to false the order of the two parents is reversed.
-->
<git
repo-path="/put/the/git/repo/here"
message-style="notes"
message-key="footer"
author-is-committer="true"
empty-child-stream-action="merge"
source-stream-fast-forward="false"
source-stream-inferrence="false"
new-basis-is-first-parent="true" >
<!-- Optional: You can add remote elements to specify the remotes to which the converted branches will be pushed. The push-url attribute is optional. -->
<remote name="origin" url="https://github.com/orao/ac2git.git" push-url="https://github.com/orao/ac2git.git" />
<remote name="backup" url="https://github.com/orao/ac2git.git" />
</git>
<method>deep-hist</method> <!-- The method specifies what approach is taken to retrieve information from Accurev. Allowed values are 'deep-hist', 'diff', 'pop' and 'skip'.
- deep-hist: Works by using the accurev.ext.deep_hist() function to return a list of transactions that could have affected the stream.
It then performs a diff between the transactions and only populates the files that have changed like the 'diff' method.
It is the quickest method but is only as reliable as the information that accurev.ext.deep_hist() provides.
- diff: This method's first commit performs a full `accurev pop` command on either the streams `mkstream` transaction or the start
transaction (whichever is highest). Subsequently it increments the transaction number by one and performs an
`accurev diff -a -i -v <stream> -V <stream>` to find all changed files. If not files have changed it takes the next transaction
and performs the diff again. Otherwise, any files returned by the diff are deleted and an `accurev pop -R` performed which only
downloads the changed files. This is slower than the 'deep-hist' method but faster than the 'pop' method by a large margin.
It's reliability is directly dependent on the reliability of the `accurev diff` command.
- pop: This is the naive method which doesn't care about changes and always performs a full deletion of the whole tree and a complete
`accurev pop` command. It is a lot slower than the other methods for streams with a lot of files but should work even with older
accurev releases. This is the method originally implemented by Ryan LaNeve in his https://github.com/rlaneve/accurev2git repo.
- skip: This will skip the querying of the Accurev server for information about the streams. It makes sense in an already converted repo
for which you only want to reprocess the already retrieved information without getting anything new.
-->
<merge-strategy>normal</merge-strategy> <!-- The merge-strategy specified how the information downloaded from the streams in accurev is processed to form git branches.
It can be one of the following options ["skip", "normal", "orphanage"]:
'skip' - Skips the processing step. The git repo won't have any visible git branches but will have hidden internal state which
tracks the accurev depot. When a merge strategy is next set to something other than 'skip' already retrieved information
won't be redownloaded from accurev and will be processed without executing any accurev commands (won't query the accurev server).
'normal' - Performs merges using a straightforward but imprefect algorithm. The algorithm has the preferred balance between performance
the resulting merges in git.
'orphanage' - Performs no merges but adds orphaned git branches which track the accurev streams. This is the old conversion method and is
here for legacy reasons. If streams are added later the resulting git repository commit hashes do not change but it will be
difficult to merge the branches in git at a later stage.
-->
<logfile>accurev2git.log</logfile>
<!-- The user maps are used to convert users from AccuRev into git. Please spend the time to fill them in properly. -->
<usermaps filename="usermaps.config.xml">
<!-- The filename attribute is optional and if included the provided file is opened and the usermaps from that file are used to complement
the usermaps provided below (only accurev users that haven't been specified below are loaded from the file). The file can have one or
more usermaps elements like this one, each of which can point to another file of its own. -->
<!-- The timezone attribute is optional. All times are retrieved in UTC from AccuRev and will converted to the local timezone by default.
If you want to override this behavior then set the timezone to either an Olson timezone string (e.g. Europe/Belgrade) or a git style
timezone string (e.g. +0100, sign and 4 digits required). -->
<map-user><accurev username="joe_bloggs" /><git name="Joe Bloggs" email="joe@bloggs.com" timezone="Europe/Belgrade" /></map-user>
<map-user><accurev username="joanna_bloggs" /><git name="Joanna Bloggs" email="joanna@bloggs.com" timezone="+0500" /></map-user>
<map-user><accurev username="joey_bloggs" /><git name="Joey Bloggs" email="joey@bloggs.com" /></map-user>
</usermaps>
</accurev2git>
""")
return 0
return 1
def AutoConfigFile(filename, args, preserveConfig=False):
if os.path.exists(filename):
# Backup the file
backupNumber = 1
backupFilename = "{0}.{1}".format(filename, backupNumber)
while os.path.exists(backupFilename):
backupNumber += 1
backupFilename = "{0}.{1}".format(filename, backupNumber)
shutil.copy2(filename, backupFilename)
config = Config.fromfile(filename=args.configFilename)
if config is None:
config = Config(accurev=Config.AccuRev(), git=Config.Git(repoPath=None), usermaps=[], logFilename=None)
elif not preserveConfig:
# preserve only the accurev username and passowrd
arUsername = config.accurev.username
arPassword = config.accurev.password
# reset config
config = Config(accurev=Config.AccuRev(), git=Config.Git(repoPath=None), usermaps=[], logFilename=None)
config.accurev.username = arUsername
config.accurev.password = arPassword
SetConfigFromArgs(config, args)
if config.accurev.username is None:
if config.accurev.username is None:
logger.error("No accurev username provided for auto-configuration.")
return 1
else:
info = accurev.info()
if info.principal != config.accurev.username:
if config.accurev.password is None:
logger.error("No accurev password provided for auto-configuration. You can either provide one on the command line, in the config file or just login to accurev before running the script.")
return 1
if not accurev.login(config.accurev.username, config.accurev.password):
logger.error("accurev login for '{0}' failed.".format(config.accurev.username))
return 1
elif config.accurev.password is None:
config.accurev.password = ''
if config.accurev.depot is None:
depots = accurev.show.depots()
if depots is not None and depots.depots is not None and len(depots.depots) > 0:
config.accurev.depot = depots.depots[0].name
logger.info("No depot specified. Selecting first depot available: {0}.".format(config.accurev.depot))
else:
logger.error("Failed to find an accurev depot. You can specify one on the command line to resolve the error.")
return 1
if config.git.repoPath is None:
config.git.repoPath = './{0}'.format(config.accurev.depot)
if config.logFilename is None:
config.logFilename = 'ac2git.log'
with codecs.open(filename, 'w') as file:
file.write("""<accurev2git>
<!-- AccuRev details:
username: The username that will be used to log into AccuRev and retrieve and populate the history
password: The password for the given username. Note that you can pass this in as an argument which is safer and preferred!
depot: The depot in which the stream/s we are converting are located
start-transaction: The conversion will start at this transaction. If interrupted the next time it starts it will continue from where it stopped.
end-transaction: Stop at this transaction. This can be the keword "now" if you want it to convert the repo up to the latest transaction.
command-cache-filename: The filename which will be given to the accurev.py script to use as a local command result cache for the accurev hist, accurev diff and accurev show streams commands.
-->
<accurev
username="{accurev_username}"
password="{accurev_password}"
depot="{accurev_depot}"
start-transaction="{start_transaction}"
end-transaction="{end_transaction}"
command-cache-filename="command_cache.sqlite3" >
<!-- The stream-list is optional. If not given all streams are processed
exclude-types: A comma separated list of stream types that are to be excluded from being automatically added. Doesn't apply to streams that were explicitly specified.
The stream types have to match the stream types returned by Accurev in its command line client's XML output and a special keyword "hidden" for excluding
hidden streams. Known Accurev stream types are "normal", "workspace", "snapshot", "passthrough".
Example list: "normal, workspace, snapshot, hidden".
-->
<!-- The branch-name attribute is also optional for each stream element. If provided it specifies the git branch name to which the stream will be mapped. -->
<stream-list{exclude_types}>""".format(accurev_username=config.accurev.username,
accurev_password=config.accurev.password,
accurev_depot=config.accurev.depot,
start_transaction=1, end_transaction="now",
exclude_types="" if config.excludeStreamTypes is None else " exclude-types=\"{0}\"".format(", ".join(config.excludeStreamTypes))))
if preserveConfig:
for stream in config.accurev.streamMap:
file.write("""
<stream branch-name="{branch_name}">{stream_name}</stream>""".format(stream_name=stream, branch_name=config.accurev.streamMap[stream]))
streams = accurev.show.streams(depot=config.accurev.depot, useCache=self.config.accurev.UseCommandCache())
if streams is not None and streams.streams is not None:
for stream in streams.streams:
if not (preserveConfig and stream in config.accurev.streamMap):
file.write("""
<stream branch-name="accurev/{stream_name}">{stream_name}</stream>""".format(stream_name=stream.name))
# TODO: Add depot and start/end transaction overrides for each stream...
file.write("""
</stream-list>
</accurev>
<!-- Git details:
repo-path: The system path where you want the git repo to be populated. Note: this folder should already exist.
message-style: [ "normal", "clean", "notes" ] - When set to "normal" accurev transaction information is included
at the end (in the footer) of each commit message. When set to "clean" the transaction comment is the commit message without any
additional information. When set to "notes" a note is added to each commit in the "accurev" namespace (to show them use `git log -notes=accurev`),
with the same accurev information that would have been shown in the commit message footer in the "normal" mode.
message-key: [ "footer", "header" ] - can be either "footer", "header" or omitted and adds a simple key with the destination-stream/transaction-number format either
before (header) or after (footer) the commit message which can be used to quickly go back to accurev and figure out where this transaction came from.
author-is-committer: [ "true", "false" ] - controls if the configured git user or the transaction user is used as the committer for the conversion. Setting
it to "false" makes the conversion use the configured git user to perform the commits while the author information will be taken from the Accurev transaction.
Setting it to "true" will make the conversion set the Accurev transaction user as both the author and the committer.
empty-child-stream-action: [ "merge", "cherry-pick" ] - controls whether the child streams that are affected by a transaction to its parent stream make a "merge" commit (merging the
parent branch into the child branch) or a "cherry-pick" commit that does not contain that linkage. The "merge" commit is only made if the child stream's
contents at this transaction matches the parent streams contents exactly (git diff between the two branches at this transaction would be the same).
source-stream-fast-forward: [ "true", "false" ] - when a promote is made and both the source and destination streams are known a merge commit is made on the destination stream. If
this option is set to "true" then the source stream's branch is moved up to the destination branch after the commit is made, otherwise it is left where
it was before.
source-stream-inferrence: [ "true", "false" ] - Experimental: when the promote transaction has no source stream in formation in its extended XML, which can be the case for older Accurev depots,
try and infer the source of the promote. If a child stream was not changed by the promote but the basis stream (destination of the promote)
ended up being the same as the child stream it is highly likely that this child stream is the source of the promote.
new-basis-is-first-parent: [ "true", "false" ] - If set to true, for a chstream transaction, the new basis transaction will be made the corresponding commit's first parent, while
the previous transaction made in the stream will be the second parent. If set to false the order of the two parents is reversed.
-->
<git
repo-path="{git_repo_path}"
message-style="{message_style}"
message-key="{message_key}"
author-is-committer="{author_is_committer}"
empty-child-stream-action="{empty_child_stream_action}"
source-stream-fast-forward="{source_stream_fast_forward}"
source-stream-inferrence="{source_stream_inferrence}"
new-basis-is-first-parent="{new_basis_is_first_parent}" >""".format(git_repo_path=config.git.repoPath,
message_style=config.git.messageStyle if config.git.messageStyle is not None else 'notes',
message_key=config.git.messageKey if config.git.messageKey is not None else 'footer',
author_is_committer="true" if config.git.authorIsCommitter else "false",
empty_child_stream_action=config.git.emptyChildStreamAction,
source_stream_fast_forward="true" if config.git.sourceStreamFastForward else "false",
source_stream_inferrence="true" if config.git.sourceStreamInferrence else "false",
new_basis_is_first_parent="true" if config.git.newBasisIsFirstParent else "false"))
if config.git.remoteMap is not None:
for remoteName in remoteMap:
remote = remoteMap[remoteName]
file.write(""" <remote name="{name}" url="{url}"{push_url_string} />""".format(name=remote.name, url=name.url, push_url_string='' if name.pushUrl is None else ' push-url="{url}"'.format(url=name.pushUrl)))
file.write(""" </git>
<method>{method}</method>
<merge-strategy>{merge_strategy}</merge-strategy>
<logfile>{log_filename}</logfile>
<!-- The user maps are used to convert users from AccuRev into git. Please spend the time to fill them in properly. -->""".format(method=config.method, merge_strategy=config.mergeStrategy, log_filename=config.logFilename))
file.write("""
<usermaps filename="usermaps.config.xml">
<!-- The filename attribute is optional and if included the provided file is opened and the usermaps from that file are used to complement
the usermaps provided below (only accurev users that haven't been specified below are loaded from the file). The file can have one or
more usermaps elements like this one, each of which can point to another file of its own. -->
<!-- The timezone attribute is optional. All times are retrieved in UTC from AccuRev and will converted to the local timezone by default.
If you want to override this behavior then set the timezone to either an Olson timezone string (e.g. Europe/Belgrade) or a git style
timezone string (e.g. +0100, sign and 4 digits required). -->
<!-- e.g.
<map-user><accurev username="joe_bloggs" /><git name="Joe Bloggs" email="joe@bloggs.com" timezone="Europe/Belgrade" /></map-user>
<map-user><accurev username="joanna_bloggs" /><git name="Joanna Bloggs" email="joanna@bloggs.com" timezone="+0500" /></map-user>
<map-user><accurev username="joey_bloggs" /><git name="Joey Bloggs" email="joey@bloggs.com" /></map-user>
-->""")
if preserveConfig:
for usermap in config.usermaps:
file.write("""
<map-user><accurev username="{accurev_username}" /><git name="{git_name}" email="{git_email}"{timezone_tag} /></map-user>""".format(accurev_username=usermap.accurevUsername, git_name=usermap.gitName, git_email=usermap.gitEmail, timezone_tag="" if usermap.timezone is None else ' timezone="{0}"'.format(usermap.timezone)))
users = accurev.show.users()
if users is not None and users.users is not None:
for user in users.users:
if not (preserveConfig and user.name in [x.accurevUsername for x in config.usermaps]):
file.write("""
<map-user><accurev username="{accurev_username}" /><git name="{accurev_username}" email="" /></map-user>""".format(accurev_username=user.name))
file.write("""
</usermaps>
</accurev2git>
""")
return 0
return 1
def ToUnixPath(path):
rv = SplitPath(path)
if rv is not None:
if rv[0] == '/':
rv = '/' + '/'.join(rv[1:])
else:
rv = '/'.join(rv)
return rv
def SplitPath(path):
rv = None
if path is not None:
path = str(path)
rv = []
drive, path = os.path.splitdrive(path)
head, tail = os.path.split(path)
while len(head) > 0 and head != '/' and head != '\\': # For an absolute path the starting slash isn't removed from head.
rv.append(tail)
head, tail = os.path.split(head)
if len(tail) > 0:
rv.append(tail)
if len(head) > 0: # For absolute paths.
rv.append(head)
if len(drive) > 0:
rv.append(drive)
rv.reverse()
return rv
def TryGetAccurevUserlist(username, password):
try:
info = accurev.info()
isLoggedIn = False
if username is not None and info.principal != username:
if password is not None:
isLoggedIn = accurev.login(username, password)
else:
isLoggedIn = accurev.ext.is_loggedin()
userList = None
if isLoggedIn:
users = accurev.show.users()
if users is not None:
userList = []
for user in users.users:
userList.append(user.name)
return userList
except:
return None
def GetMissingUsers(config):
# Try and validate accurev usernames
userList = TryGetAccurevUserlist(config.accurev.username, config.accurev.password)
missingList = None
if config is not None and config.usermaps is not None:
missingList = []
if userList is not None and len(userList) > 0:
for user in userList:
found = False
for usermap in config.usermaps:
if user == usermap.accurevUsername:
found = True
break
if not found:
missingList.append(user)
return missingList
def PrintMissingUsers(config):
missingUsers = GetMissingUsers(config)
if missingUsers is not None:
if len(missingUsers) > 0:
missingUsers.sort()
logger.info("Unmapped accurev users:")
for user in missingUsers:
logger.info(" {0}".format(user))
return True
return False
def SetConfigFromArgs(config, args):
if args.accurevUsername is not None:
config.accurev.username = args.accurevUsername
if args.accurevPassword is not None:
config.accurev.password = args.accurevPassword
if args.accurevDepot is not None:
config.accurev.depot = args.accurevDepot
if args.gitRepoPath is not None:
config.git.repoPath = args.gitRepoPath
if args.emptyChildStreamAction is not None:
config.git.emptyChildStreamAction = args.emptyChildStreamAction
if args.sourceStreamFastForward is not None:
config.git.sourceStreamFastForward = (args.sourceStreamFastForward == "true")
if args.sourceStreamInferrence is not None:
config.git.sourceStreamInferrence = (args.sourceStreamInferrence == "true")
if args.conversionMethod is not None:
config.method = args.conversionMethod
if args.mergeStrategy is not None:
config.mergeStrategy = args.mergeStrategy
if args.logFile is not None:
config.logFilename = args.logFile
def ValidateConfig(config):
# Validate the program args and configuration up to this point.
isValid = True
if config.accurev.depot is None:
logger.error("No AccuRev depot specified.\n")
isValid = False
if config.git.repoPath is None:
logger.error("No Git repository specified.\n")
isValid = False
return isValid
def InitializeLogging(filename, level):
global logger
if logger is None:
logger = logging.getLogger('ac2git')
logger.setLevel(level)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(level)
consoleFormatter = logging.Formatter('%(message)s')
consoleHandler.setFormatter(consoleFormatter)
logger.addHandler(consoleHandler)
if filename is not None:
fileHandler = logging.FileHandler(filename=filename)
fileHandler.setLevel(level)
fileFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileHandler.setFormatter(fileFormatter)
logger.addHandler(fileHandler)
return True
return False
def PrintConfigSummary(config, filename):
if config is not None:
logger.info('Config info:')
logger.info(' now: {0}'.format(datetime.now()))
logger.info(' filename: {0}'.format(filename))
logger.info(' git:')
logger.info(' repo path: {0}'.format(config.git.repoPath))
logger.info(' message style: {0}'.format(config.git.messageStyle))
logger.info(' message key: {0}'.format(config.git.messageKey))
logger.info(' author is committer: {0}'.format(config.git.authorIsCommitter))
logger.info(' empty child stream action: {0}'.format(config.git.emptyChildStreamAction))
logger.info(' source stream fast forward: {0}'.format(config.git.sourceStreamFastForward))
logger.info(' source stream inferrence: {0}'.format(config.git.sourceStreamInferrence))
logger.info(' new basis is first parent: {0}'.format(config.git.newBasisIsFirstParent))
if config.git.remoteMap is not None:
for remoteName in config.git.remoteMap:
remote = config.git.remoteMap[remoteName]
logger.info(' remote: {name} {url}{push_url}'.format(name=remote.name, url=remote.url, push_url = '' if remote.pushUrl is None or remote.url == remote.pushUrl else ' (push:{push_url})'.format(push_url=remote.pushUrl)))
logger.info(' accurev:')
logger.info(' depot: {0}'.format(config.accurev.depot))
if config.accurev.streamMap is not None:
logger.info(' stream list:')
for stream in config.accurev.streamMap:
logger.info(' - {0} -> {1}'.format(stream, config.accurev.streamMap[stream]))
else:
logger.info(' stream list: all included')
logger.info(' start tran.: #{0}'.format(config.accurev.startTransaction))
logger.info(' end tran.: #{0}'.format(config.accurev.endTransaction))
logger.info(' username: {0}'.format(config.accurev.username))
logger.info(' command cache: {0}'.format(config.accurev.commandCacheFilename))
logger.info(' ignored transaction types (hard-coded): {0}'.format(", ".join(ignored_transaction_types)))
if config.accurev.excludeStreamTypes is not None:
logger.info(' excluded stream types: {0}'.format(", ".join(config.accurev.excludeStreamTypes)))
logger.info(' method: {0}'.format(config.method))
logger.info(' merge strategy: {0}'.format(config.mergeStrategy))
logger.info(' usermaps: {0}'.format(len(config.usermaps)))
logger.info(' log file: {0}'.format(config.logFilename))
logger.info(' verbose: {0}'.format( (logger.getEffectiveLevel() == logging.DEBUG) ))
def PrintStatus(state):
# Setup Git - TODO: this was copied from Accurev2Git.Start(), remove this duplication at some point...
try:
state.gitRepo = git.open(state.config.git.repoPath)
status = state.gitRepo.status()
if status is None:
logger.info("git state failed. Aborting! err: {err}".format(err=state.gitRepo.lastStderr))
return
except:
logger.exception("git state failed. Aborting! err: {err}".format(err=state.gitRepo.lastStderr))
return
# end TODO
# Setup AccuRev - TODO: this was copied from Accurev2Git.Start(), remove this duplication at some point...
acInfo = accurev.info()
doLogout = False
isLoggedIn = False
if state.config.accurev.username is None:
# When a username isn't specified we will use any logged in user for the conversion.
isLoggedIn = accurev.ext.is_loggedin(infoObj=acInfo)
else:
# When a username is specified that specific user must be logged in.
isLoggedIn = (acInfo.principal == state.config.accurev.username)
if not isLoggedIn:
# Login the requested user
if accurev.ext.is_loggedin(infoObj=acInfo):
# Different username, logout the other user first.
logoutSuccess = accurev.logout()
logger.info("Accurev logout for '{0}' {1}".format(acInfo.principal, 'succeeded' if logoutSuccess else 'failed'))
loginResult = accurev.login(state.config.accurev.username, state.config.accurev.password)
if loginResult:
logger.info("Accurev login for '{0}' succeeded.".format(state.config.accurev.username))
else:
logger.error("AccuRev login for '{0}' failed.\n".format(state.config.accurev.username))
logger.error("AccuRev message:\n{0}".format(loginResult.errorMessage))
return 1
doLogout = True
else:
logger.info("Accurev user '{0}', already logged in.".format(acInfo.principal))
# If this script is being run on a replica then ensure that it is up-to-date before processing the streams.
accurev.replica.sync()
# end TODO
# Get all of the streams that have been recorded in Git's hidden refs.
logger.info("Parsing hidden refs for downloaded AccuRev streams.")
refList = state.GetAllKnownStreamRefs(state.config.accurev.depot)
refMap = {}
for ref in refList:
depotNumber, streamNumber, remainder = state.ParseStreamRef(ref=ref)
if streamNumber not in refMap:
refMap[streamNumber] = { "depot": depotNumber }
if remainder in [ "info", "data" ]:
lastTrId = state.GetTransactionForRef(ref=ref)
refMap[streamNumber][remainder] = lastTrId
refMap[streamNumber][remainder + "-ref"] = ref
elif remainder == "hwm":
hwmRefText = state.ReadFileRef(ref=ref)
if hwmRefText is not None and len(hwmRefText) > 0:
hwmMetadata = json.loads(hwmRefText)
hwm = hwmMetadata.get("high-water-mark")
refMap[streamNumber][remainder] = int(hwm)
else:
logger.warning("Unknown ref: {0}".format(ref))
# Get the configured streams list
logger.info("Parsing configured AccuRev streams.")
streamMap = state.GetStreamMap()
streamList = []
logger.info("Stream information:")
for streamName, branchName in streamMap.items():
stream = state.GetStreamByName(state.config.accurev.depot, streamName)
streamList.append(stream)
symbol = '+'
info = 'no downloaded data'
if stream.streamNumber in refMap:
symbol = '='
refData = refMap[stream.streamNumber]
info, data, hwm = refData.get("info"), refData.get("data"), refData.get("hwm")
info = 'downloaded up to transaction {tr} (info: {info}, data: {data}, high-water-mark: {hwm})'.format(tr=hwm, info=info, data=data, hwm=hwm)
del refMap[stream.streamNumber]
logger.info(" {symbol} stream: {name} (id: {number}) - {info}".format(symbol=symbol, name=stream.name, number=stream.streamNumber, info=info))
# Print the information about streams that have downloaded data but are not configured
streams = None
for streamNumber, refData in refMap.items():
info, data, hwm = refData.get("info"), refData.get("data"), refData.get("hwm")
info = 'downloaded up to transaction {tr} (info: {info}, data: {data}, high-water-mark: {hwm})'.format(tr=hwm, info=info, data=data, hwm=hwm)
streamName = '[unknown]'
if "info-ref" in refData:
if streams is None:
streamsXml, streams = self.GetStreamsInfo(ref=refData["info-ref"])
stream = streams.getStream(streamNumber)
if stream is None:
streamsXml, streams = self.GetStreamsInfo(ref=refData["info-ref"])
stream = streams.getStream(streamNumber)
if stream is not None:
streamName = stream.name
logger.info(" - stream: {name} (id: {number}) - {info}".format(name=streamName, number=streamNumber, info=info))
if doLogout:
accurev.logout()
def PrintRunningTime(referenceTime):
outMessage = ''
# Custom formatting of the timestamp
m, s = divmod((datetime.now() - referenceTime).total_seconds(), 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if d > 0:
outMessage += "{d: >2d}d, ".format(d=int(d))
outMessage += "{h: >2d}:{m:0>2d}:{s:0>5.2f}".format(h=int(h), m=int(m), s=s)
logger.info("Running time was {timeStr}".format(timeStr=outMessage))
# ################################################################################################ #
# Script Main #
# ################################################################################################ #
def AccuRev2GitMain(argv):
global state
configFilename = Config.FilenameFromScriptName(argv[0])
defaultExampleConfigFilename = '{0}.example.xml'.format(configFilename)
# Set-up and parse the command line arguments. Examples from https://docs.python.org/dev/library/argparse.html
parser = argparse.ArgumentParser(description="Conversion tool for migrating AccuRev repositories into Git. Configuration of the script is done with a configuration file whose filename is `{0}` by default. The filename can be overridden by providing the `-c` option described below. Command line arguments, if given, override the equivalent options in the configuration file.".format(configFilename))
parser.add_argument('-c', '--config', dest='configFilename', default=configFilename, metavar='<config-filename>', help="The XML configuration file for this script. This file is required for the script to operate. By default this filename is set to be `{0}`.".format(configFilename))
parser.add_argument('-u', '--accurev-username', dest='accurevUsername', metavar='<accurev-username>', help="The username which will be used to retrieve and populate the history from AccuRev.")
parser.add_argument('-p', '--accurev-password', dest='accurevPassword', metavar='<accurev-password>', help="The password for the provided accurev username.")
parser.add_argument('-d', '--accurev-depot', dest='accurevDepot', metavar='<accurev-depot>', help="The AccuRev depot in which the streams that are being converted are located. This script currently assumes only one depot is being converted at a time.")
parser.add_argument('-g', '--git-repo-path', dest='gitRepoPath', metavar='<git-repo-path>', help="The system path to an existing folder where the git repository will be created.")
parser.add_argument('-M', '--method', dest='conversionMethod', choices=['skip', 'pop', 'diff', 'deep-hist'], metavar='<conversion-method>', help="Specifies the method which is used to perform the conversion. Can be either 'pop', 'diff' or 'deep-hist'. 'pop' specifies that every transaction is populated in full. 'diff' specifies that only the differences are populated but transactions are iterated one at a time. 'deep-hist' specifies that only the differences are populated and that only transactions that could have affected this stream are iterated.")
parser.add_argument('-S', '--merge-strategy', dest='mergeStrategy', choices=['skip', 'normal', 'orphanage'], metavar='<merge-strategy>', help="Sets the merge strategy which dictates how the git repository branches are generated. Depending on the value chosen the branches can be orphan branches ('orphanage' strategy) or have merges where promotes have occurred with the 'normal' strategy. The 'skip' strategy forces the script to skip making the git branches and will cause it to only do the retrieving of information from accurev for use with some strategy at a later date.")
parser.add_argument('-E', '--empty-child-stream-action', dest='emptyChildStreamAction', choices=['merge', 'cherry-pick'], metavar='<empty-child-stream-action>', help="When a promote to a parent stream affects the child stream and the result of the two commits on the two branches in git results in a git diff operation returning empty then it could be said that this was in-fact a merge (of sorts). This option controlls whether such situations are treated as cherry-picks or merges in git.")
parser.add_argument('-K', '--source-stream-fast-forward', dest='sourceStreamFastForward', choices=['true', 'false'], metavar='<source-stream-fast-forward>', help="When both the source and destination streams are known this flag controlls whether the source branch is moved to the resulting merge commit (the destination branch is always updated/moved to this commit). This has an effect of making the history look like the letter K where the promotes come in and then branch from the merge commit instead of the previous commit which occured on the branch.")
parser.add_argument('--exp-source-stream-inferrence', dest='sourceStreamInferrence', choices=['true', 'false'], metavar='<source-stream-inferrence>', help="Experimental: When the source stream (source of a promote) is unknown, this flag controlls whether script will attempt to infer the source stream from the contents of the child streams. If a child stream was not changed by the promote but the basis stream (destination of the promote) ended up being the same as the child stream it is highly likely that this child stream is the source of the promote.")
parser.add_argument('-R', '--restart', dest='restart', action='store_const', const=True, help="Discard any existing conversion and start over.")
parser.add_argument('-r', '--soft-restart', dest='softRestart', action='store_const', const=True, help="Discard any existing processed branches and start the processing from the downloaded accurev data anew.")
parser.add_argument('-v', '--verbose', dest='debug', action='store_const', const=True, help="Print the script debug information. Makes the script more verbose.")
parser.add_argument('-L', '--log-file', dest='logFile', metavar='<log-filename>', help="Sets the filename to which all console output will be logged (console output is still printed).")
parser.add_argument('-q', '--no-log-file', dest='disableLogFile', action='store_const', const=True, help="Do not log info to the log file. Alternatively achieved by not specifying a log file filename in the configuration file.")
parser.add_argument('-l', '--reset-log-file', dest='resetLogFile', action='store_const', const=True, help="Instead of appending new log info to the file truncate it instead and start over.")
parser.add_argument('--example-config', nargs='?', dest='exampleConfigFilename', const=defaultExampleConfigFilename, default=None, metavar='<example-config-filename>', help="Generates an example configuration file and exits. If the filename isn't specified a default filename '{0}' is used. Commandline arguments, if given, override all options in the configuration file.".format(defaultExampleConfigFilename, configFilename))
parser.add_argument('-m', '--check-missing-users', dest='checkMissingUsers', choices=['warn', 'strict', 'ignore'], default='strict', help="When 'ignore' is used it disables the missing user check. When either 'warn' or 'strict' are used a list of usernames that are in accurev but were not found in the configured usermap is printed. Using 'strict' will cause the script to abort the conversion process if there are any missing users while using 'warn' will not.")
parser.add_argument('--auto-config', nargs='?', dest='autoConfigFilename', const=configFilename, default=None, metavar='<config-filename>', help="Auto-generate the configuration file from known AccuRev information. It is required that an accurev username and password are provided either in an existing config file or via the -u and -p options. If there is an existing config file it is backed up and only the accurev username and password will be copied to the new configuration file. If you wish to preserve the config but add more information to it then it is recommended that you use the --fixup-config option instead.")
parser.add_argument('--fixup-config', nargs='?', dest='fixupConfigFilename', const=configFilename, default=None, metavar='<config-filename>', help="Fixup the configuration file by adding updated AccuRev information. It is the same as the --auto-config option but the existing configuration file options are preserved. Other command line arguments that are provided will override the existing configuration file options for the new configuration file.")
parser.add_argument('-T', '--track', dest='track', action='store_const', const=True, help="Tracking mode. Sets the 'tracking' flag which makes the script run continuously in a loop. The configuration file is reloaded on each iteration so changes are picked up. Only makes sense for when you want this script to continuously track the accurev depot's newest transactions (i.e. you're using 'highest' or 'now' as your end transactions).")
parser.add_argument('-I', '--tracking-intermission', nargs='?', dest='intermission', type=int, const=300, default=0, metavar='<intermission-sec>', help="Sets the intermission (in seconds) between consecutive iterations of the script in 'tracking' mode. The script sleeps for <intermission-sec> seconds before continuing the next conversion. This is useless if the --track option is not used.")
parser.add_argument('-s', '--status', dest='status', action='store_true', default=False, help="Print the status of the conversion and exit.")
args = parser.parse_args()
# Dump example config if specified
doEarlyReturn = False
earlyReturnCode = 0
if args.exampleConfigFilename is not None:
earlyReturnCode = DumpExampleConfigFile(args.exampleConfigFilename)
doEarlyReturn = True
if args.autoConfigFilename is not None:
earlyReturnCode = AutoConfigFile(filename=args.autoConfigFilename, args=args, preserveConfig=False)
doEarlyReturn = True
if args.fixupConfigFilename is not None:
earlyReturnCode = AutoConfigFile(filename=args.fixupConfigFilename, args=args, preserveConfig=True)
doEarlyReturn = True
if doEarlyReturn:
return earlyReturnCode
loggerConfig = None
while True:
try:
startTime = datetime.now() # used to compute the running time of the script.
# Load the config file
config = Config.fromfile(filename=args.configFilename)
if config is None:
sys.stderr.write("Config file '{0}' not found.\n".format(args.configFilename))
return 1
elif config.git is not None:
if not os.path.isabs(config.git.repoPath):
config.git.repoPath = os.path.abspath(config.git.repoPath)
# Set the overrides for in the configuration from the arguments
SetConfigFromArgs(config=config, args=args)
if not ValidateConfig(config):
return 1
# Configure logging, but do it only once.
if logger is None:
loggingLevel = logging.DEBUG if args.debug else logging.INFO
if not InitializeLogging(config.logFilename, loggingLevel):
sys.stderr.write("Failed to initialize logging. Exiting.\n")
return 1
# Start the script
state = AccuRev2Git(config)
PrintConfigSummary(state.config, args.configFilename)
if args.status:
PrintMissingUsers(state.config)
PrintStatus(state)
return 0
if args.checkMissingUsers in [ "warn", "strict" ]:
if PrintMissingUsers(state.config) and args.checkMissingUsers == "strict":
sys.stderr.write("Found missing users. Exiting.\n")
return 1
logger.info("Restart:" if args.restart else "Soft restart:" if args.softRestart else "Start:")
rv = state.Start(isRestart=args.restart, isSoftRestart=args.softRestart)
PrintRunningTime(referenceTime=startTime)
if not args.track:
break
elif args.intermission is not None:
print("Tracking mode enabled: sleep for {0} seconds.".format(args.intermission))
time.sleep(args.intermission)
print("Tracking mode enabled: Continuing conversion.")
except:
if logger is not None:
PrintRunningTime(referenceTime=startTime)
logger.exception("The script has encountered an exception, aborting!")
raise
return rv
# ################################################################################################ #
# Script Start #
# ################################################################################################ #
if __name__ == "__main__":
AccuRev2GitMain(sys.argv)
| 64.324232
| 628
| 0.621941
|
e3e80de428a0ca14173e5a632a53d6bc81d00f8c
| 4,730
|
py
|
Python
|
cinder/tests/unit/objects/test_backup.py
|
tlakshman26/cinder-https-changes
|
c688a0af521e8679ac8f68d3dd035fe998e736d3
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/unit/objects/test_backup.py
|
tlakshman26/cinder-https-changes
|
c688a0af521e8679ac8f68d3dd035fe998e736d3
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/unit/objects/test_backup.py
|
tlakshman26/cinder-https-changes
|
c688a0af521e8679ac8f68d3dd035fe998e736d3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder import objects
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
fake_backup = {
'id': '1',
'volume_id': 'fake_id',
'status': "creating",
'size': 1,
'display_name': 'fake_name',
'display_description': 'fake_description',
'user_id': 'fake_user',
'project_id': 'fake_project',
'temp_volume_id': None,
'temp_snapshot_id': None,
}
class TestBackup(test_objects.BaseObjectsTestCase):
def setUp(self):
super(TestBackup, self).setUp()
# NOTE (e0ne): base tests contains original RequestContext from
# oslo_context. We change it to our RequestContext implementation
# to have 'elevated' method
self.context = context.RequestContext(self.user_id, self.project_id,
is_admin=False)
@staticmethod
def _compare(test, db, obj):
for field, value in db.items():
test.assertEqual(db[field], obj[field])
@mock.patch('cinder.db.backup_get', return_value=fake_backup)
def test_get_by_id(self, backup_get):
backup = objects.Backup.get_by_id(self.context, 1)
self._compare(self, fake_backup, backup)
@mock.patch('cinder.db.backup_create', return_value=fake_backup)
def test_create(self, backup_create):
backup = objects.Backup(context=self.context)
backup.create()
self.assertEqual(fake_backup['id'], backup.id)
self.assertEqual(fake_backup['volume_id'], backup.volume_id)
@mock.patch('cinder.db.backup_update')
def test_save(self, backup_update):
backup = objects.Backup._from_db_object(
self.context, objects.Backup(), fake_backup)
backup.display_name = 'foobar'
backup.save()
backup_update.assert_called_once_with(self.context, backup.id,
{'display_name': 'foobar'})
@mock.patch('cinder.db.backup_destroy')
def test_destroy(self, backup_destroy):
backup = objects.Backup(context=self.context, id=1)
backup.destroy()
self.assertTrue(backup_destroy.called)
admin_context = backup_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
def test_obj_field_temp_volume_snapshot_id(self):
backup = objects.Backup(context=self.context,
temp_volume_id='2',
temp_snapshot_id='3')
self.assertEqual('2', backup.temp_volume_id)
self.assertEqual('3', backup.temp_snapshot_id)
class TestBackupList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
def test_get_all(self, backup_get_all):
backups = objects.BackupList.get_all(self.context)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all_by_project',
return_value=[fake_backup])
def test_get_all_by_project(self, get_all_by_project):
backups = objects.BackupList.get_all_by_project(
self.context, self.project_id)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all_by_host',
return_value=[fake_backup])
def test_get_all_for_volume(self, get_all_by_host):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
backups = objects.BackupList.get_all_by_host(self.context,
fake_volume_obj.id)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
def test_get_all_tenants(self, backup_get_all):
search_opts = {'all_tenants': 1}
backups = objects.BackupList.get_all(self.context, search_opts)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
| 39.747899
| 78
| 0.67019
|
e627b7b5ca87b963d9b8f0f4cff7a262ecc6d016
| 9,912
|
py
|
Python
|
test_asteroids/test_gamepiece.py
|
SvenMayer/asteroids
|
fa42b00d7e06dbd7321cf6e4dc689393a682ffca
|
[
"MIT"
] | null | null | null |
test_asteroids/test_gamepiece.py
|
SvenMayer/asteroids
|
fa42b00d7e06dbd7321cf6e4dc689393a682ffca
|
[
"MIT"
] | null | null | null |
test_asteroids/test_gamepiece.py
|
SvenMayer/asteroids
|
fa42b00d7e06dbd7321cf6e4dc689393a682ffca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Sven Mayer
"""
import unittest
from asteroids import GamePiece
import numpy as np
class TestPhysicsEngine(unittest.TestCase):
def test_initialize_empty(self):
gamePiece = GamePiece.PhysicsEngine()
self.assertIsInstance(gamePiece, GamePiece.PhysicsEngine)
def test_initialize_wrong_position_parameters(self):
with self.assertRaises(ValueError):
GamePiece.PhysicsEngine((1., 2.))
def test_initialize(self):
pos = (1., 2., 0.)
gamePiece = GamePiece.PhysicsEngine(position=pos)
self.assertEqual(gamePiece.position, pos)
def test_increase_velo(self):
gamePiece = GamePiece.PhysicsEngine((0., 0., 0.), acceleration=10.)
gamePiece.thrust = 1
gamePiece.step(0.1)
self.assertAlmostEqual(gamePiece.velocity, (1., 0.))
def test_turn(self):
dt_step = 0.1
angular_velocity = 0.1 * np.pi
gamePiece = GamePiece.PhysicsEngine((0., 0., 0.),
angular_velocity=angular_velocity)
gamePiece.turn = 1
gamePiece.step(dt_step)
self.assertAlmostEqual(gamePiece.position[2],
dt_step*angular_velocity)
def test_new_position(self):
pos = (1., 2., np.pi/2.)
angular_velocity = 0.1*np.pi
acceleration = 1.5
gamePiece = GamePiece.PhysicsEngine(
pos, acceleration=acceleration, angular_velocity=angular_velocity)
gamePiece.thrust = 1
gamePiece.step(1.0)
new_velo_y = 1.5*1.0
new_pos = 1., 2.+new_velo_y*1.0, np.pi/2.
gamePiece.thrust = 0
gamePiece.turn = 1
gamePiece.step(3.0)
new2_angle = pos[2] + angular_velocity*3.0
new2_pos = new_pos[0], new_pos[1]+new_velo_y*3.0, new2_angle
gamePiece.turn = 0
gamePiece.thrust = 1
gamePiece.step(1.0)
new3_velo_x = 0. + acceleration * 1.0 * np.cos(new2_angle)
new3_velo_y = new_velo_y + acceleration * 1.0 * np.sin(new2_angle)
new3_pos = (new2_pos[0] + new3_velo_x * 1.0,
new2_pos[1] + new3_velo_y * 1.0,
new2_angle)
self.assertAlmostEqual(gamePiece.position[0]+gamePiece.position[1]+gamePiece.position[2],
new3_pos[0]+new3_pos[1]+new3_pos[2])
def test_set_position_wrong_type(self):
gamePiece = GamePiece.PhysicsEngine(position=(0., 0., 0.),
start_velocity=(1., 1.),
angular_velocity=1.5)
with self.assertRaises(TypeError):
gamePiece.position = (1., 2., 3., 4.)
def test_spin_and_start_velo(self):
position = (0., 0., 0.)
start_velocity = 1., 2.
angular_velocity = np.pi/4.
gamePiece = GamePiece.PhysicsEngine(position=position,
start_velocity=start_velocity,
angular_velocity=angular_velocity)
turnDir = -1
gamePiece.turn = turnDir
dt = 1.5
gamePiece.step(dt)
new_position = (
position[0] + start_velocity[0] * dt,
position[1] + start_velocity[1] * dt,
(position[2] + angular_velocity * dt * turnDir) % (2. * np.pi))
self.assertAlmostEqual(new_position[0]+new_position[1]+
new_position[2],
gamePiece.position[0]+gamePiece.position[1]+
gamePiece.position[2])
class TestConvexPolygon(unittest.TestCase):
def test_init(self):
xy = (0., 0.), (1., 2.), (-2., 1.), (-1., -1.)
con_pol = GamePiece.ConvexPolygon(xy)
self.assertIsInstance(con_pol, GamePiece.ConvexPolygon)
def test_side_vectors(self):
xy = (0., 0.), (1., 2.), (-2., 1.)
side = []
len_side = []
unity_side = []
for i in range(-1, len(xy)-1):
side.append((xy[i+1][0]-xy[i][0], xy[i+1][1]-xy[i][1]))
len_side.append(np.sqrt(side[-1][0]**2. + side[-1][1]**2.))
unity_side.append((side[-1][0]/len_side[-1], side[-1][1]/len_side[-1]))
unity_side = unity_side[1:] + unity_side[:1]
con_pol = GamePiece.ConvexPolygon(xy)
self.assertAlmostEqual(unity_side[0][0]+unity_side[1][0]+
unity_side[2][0]+unity_side[0][1]+
unity_side[1][1]+unity_side[2][1],
con_pol.side[0][0]+con_pol.side[1][0]+
con_pol.side[2][0]+con_pol.side[0][1]+
con_pol.side[1][1]+con_pol.side[2][1])
def test_side_normal_vector(self):
xy = (0., 0.), (0., 1.), (1., 1.)
con_pol = GamePiece.ConvexPolygon(xy)
self.assertAlmostEqual(np.abs(con_pol.side_normal[0][0]+
con_pol.side_normal[0][1])+
np.abs(con_pol.side_normal[1][0]+
con_pol.side_normal[1][1])+
np.abs(con_pol.side_normal[2][0]+
con_pol.side_normal[2][1]),
1. + 0. + 1. )
def test_projection(self):
xy = [(0., 0.), (1., 1.), (-1., 1.)]
poly = GamePiece.ConvexPolygon(xy)
proj = poly.projection((1./np.sqrt(2.), 1./np.sqrt(2.)))
self.assertAlmostEqual(proj[0] + proj[1] + proj[2], np.sqrt(2.))
def test_collision(self):
poly1 = GamePiece.ConvexPolygon([(0., 0.), (1., 1.), (0., 1.)])
poly2 = GamePiece.ConvexPolygon([(1., 0.), (0.8, 1.), (2., 1.)])
self.assertTrue(poly1.collides(poly2))
def test_nocollision(self):
poly1 = GamePiece.ConvexPolygon([(0., 0.), (1., 1.), (0., 1.)])
poly2 = GamePiece.ConvexPolygon([(1., 0.), (1., 1.), (2., 1.)])
self.assertFalse(poly1.collides(poly2))
def test_rotate(self):
poly = GamePiece.ConvexPolygon([(0., 0.), (0., 1.), (1., 0.)])
poly.rotate(np.pi/2.)
self.assertAlmostEqual(poly.xy[1][0], -1.)
def test_pt_inside(self):
poly1 = GamePiece.ConvexPolygon([(0., 0.), (1., 1.), (0., 1.)])
# Only points within the polygon's area return True.
# Points outside the polygon's area as well as points on the edge
# return False.
self.assertTrue(poly1.point_inside((0.05, 0.2)))
self.assertFalse(poly1.point_inside((2., 2.)))
self.assertFalse(poly1.point_inside((1., 1.)))
class TestPoint(unittest.TestCase):
def test_init(self):
GamePiece.Point((10., 2.))
class TestGamePiece(unittest.TestCase):
def test_init_wrong_type(self):
with self.assertRaises(TypeError):
GamePiece.GamePiece(1.0, 10)
def test_init_polygon_xy_missing(self):
with self.assertRaises(TypeError):
GamePiece.GamePiece(1.0, 'polygon')
def test_init_point(self):
gb = GamePiece.GamePiece(1.0, 'point', position=(1., 1., 0.))
self.assertIsInstance(gb._gb_repr, GamePiece.Point)
def test_col_poly_poly(self):
gb1 = GamePiece.GamePiece(1.0, 'polygon',
xy=[(0., 0.), (2., 0.), (0., 1.)])
gb2 = GamePiece.GamePiece(1.0, 'polygon',
xy=[(1., 0.), (4., 2.), (4., 0.)])
gb3 = GamePiece.GamePiece(1.0, 'polygon',
xy=[(-1., -1.), (-4., -2.), (-4., -0.)])
self.assertTrue(gb1.collides(gb2))
self.assertFalse(gb1.collides(gb3))
def test_col_pt_pt(self):
pt1 = GamePiece.GamePiece(1.0, 'point', position=(1., 0., 0.))
pt2 = GamePiece.GamePiece(1.0, 'point', position=(1., 0., 0.))
self.assertFalse(pt1.collides(pt2))
def test_col_poly_pt(self):
pt1 = GamePiece.GamePiece(1.0, 'point', position=(1., 1., 0.))
pt2 = GamePiece.GamePiece(1.0, 'point', position=(2., 2., 0.))
poly1 = GamePiece.GamePiece(
1.0, 'polygon', xy=[(0., 0.), (2., 0.), (2., 2.), (0., 2.)])
self.assertTrue(pt1.collides(poly1))
self.assertTrue(poly1.collides(pt1))
self.assertFalse(pt2.collides(poly1))
self.assertFalse(poly1.collides(pt2))
class TestShip(unittest.TestCase):
def test_init(self):
ship = GamePiece.Ship(size=2.4)
self.assertAlmostEqual(ship.size, 2.4)
def test_gun_position(self):
ship = GamePiece.Ship(size=10., position=(0., 0., 0.))
self.assertAlmostEqual(ship.gunposition[0], 2.*10./3.)
self.assertAlmostEqual(ship.gunposition[1], 0.)
self.assertAlmostEqual(ship.gunposition[2], 0.)
class TestAsteroidBase(unittest.TestCase):
def test_init(self):
with self.assertRaises(TypeError):
GamePiece.AsteroidBase(position=(0., 0., 0.), size=1.0,
angular_velocity=0., start_velocity=(0., 0.))
class TestAsteroids(unittest.TestCase):
def test_init_(self):
asteroid = GamePiece.Asteroid1(size=3.6, position=(0., 0., 0.),
angular_velocity=10.,
start_velocity=(1.2, 2.1))
self.assertAlmostEqual(asteroid.size+asteroid.velocity[0]+
asteroid.position[0]+asteroid._angular_velocity,
3.6+1.2+0.+10.)
class TestProjectile(unittest.TestCase):
def test_init_(self):
projectile = GamePiece.Projectile(size=1., position=(2., 3., 4.),
velocity=(4.2, 5.3))
self.assertAlmostEqual(projectile.size+projectile.position[1]+
projectile.velocity[1], 1.+3.+5.3)
if __name__ == u"__main__":
unittest.main()
| 40.457143
| 97
| 0.547014
|
7cf24a061d5c88964588138b47e8fbae38a16b1e
| 682
|
py
|
Python
|
btlejack/version.py
|
mh-/btlejack
|
8cac2a2b2cf770b97400ee0d094d0c01a192008b
|
[
"MIT"
] | 1
|
2020-02-12T09:07:34.000Z
|
2020-02-12T09:07:34.000Z
|
btlejack/version.py
|
nviennot/btlejack
|
d05abdc27558e439170e078dbc81fba966255489
|
[
"MIT"
] | null | null | null |
btlejack/version.py
|
nviennot/btlejack
|
d05abdc27558e439170e078dbc81fba966255489
|
[
"MIT"
] | 1
|
2019-08-26T04:38:06.000Z
|
2019-08-26T04:38:06.000Z
|
"""
Btlejack single version module
This module contains the current version of both client and firmware software
(VERSION), but also the release number (RELEASE).
Please note that both client and firmware version shall match or Btlejack will
issue a warning during execution. When implementing new features, or modifying
the internals of either Btlejack's client and firmware, a new version number
should be assigned. Btlejack's firmware shall be updated to reflect this version
number, and also updated into btlejack's package.
The release number allows small modifications due to a release error or update,
such as a wrong packaging or typos.
"""
VERSION = '1.3'
RELEASE = '0'
| 37.888889
| 80
| 0.796188
|
5e03f6e9c979e613ed59de9d7df0af2a4446684d
| 9,988
|
py
|
Python
|
3rdParty/patool/tests/archives/__init__.py
|
UnicornParson/ConsoleUtils
|
95f445fd4006791331541e1057035c86cd8eeddb
|
[
"Apache-2.0"
] | null | null | null |
3rdParty/patool/tests/archives/__init__.py
|
UnicornParson/ConsoleUtils
|
95f445fd4006791331541e1057035c86cd8eeddb
|
[
"Apache-2.0"
] | null | null | null |
3rdParty/patool/tests/archives/__init__.py
|
UnicornParson/ConsoleUtils
|
95f445fd4006791331541e1057035c86cd8eeddb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2015 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import shutil
import patoolib
from .. import basedir, datadir
# All text files have '42' as content.
TextFileContent = '42'
class Content:
"""The test archives have one of several set of content files.
The different content file sets have each a constant defined
by this class.
"""
# Recursive archives for extraction have a text file in a directory:
# t/t.txt
# Recursive archives for creation have two text files in directories:
# foo dir/t.txt
# foo dir/bar/t.txt
Recursive = 'recursive'
# Singlefile archives for extraction have a text file t.txt
# Singlefile archives for creation have a text file `foo .txt'
Singlefile = 'singlefile'
# Multifile archives for extraction have two text files: t.txt and t2.txt
# Multifile archives for creation have two text files: foo .txt and foo2 .txt
Multifile = 'multifile'
class ArchiveTest (unittest.TestCase):
"""Helper class for archive tests, handling one commandline program."""
# set program to use for archive handling in subclass
program = None
def archive_commands (self, filename, **kwargs):
"""Run archive commands list, test, extract and create.
All keyword arguments are delegated to the create test function."""
self.archive_list(filename)
if not kwargs.get('skip_test'):
self.archive_test(filename)
self.archive_extract(filename, check=kwargs.get('check', Content.Recursive))
if not kwargs.get('skip_create'):
self.archive_create(filename, **kwargs)
def archive_extract (self, filename, check=Content.Recursive):
"""Test archive extraction."""
archive = os.path.join(datadir, filename)
self.assertTrue(os.path.isabs(archive), "archive path is not absolute: %r" % archive)
self._archive_extract(archive, check)
# archive name relative to tmpdir
relarchive = os.path.join("..", archive[len(basedir)+1:])
self._archive_extract(relarchive, check, verbosity=1)
def _archive_extract (self, archive, check, verbosity=0):
# create a temporary directory for extraction
tmpdir = patoolib.util.tmpdir(dir=basedir)
try:
olddir = patoolib.util.chdir(tmpdir)
try:
output = patoolib.extract_archive(archive, program=self.program, verbosity=verbosity, interactive=False)
if check:
self.check_extracted_archive(archive, output, check)
finally:
if olddir:
os.chdir(olddir)
finally:
shutil.rmtree(tmpdir)
def check_extracted_archive (self, archive, output, check):
if check == Content.Recursive:
# outdir is the 't' directory of the archive
self.assertEqual(output, 't')
self.check_directory(output, 't')
txtfile = os.path.join(output, 't.txt')
self.check_textfile(txtfile, 't.txt')
elif check == Content.Singlefile:
# a non-existing directory to ensure files do not exist in it
ned = get_nonexisting_directory(os.getcwd())
expected_output = os.path.basename(patoolib.util.get_single_outfile(ned, archive))
self.check_textfile(output, expected_output)
elif check == Content.Multifile:
txtfile = os.path.join(output, 't.txt')
self.check_textfile(txtfile, 't.txt')
txtfile2 = os.path.join(output, 't2.txt')
self.check_textfile(txtfile2, 't2.txt')
def check_directory (self, dirname, expectedname):
"""Check that directory exists."""
self.assertTrue(os.path.isdir(dirname), dirname)
self.assertEqual(os.path.basename(dirname), expectedname)
def check_textfile (self, filename, expectedname):
"""Check that filename exists and has the default content."""
self.assertTrue(os.path.isfile(filename), repr(filename))
self.assertEqual(os.path.basename(filename), expectedname)
self.assertEqual(get_filecontent(filename), TextFileContent)
def archive_list (self, filename):
"""Test archive listing."""
archive = os.path.join(datadir, filename)
for verbosity in (-1, 0, 1, 2):
patoolib.list_archive(archive, program=self.program, verbosity=verbosity, interactive=False)
def archive_test (self, filename):
"""Test archive testing."""
archive = os.path.join(datadir, filename)
for verbosity in (-1, 0, 1, 2):
patoolib.test_archive(archive, program=self.program, verbosity=verbosity, interactive=False)
def archive_create (self, archive, srcfiles=None, check=Content.Recursive):
"""Test archive creation."""
if srcfiles is None:
if check == Content.Recursive:
srcfiles = ('t',)
elif check == Content.Singlefile:
srcfiles = ('t.txt',)
elif check == Content.Multifile:
srcfiles = ('t.txt', 't2.txt',)
else:
raise ValueError('invalid check value %r' % check)
olddir = patoolib.util.chdir(datadir)
try:
# The format and compression arguments are needed for creating
# archives with unusual file extensions.
for verbosity in (-1, 0, 1, 2):
self._archive_create(archive, srcfiles, program=self.program, verbosity=verbosity)
finally:
if olddir:
os.chdir(olddir)
def _archive_create (self, archive, srcfiles, program=None, verbosity=0):
"""Create archive from filename."""
for srcfile in srcfiles:
self.assertFalse(os.path.isabs(srcfile))
self.assertTrue(os.path.exists(srcfile))
# create a temporary directory for creation
tmpdir = patoolib.util.tmpdir(dir=basedir)
try:
archive = os.path.join(tmpdir, archive)
self.assertTrue(os.path.isabs(archive), "archive path is not absolute: %r" % archive)
patoolib.create_archive(archive, srcfiles, verbosity=verbosity, interactive=False, program=program)
self.assertTrue(os.path.isfile(archive))
self.check_created_archive_with_test(archive)
self.check_created_archive_with_diff(archive, srcfiles)
finally:
shutil.rmtree(tmpdir)
def check_created_archive_with_test(self, archive):
command = patoolib.test_archive
program = self.program
# special case for programs that cannot test what they create
if self.program in ('compress', 'py_gzip'):
program = 'gzip'
elif self.program == 'py_bz2':
program = 'bzip2'
elif self.program == 'py_lzma':
program = 'xz'
elif self.program == 'zip':
program = 'unzip'
elif self.program in ('rzip', 'shorten'):
program = 'py_echo'
command = patoolib.list_archive
elif self.program == 'lcab':
program = 'cabextract'
elif self.program == 'genisoimage':
program = '7z'
elif self.program == 'shar':
return
command(archive, program=program)
def check_created_archive_with_diff(self, archive, srcfiles):
"""Extract created archive again and compare the contents."""
# diff srcfile and output
diff = patoolib.util.find_program("diff")
if not diff:
return
program = self.program
# special case for programs that cannot extract what they create
if self.program == 'compress':
program = 'gzip'
elif self.program == 'zip':
program = 'unzip'
elif self.program == 'lcab':
program = 'cabextract'
elif self.program == 'shar':
program = 'unshar'
elif self.program == 'genisoimage':
program = '7z'
tmpdir = patoolib.util.tmpdir(dir=basedir)
try:
olddir = patoolib.util.chdir(tmpdir)
try:
output = patoolib.extract_archive(archive, program=program, interactive=False)
if len(srcfiles) == 1:
source = os.path.join(datadir, srcfiles[0])
patoolib.util.run_checked([diff, "-urN", source, output])
else:
for srcfile in srcfiles:
source = os.path.join(datadir, srcfile)
target = os.path.join(output, srcfile)
patoolib.util.run_checked([diff, "-urN", source, target])
finally:
if olddir:
os.chdir(olddir)
finally:
shutil.rmtree(tmpdir)
def get_filecontent(filename):
"""Get file data as text."""
with open(filename) as fo:
return fo.read()
def get_nonexisting_directory(basedir):
"""Note: this is _not_ intended to be used to create a directory."""
d = os.path.join(basedir, "foo")
while os.path.exists(d):
d += 'a'
if len(d) > 100:
# wtf
raise ValueError('could not construct unique directory name at %r' % basedir)
return d
| 41.272727
| 120
| 0.621446
|
1a064cb1560dcaba4b3d1e5b59ac0b81d28bd50a
| 4,763
|
py
|
Python
|
python/IECoreMaya/SplineParameterUI.py
|
gcodebackups/cortex-vfx
|
72fa6c6eb3327fce4faf01361c8fcc2e1e892672
|
[
"BSD-3-Clause"
] | 5
|
2016-07-26T06:09:28.000Z
|
2022-03-07T03:58:51.000Z
|
python/IECoreMaya/SplineParameterUI.py
|
turbosun/cortex
|
4bdc01a692652cd562f3bfa85f3dae99d07c0b15
|
[
"BSD-3-Clause"
] | null | null | null |
python/IECoreMaya/SplineParameterUI.py
|
turbosun/cortex
|
4bdc01a692652cd562f3bfa85f3dae99d07c0b15
|
[
"BSD-3-Clause"
] | 3
|
2015-03-25T18:45:24.000Z
|
2020-02-15T15:37:18.000Z
|
##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import IECoreMaya
import maya.cmds
from ParameterUI import ParameterUI
import re
class SplineParameterUI( ParameterUI ) :
def __init__( self, node, parameter, **kw ) :
ParameterUI.__init__(
self,
node,
parameter,
maya.cmds.rowLayout(
numberOfColumns = 3,
rowAttach = [ ( 1, "top", 0 ), ( 2, "both", 0 ), ( 3, "both", 0 ) ]
),
**kw
)
maya.cmds.text(
label = self.label(),
font = "smallPlainLabelFont",
align = "right",
annotation = self.description(),
)
self.__gradientControl = maya.cmds.gradientControl()
self.__button = maya.cmds.button( label = ">")
self.__editWindow = None
self.replace( node, parameter )
def replace( self, node, parameter ) :
if not parameter.isSame( self.parameter ) :
if self.__editWindow :
maya.cmds.deleteUI( self.__editWindow, window=True )
self.__editWindow = None
ParameterUI.replace( self, node, parameter )
maya.cmds.gradientControl( self.__gradientControl, edit=True, attribute=self.plugName() )
maya.cmds.button( self.__button, edit=True, command=self.__openEditWindow )
## Returns True if we're a color ramp and False if we're a greyscale curve.
def __colored( self ) :
plugName = self.plugName()
attrName = plugName.split( "." )[-1]
return maya.cmds.objExists( plugName + "[0]." + attrName + "_ColorR" )
def __openEditWindow( self, unused ) :
if not self.__editWindow :
self.__editWindow = maya.cmds.window( re.sub( "[\|\.]" , "_", self.plugName() ), title=self.nodeName() + " " + self.label(), retain=True, widthHeight=[ 600, 300 ] )
layout = maya.cmds.formLayout()
positionControl = maya.cmds.attrFieldSliderGrp( label = "Selected position", columnWidth=[ ( 1, 100 ) ] )
if self.__colored() :
valueControl = maya.cmds.attrColorSliderGrp( label = "Selected colour", showButton=False, columnWidth=[ ( 1, 90 ) ] )
else :
valueControl = maya.cmds.attrFieldSliderGrp( label = "Selected value", columnWidth=[ ( 1, 90 ) ] )
gradientControl = maya.cmds.gradientControl(
attribute=self.plugName(),
selectedColorControl=valueControl,
selectedPositionControl=positionControl
)
maya.cmds.formLayout( layout,
edit=True,
attachForm = [
( positionControl, "left", 5 ),
( positionControl, "bottom", 15 ),
( valueControl, "bottom", 15 ),
( gradientControl, "top", 5 ),
( gradientControl, "left", 5 ),
( gradientControl, "right", 5 ),
],
attachControl = [
( gradientControl, "bottom", 5, positionControl ),
( valueControl, "left", 5, positionControl ),
]
)
maya.cmds.showWindow( self.__editWindow )
ParameterUI.registerUI( IECore.TypeId.SplinefColor3fParameter, SplineParameterUI )
ParameterUI.registerUI( IECore.TypeId.SplinefColor4fParameter, SplineParameterUI )
ParameterUI.registerUI( IECore.TypeId.SplineffParameter, SplineParameterUI )
ParameterUI.registerUI( IECore.TypeId.SplineddParameter, SplineParameterUI )
| 36.083333
| 167
| 0.685912
|
2af05d763b814d5f2e66adb7c818663081682280
| 2,192
|
py
|
Python
|
public/js/detect-wheel.py
|
BalasaravananB/Visualizer
|
16451c1be9edcaa5be5303b2b8834ff26012b456
|
[
"MIT"
] | null | null | null |
public/js/detect-wheel.py
|
BalasaravananB/Visualizer
|
16451c1be9edcaa5be5303b2b8834ff26012b456
|
[
"MIT"
] | null | null | null |
public/js/detect-wheel.py
|
BalasaravananB/Visualizer
|
16451c1be9edcaa5be5303b2b8834ff26012b456
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import codecs, json
import sys
from detecto import core, utils, visualize
# import argparse
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image", required = True, help = "Path to the image")
# args = vars(ap.parse_args())
# load the image, clone it for output, and then convert it to grayscale
args = sys.argv
image = args[1]
baseurl = args[2]
carid = args[3]
width = 668
# 800
height =501
# 600
# baseurl = '/bala/projects/inoru/WheelVisualizer/'
storageurl = '/storage/custom-detection-dataset/'
dummyPath =baseurl+storageurl+"/resized/"
img = cv2.imread(image, cv2.IMREAD_COLOR)
# img = cv2.resize(img, (0, 0), fx = 0.1, fy = 0.1)
img = cv2.resize(img, (width, height))
cv2.imwrite(dummyPath+carid+'_car.png', img)
# Specify the path to your image
image = utils.read_image(dummyPath+carid+'_car.png')# Specify the path to your image
model = core.Model.load(baseurl+storageurl+'model_weights.pth', ['wheel'])
#Getting Prediction Values
predictions = model.predict(image)
# # predictions format: (labels, boxes, scores)
labels, boxes, scores = predictions
# visualize.show_labeled_image(image, boxes, labels)
# # Blue color in BGR
color = (255, 0, 0)
# # Line thickness of 2 px
thickness = 2
points = [];
for key, value in enumerate(boxes):
if scores[key] > 0.6 :
rect = value.tolist()
startX = rect[0]
startY = rect[1]
endX = rect[0]
endY = rect[1]
x = (rect[0] +rect[2])/2
y = (rect[1] +rect[3])/2
w = (rect[2]-rect[0])
h = (rect[3]-rect[1])
points.append([x,y,w,h,startX,startY,endX,endY])
# print(points)
# cv2.rectangle(img,(value[0],value[1]),(value[2],value[3]), color, thickness)
# cv2.circle(img, (value[0],value[1]), 1, color, 5)
# cv2.circle(img, (value[2],value[3]), 1, color, thickness)
#
# cv2.imwrite(dummyPath+carid+'_car.png', img)
print(points)
#
# # cv2.imshow('img', img)
# # cv2.waitKey(0)
| 25.195402
| 94
| 0.606752
|
5a14a036467caa799f81567ddfea14a3584fecdb
| 3,883
|
py
|
Python
|
spectrochempy/core/writers/writecsv.py
|
spectrochempy/spectrochempy
|
829b290f465e630078785e303dbab197cd78b815
|
[
"Apache-2.0",
"CECILL-B",
"BSD-3-Clause"
] | 44
|
2020-05-14T01:56:40.000Z
|
2022-03-23T11:16:30.000Z
|
spectrochempy/core/writers/writecsv.py
|
spectrochempy/spectrochempy
|
829b290f465e630078785e303dbab197cd78b815
|
[
"Apache-2.0",
"CECILL-B",
"BSD-3-Clause"
] | 210
|
2020-05-22T17:33:22.000Z
|
2022-03-20T16:50:30.000Z
|
spectrochempy/core/writers/writecsv.py
|
spectrochempy/spectrochempy
|
829b290f465e630078785e303dbab197cd78b815
|
[
"Apache-2.0",
"CECILL-B",
"BSD-3-Clause"
] | 9
|
2020-05-16T15:36:02.000Z
|
2022-03-23T11:16:56.000Z
|
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2022 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# ======================================================================================================================
"""
Plugin module to extend NDDataset with a JCAMP-DX export method.
"""
# import os as os
import csv
from spectrochempy.core import preferences as prefs
from spectrochempy.core.writers.exporter import Exporter, exportermethod
__all__ = ["write_csv"]
__dataset_methods__ = __all__
# .......................................................................................................................
def write_csv(*args, **kwargs):
"""
Write a dataset in CSV format.
Currently only implemented for 1D datasets
or ND datasets with only one dimension of length larger than one.
Parameters
----------
filename: str or pathlib objet, optional
If not provided, a dialog is opened to select a file for writing.
protocol : {'scp', 'matlab', 'jcamp', 'csv', 'excel'}, optional
Protocol used for writing. If not provided, the correct protocol
is inferred (whnever it is possible) from the file name extension.
directory : str, optional
Where to write the specified `filename`. If not specified, write in the current directory.
description: str, optional
A Custom description.
delimiter : str, optional
Set the column delimiter in CSV file.
By default it is ',' or the one set in SpectroChemPy `Preferences`.
Returns
-------
out : `pathlib` object
Path of the saved file.
Examples
--------
>>> ds = scp.NDDataset([1, 2, 3])
>>> f1 = ds.write_csv('myfile')
>>> ds = scp.read('irdata/nh4y-activation.spg')
>>> f2 = ds[0].write_csv('single_spectrum.csv')
"""
exporter = Exporter()
kwargs["filetypes"] = ["CSV files (*.csv)"]
kwargs["suffix"] = ".csv"
return exporter(*args, **kwargs)
@exportermethod
def _write_csv(*args, **kwargs):
dataset, filename = args
dataset.filename = filename
delimiter = kwargs.get("delimiter", prefs.csv_delimiter)
# check dimensionality of the dataset
if dataset.squeeze().ndim > 1:
raise NotImplementedError("Only implemented for 1D NDDatasets")
# squeeze if necessary
if dataset.ndim > 1:
dataset = dataset.squeeze()
# Make csv file for 1D dataset: first and 2d column are the unique axis and data, respectively
with filename.open("w", newline="") as fid:
writer = csv.writer(fid, delimiter=delimiter)
if dataset.ndim == 1: # if statement for future implementation for ndim > 1....
if dataset.coordset is not None:
col_coord = True
title_1 = dataset.coordset[-1].title
if dataset.coordset[-1].units is not None:
title_1 += " / " + str(dataset.coordset[-1].units)
else:
col_coord = False
if dataset.units is not None:
title_2 = dataset.title + " / " + str(dataset.units)
else:
title_2 = dataset.title
if col_coord:
coltitles = [title_1, title_2]
else:
coltitles = [title_2]
writer.writerow(coltitles)
if col_coord:
for i, data in enumerate(dataset.data):
writer.writerow([dataset.coordset[-1].data[i], data])
else:
for i, data in enumerate(dataset.data):
writer.writerow([data])
return filename
| 34.981982
| 121
| 0.55112
|
cca2affe97dd2ddb6598a8f092f3cddfbc6d281b
| 1,933
|
py
|
Python
|
obfusc8/test/test_obf.py
|
tum-i22/indistinguishability-obfuscation
|
67e5b7f8eedbb65434c252611b303b210c672b5b
|
[
"Apache-2.0"
] | 19
|
2015-05-22T17:41:50.000Z
|
2020-09-09T11:08:53.000Z
|
obfusc8/test/test_obf.py
|
tum-i22/indistinguishability-obfuscation
|
67e5b7f8eedbb65434c252611b303b210c672b5b
|
[
"Apache-2.0"
] | 1
|
2021-03-19T16:07:00.000Z
|
2021-03-19T16:47:26.000Z
|
obfusc8/test/test_obf.py
|
nathanawmk/indistinguishability-obfuscation
|
67e5b7f8eedbb65434c252611b303b210c672b5b
|
[
"Apache-2.0"
] | 4
|
2020-10-13T05:10:09.000Z
|
2021-04-06T07:49:06.000Z
|
import unittest
from itertools import product
from obfusc8.circuit import *
from obfusc8.blocks import UniversalCircuit
from obfusc8.bp import BranchingProgram
from obfusc8.rbp import RandomizedBranchingProgram
from obfusc8.obf import *
class TestFixBP(unittest.TestCase):
def setUp(self):
self.inputLength = 2
outputLength = 1
numberOfGates = 2
inputs = [Input("x"+str(x)) for x in range(0, self.inputLength)]
# (-(x0 & x1) & (-x2 & x3)) & ((x4 & x5) & -(x6 & -x7))
self.circuit = Circuit(NotGate(AndGate(inputs[0], inputs[1])))
uc = UniversalCircuit(self.inputLength, outputLength, numberOfGates)
bp = BranchingProgram.fromCircuit(uc)
self.fixedBP = fixBP(bp, self.circuit)
def test_fixed_bp_same_functionality(self):
for test in list(product([0,1], repeat=self.inputLength)):
test = list(test)
circuitResult = self.circuit.evaluate(test)
bpResult = self.fixedBP.evaluate(test)
self.assertEqual(circuitResult, bpResult, 'Wrong evaluation on input %s. Was %s instead of %s'%(test, circuitResult, bpResult))
class TestFixRBP(unittest.TestCase):
def setUp(self):
self.inputLength = 2
outputLength = 1
numberOfGates = 1
inputs = [Input("x"+str(x)) for x in range(0, self.inputLength)]
# (-(x0 & x1) & (-x2 & x3)) & ((x4 & x5) & -(x6 & -x7))
self.circuit = Circuit(AndGate(inputs[0], inputs[1]))
uc = UniversalCircuit(self.inputLength, outputLength, numberOfGates)
rbp = RandomizedBranchingProgram.fromCircuit(uc, 1049, rndMatSize=1)
self.fixedRBP = fixRBP(rbp, self.circuit)
def test_fixed_rbp_same_functionality(self):
for test in list(product([0,1], repeat=self.inputLength)):
test = list(test)
correct = self.circuit.evaluate(test)
rbpResult = self.fixedRBP.evaluate(test)
self.assertEqual(correct, rbpResult, 'Wrong evaluation on input %s. Was %s instead of %s'%(test, rbpResult, correct))
if __name__ == '__main__':
unittest.main()
| 33.912281
| 130
| 0.712882
|
9a0798f4c76713a0f3e11ce4d9e7f17b32c48b52
| 28,012
|
py
|
Python
|
autotest/gcore/misc.py
|
FeU-aKlos/gdal
|
bba6781133815248c9329842d365f8812b74c33f
|
[
"Apache-2.0"
] | 3,100
|
2015-01-02T10:33:40.000Z
|
2022-03-31T02:06:51.000Z
|
autotest/gcore/misc.py
|
FeU-aKlos/gdal
|
bba6781133815248c9329842d365f8812b74c33f
|
[
"Apache-2.0"
] | 3,496
|
2015-01-06T16:53:30.000Z
|
2022-03-31T20:18:51.000Z
|
autotest/gcore/misc.py
|
FeU-aKlos/gdal
|
bba6781133815248c9329842d365f8812b74c33f
|
[
"Apache-2.0"
] | 2,036
|
2015-01-08T20:22:12.000Z
|
2022-03-31T10:24:08.000Z
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Various test of GDAL core.
# Author: Even Rouault <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import shutil
from osgeo import gdal
import gdaltest
import pytest
###############################################################################
# Test that the constructor of GDALDataset() behaves well with a big number of
# opened/created datasets
def test_misc_1():
tab_ds = [None] * 5000
drv = gdal.GetDriverByName('MEM')
for i, _ in enumerate(tab_ds):
name = 'mem_%d' % i
tab_ds[i] = drv.Create(name, 1, 1, 1)
assert tab_ds[i] is not None
###############################################################################
# Test that OpenShared() works as expected by opening a big number of times
# the same dataset with it. If it did not work, that would exhaust the system
# limit of maximum file descriptors opened at the same time
def test_misc_2():
tab_ds = [None for i in range(5000)]
for i, _ in enumerate(tab_ds):
tab_ds[i] = gdal.OpenShared('data/byte.tif')
assert tab_ds[i] is not None
###############################################################################
# Test OpenShared() with a dataset whose filename != description (#2797)
def test_misc_3():
with gdaltest.error_handler():
ds = gdal.OpenShared('../gdrivers/data/paux/small16.aux')
ds.GetRasterBand(1).Checksum()
cache_size = gdal.GetCacheUsed()
with gdaltest.error_handler():
ds2 = gdal.OpenShared('../gdrivers/data/paux/small16.aux')
ds2.GetRasterBand(1).Checksum()
cache_size2 = gdal.GetCacheUsed()
if cache_size != cache_size2:
print("--> OpenShared didn't work as expected")
ds = None
ds2 = None
###############################################################################
# Test Create() with invalid arguments
def test_misc_4():
gdal.PushErrorHandler('CPLQuietErrorHandler')
# Test a few invalid argument
drv = gdal.GetDriverByName('GTiff')
drv.Create('tmp/foo', 0, 100, 1)
drv.Create('tmp/foo', 100, 1, 1)
drv.Create('tmp/foo', 100, 100, -1)
drv.Delete('tmp/foo')
gdal.PopErrorHandler()
###############################################################################
def get_filename(drv, dirname):
filename = '%s/foo' % dirname
if drv.ShortName == 'GTX':
filename += '.gtx'
elif drv.ShortName == 'RST':
filename += '.rst'
elif drv.ShortName == 'SAGA':
filename += '.sdat'
elif drv.ShortName == 'ADRG':
filename = '%s/ABCDEF01.GEN' % dirname
elif drv.ShortName == 'SRTMHGT':
filename = '%s/N48E002.HGT' % dirname
elif drv.ShortName == 'ECW':
filename += '.ecw'
elif drv.ShortName == 'KMLSUPEROVERLAY':
filename += '.kmz'
elif drv.ShortName == 'RRASTER':
filename += '.grd'
return filename
###############################################################################
# Test Create() with various band numbers (including 0) and datatype
def _misc_5_internal(drv, datatype, nBands):
dirname = 'tmp/tmp/tmp_%s_%d_%s' % (drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
# print('drv = %s, nBands = %d, datatype = %s' % (drv.ShortName, nBands, gdal.GetDataTypeName(datatype)))
try:
os.mkdir(dirname)
except OSError:
try:
os.stat(dirname)
# Hum the directory already exists... Not expected, but let's try to go on
except OSError:
pytest.fail(
'Cannot create %s for drv = %s, nBands = %d, datatype = %s' % (dirname, drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
)
filename = get_filename(drv, dirname)
ds = drv.Create(filename, 100, 100, nBands, datatype)
if ds is not None and not (drv.ShortName == 'GPKG' and nBands == 0):
set_gt = (2, 1.0 / 10, 0, 49, 0, -1.0 / 10)
ds.SetGeoTransform(set_gt)
ds.SetProjection('GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.01745329251994328]]')
# PNM and MFF have no SetGeoTransform() method implemented
if drv.ShortName not in ['PNM', 'MFF', 'NULL']:
got_gt = ds.GetGeoTransform()
for i in range(6):
assert got_gt[i] == pytest.approx(set_gt[i], abs=1e-10), \
'Did not get expected GT for drv = %s, nBands = %d, datatype = %s' % (drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
# if ds.RasterCount > 0:
# ds.GetRasterBand(1).Fill(255)
ds = None
ds = gdal.Open(filename)
if ds is None:
# reason = 'Cannot reopen %s for drv = %s, nBands = %d, datatype = %s' % (dirname, drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
# gdaltest.post_reason(reason)
# TODO: Why not return -1?
pass
# else:
# if ds.RasterCount > 0:
# print ds.GetRasterBand(1).Checksum()
ds = None
try:
shutil.rmtree(dirname)
except OSError:
pytest.fail(
'Cannot remove %s for drv = %s, nBands = %d, datatype = %s' % (dirname, drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
)
def test_misc_5():
gdal.PushErrorHandler('CPLQuietErrorHandler')
try:
shutil.rmtree('tmp/tmp')
except OSError:
pass
try:
os.mkdir('tmp/tmp')
except OSError:
try:
os.stat('tmp/tmp')
# Hum the directory already exists... Not expected, but let's try to go on
except OSError:
pytest.fail('Cannot create tmp/tmp')
# This is to speed-up the runtime of tests on EXT4 filesystems
# Do not use this for production environment if you care about data safety
# w.r.t system/OS crashes, unless you know what you are doing.
gdal.SetConfigOption('OGR_SQLITE_SYNCHRONOUS', 'OFF')
# Test Create() with various band numbers, including 0
for i in range(gdal.GetDriverCount()):
drv = gdal.GetDriver(i)
md = drv.GetMetadata()
if drv.ShortName == 'PDF':
# PDF Create() is vector-only
continue
if drv.ShortName == 'MBTiles':
# MBTiles only support some precise resolutions
continue
if 'DCAP_CREATE' in md and 'DCAP_RASTER' in md:
datatype = gdal.GDT_Byte
for nBands in range(6):
_misc_5_internal(drv, datatype, nBands)
for nBands in [1, 3]:
for datatype in (gdal.GDT_UInt16,
gdal.GDT_Int16,
gdal.GDT_UInt32,
gdal.GDT_Int32,
gdal.GDT_Float32,
gdal.GDT_Float64,
gdal.GDT_CInt16,
gdal.GDT_CInt32,
gdal.GDT_CFloat32,
gdal.GDT_CFloat64):
_misc_5_internal(drv, datatype, nBands)
gdal.PopErrorHandler()
###############################################################################
class misc_6_interrupt_callback_class(object):
def __init__(self):
pass
def cbk(self, pct, message, user_data):
# pylint: disable=unused-argument
return pct <= 0.5
###############################################################################
# Test CreateCopy() with a source dataset with various band numbers (including 0) and datatype
def misc_6_internal(datatype, nBands, setDriversDone):
ds = gdal.GetDriverByName('MEM').Create('', 10, 10, nBands, datatype)
if nBands > 0:
ds.GetRasterBand(1).Fill(255)
ds.SetGeoTransform([2, 1.0 / 10, 0, 49, 0, -1.0 / 10])
ds.SetProjection('GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.01745329251994328]]')
ds.SetMetadata(['a'])
for i in range(gdal.GetDriverCount()):
drv = gdal.GetDriver(i)
md = drv.GetMetadata()
if ('DCAP_CREATECOPY' in md or 'DCAP_CREATE' in md) and 'DCAP_RASTER' in md:
# print ('drv = %s, nBands = %d, datatype = %s' % (drv.ShortName, nBands, gdal.GetDataTypeName(datatype)))
skip = False
# FIXME: A few cases that crashes and should be investigated
if drv.ShortName == 'JPEG2000':
if (nBands == 2 or nBands >= 5) or \
not (datatype == gdal.GDT_Byte or datatype == gdal.GDT_Int16 or datatype == gdal.GDT_UInt16):
skip = True
if skip is False:
dirname = 'tmp/tmp/tmp_%s_%d_%s' % (drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
try:
os.mkdir(dirname)
except OSError:
try:
os.stat(dirname)
# Hum the directory already exists... Not expected, but let's try to go on
except OSError:
reason = 'Cannot create %s before drv = %s, nBands = %d, datatype = %s' % (dirname, drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
pytest.fail(reason)
filename = get_filename(drv, dirname)
dst_ds = drv.CreateCopy(filename, ds)
has_succeeded = dst_ds is not None
if dst_ds:
# check that domain == None doesn't crash
dst_ds.GetMetadata(None)
dst_ds.GetMetadataItem('', None)
dst_ds = None
size = 0
stat = gdal.VSIStatL(filename)
if stat is not None:
size = stat.size
try:
shutil.rmtree(dirname)
except OSError:
reason = 'Cannot remove %s after drv = %s, nBands = %d, datatype = %s' % (dirname, drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
pytest.fail(reason)
if has_succeeded and drv.ShortName not in setDriversDone and nBands > 0:
setDriversDone.add(drv.ShortName)
# The first list of drivers fail to detect short writing
# The second one is because they are verbose in stderr
if 'DCAP_VIRTUALIO' in md and size != 0 and \
drv.ShortName not in ['JPEG2000', 'KMLSUPEROVERLAY', 'HF2', 'ZMap', 'DDS'] and \
drv.ShortName not in ['GIF', 'JP2ECW', 'JP2Lura']:
for j in range(10):
truncated_size = (size * j) / 10
vsimem_filename = ('/vsimem/test_truncate/||maxlength=%d||' % truncated_size) + get_filename(drv, '')[1:]
# print('drv = %s, nBands = %d, datatype = %s, truncated_size = %d' % (drv.ShortName, nBands, gdal.GetDataTypeName(datatype), truncated_size))
dst_ds = drv.CreateCopy(vsimem_filename, ds)
error_detected = False
if dst_ds is None:
error_detected = True
else:
gdal.ErrorReset()
dst_ds = None
if gdal.GetLastErrorMsg() != '':
error_detected = True
if not error_detected:
msg = 'write error not decteded with with drv = %s, nBands = %d, datatype = %s, truncated_size = %d' % (drv.ShortName, nBands, gdal.GetDataTypeName(datatype), truncated_size)
print(msg)
gdaltest.post_reason(msg)
fl = gdal.ReadDirRecursive('/vsimem/test_truncate')
if fl is not None:
for myf in fl:
gdal.Unlink('/vsimem/test_truncate/' + myf)
fl = gdal.ReadDirRecursive('/vsimem/test_truncate')
if fl is not None:
print(fl)
if drv.ShortName not in ['ECW', 'JP2ECW', 'VRT', 'XPM', 'JPEG2000', 'FIT', 'RST', 'INGR', 'USGSDEM', 'KMLSUPEROVERLAY', 'GMT']:
dst_ds = drv.CreateCopy(filename, ds, callback=misc_6_interrupt_callback_class().cbk)
if dst_ds is not None:
dst_ds = None
try:
shutil.rmtree(dirname)
except OSError:
pass
pytest.fail('interruption did not work with drv = %s, nBands = %d, datatype = %s' % (drv.ShortName, nBands, gdal.GetDataTypeName(datatype)))
dst_ds = None
try:
shutil.rmtree(dirname)
except OSError:
pass
try:
os.mkdir(dirname)
except OSError:
reason = 'Cannot create %s before drv = %s, nBands = %d, datatype = %s' % (dirname, drv.ShortName, nBands, gdal.GetDataTypeName(datatype))
pytest.fail(reason)
ds = None
def test_misc_6():
gdal.PushErrorHandler('CPLQuietErrorHandler')
try:
shutil.rmtree('tmp/tmp')
except OSError:
pass
try:
os.mkdir('tmp/tmp')
except OSError:
try:
os.stat('tmp/tmp')
# Hum the directory already exists... Not expected, but let's try to go on
except OSError:
pytest.fail('Cannot create tmp/tmp')
# This is to speed-up the runtime of tests on EXT4 filesystems
# Do not use this for production environment if you care about data safety
# w.r.t system/OS crashes, unless you know what you are doing.
gdal.SetConfigOption('OGR_SQLITE_SYNCHRONOUS', 'OFF')
datatype = gdal.GDT_Byte
setDriversDone = set()
for nBands in range(6):
ret = misc_6_internal(datatype, nBands, setDriversDone)
if ret != 'success':
gdal.PopErrorHandler()
return ret
nBands = 1
for datatype in (gdal.GDT_UInt16,
gdal.GDT_Int16,
gdal.GDT_UInt32,
gdal.GDT_Int32,
gdal.GDT_Float32,
gdal.GDT_Float64,
gdal.GDT_CInt16,
gdal.GDT_CInt32,
gdal.GDT_CFloat32,
gdal.GDT_CFloat64):
ret = misc_6_internal(datatype, nBands, setDriversDone)
if ret != 'success':
gdal.PopErrorHandler()
return ret
gdal.PopErrorHandler()
###############################################################################
# Test gdal.InvGeoTransform()
def test_misc_7():
gt = (10, 0.1, 0, 20, 0, -1.0)
res = gdal.InvGeoTransform(gt)
expected_inv_gt = (-100.0, 10.0, 0.0, 20.0, 0.0, -1.0)
for i in range(6):
assert res[i] == pytest.approx(expected_inv_gt[i], abs=1e-6), res
gt = (10, 1, 1, 20, 2, 2)
res = gdal.InvGeoTransform(gt)
assert not res
gt = (10, 1e10, 1e10, 20, 2e10, 2e10)
res = gdal.InvGeoTransform(gt)
assert not res
gt = (10, 1e-10, 1e-10, 20, 2e-10, 2e-10)
res = gdal.InvGeoTransform(gt)
assert not res
# Test fix for #1615
gt = (-2, 1e-8, 1e-9, 52, 1e-9, -1e-8)
res = gdal.InvGeoTransform(gt)
expected_inv_gt = (-316831683.16831684, 99009900.990099, 9900990.099009901,
5168316831.683168, 9900990.099009901, -99009900.990099)
for i in range(6):
assert res[i] == pytest.approx(expected_inv_gt[i], abs=1e-6), res
res2 = gdal.InvGeoTransform(res)
for i in range(6):
assert res2[i] == pytest.approx(gt[i], abs=1e-6), res2
###############################################################################
# Test gdal.ApplyGeoTransform()
def test_misc_8():
try:
gdal.ApplyGeoTransform
except AttributeError:
pytest.skip()
gt = (10, 0.1, 0, 20, 0, -1.0)
res = gdal.ApplyGeoTransform(gt, 10, 1)
assert res == [11.0, 19.0]
###############################################################################
# Test setting and retrieving > 2 GB values for GDAL max cache (#3689)
def test_misc_9():
old_val = gdal.GetCacheMax()
gdal.SetCacheMax(3000000000)
ret_val = gdal.GetCacheMax()
gdal.SetCacheMax(old_val)
assert ret_val == 3000000000, 'did not get expected value'
###############################################################################
# Test VSIBufferedReaderHandle (fix done in r21358)
def test_misc_10():
try:
os.remove('data/byte.tif.gz.properties')
except OSError:
pass
f = gdal.VSIFOpenL('/vsigzip/./data/byte.tif.gz', 'rb')
gdal.VSIFReadL(1, 1, f)
gdal.VSIFSeekL(f, 0, 2)
gdal.VSIFSeekL(f, 0, 0)
data = gdal.VSIFReadL(1, 4, f)
gdal.VSIFCloseL(f)
import struct
ar = struct.unpack('B' * 4, data)
assert ar == (73, 73, 42, 0)
try:
os.remove('data/byte.tif.gz.properties')
except OSError:
pass
###############################################################################
# Test that we can open a symlink whose pointed filename isn't a real
# file, but a filename that GDAL recognizes
def test_misc_11():
if not gdaltest.support_symlink():
pytest.skip()
gdal.Unlink('tmp/symlink.tif')
os.symlink('GTIFF_DIR:1:data/byte.tif', 'tmp/symlink.tif')
ds = gdal.Open('tmp/symlink.tif')
if ds is None:
os.remove('tmp/symlink.tif')
pytest.fail()
desc = ds.GetDescription()
ds = None
os.remove('tmp/symlink.tif')
assert desc == 'GTIFF_DIR:1:data/byte.tif', 'did not get expected description'
###############################################################################
# Test CreateCopy() with a target filename in a non-existing dir
def test_misc_12():
if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
pytest.skip('would crash')
import test_cli_utilities
gdal_translate_path = test_cli_utilities.get_gdal_translate_path()
for i in range(gdal.GetDriverCount()):
drv = gdal.GetDriver(i)
md = drv.GetMetadata()
if ('DCAP_CREATECOPY' in md or 'DCAP_CREATE' in md) and 'DCAP_RASTER' in md:
nbands = 1
if drv.ShortName == 'WEBP' or drv.ShortName == 'ADRG':
nbands = 3
datatype = gdal.GDT_Byte
if drv.ShortName == 'BT' or drv.ShortName == 'BLX':
datatype = gdal.GDT_Int16
elif drv.ShortName == 'GTX' or drv.ShortName == 'NTv2' or drv.ShortName == 'Leveller':
datatype = gdal.GDT_Float32
size = 1201
if drv.ShortName == 'BLX':
size = 128
src_ds = gdal.GetDriverByName('GTiff').Create('/vsimem/misc_12_src.tif', size, size, nbands, datatype)
set_gt = (2, 1.0 / size, 0, 49, 0, -1.0 / size)
src_ds.SetGeoTransform(set_gt)
src_ds.SetProjection('GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.01745329251994328]]')
# Test to detect crashes
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = drv.CreateCopy('/nonexistingpath' + get_filename(drv, ''), src_ds)
gdal.PopErrorHandler()
if ds is None and gdal.GetLastErrorMsg() == '':
gdal.Unlink('/vsimem/misc_12_src.tif')
pytest.fail('CreateCopy() into non existing dir fails without error message for driver %s' % drv.ShortName)
ds = None
if gdal_translate_path is not None:
# Test to detect memleaks
ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/misc_12.vrt', src_ds)
(out, _) = gdaltest.runexternal_out_and_err(gdal_translate_path + ' -of ' + drv.ShortName + ' tmp/misc_12.vrt /nonexistingpath/' + get_filename(drv, ''), check_memleak=False)
del ds
gdal.Unlink('tmp/misc_12.vrt')
# If DEBUG_VSIMALLOC_STATS is defined, this is an easy way
# to catch some memory leaks
if out.find('VSIMalloc + VSICalloc - VSIFree') != -1 and \
out.find('VSIMalloc + VSICalloc - VSIFree : 0') == -1:
if drv.ShortName == 'Rasterlite' and out.find('VSIMalloc + VSICalloc - VSIFree : 1') != -1:
pass
else:
print('memleak detected for driver %s' % drv.ShortName)
src_ds = None
gdal.Unlink('/vsimem/misc_12_src.tif')
###############################################################################
# Test CreateCopy() with incompatible driver types (#5912)
def test_misc_13():
# Raster-only -> vector-only
ds = gdal.Open('data/byte.tif')
gdal.PushErrorHandler()
out_ds = gdal.GetDriverByName('ESRI Shapefile').CreateCopy('/vsimem/out.shp', ds)
gdal.PopErrorHandler()
assert out_ds is None
# Raster-only -> vector-only
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR)
gdal.PushErrorHandler()
out_ds = gdal.GetDriverByName('GTiff').CreateCopy('/vsimem/out.tif', ds)
gdal.PopErrorHandler()
assert out_ds is None
###############################################################################
# Test ConfigureLogging()
def test_misc_14():
import collections
import logging
class MockLoggingHandler(logging.Handler):
def __init__(self, *args, **kwargs):
super(MockLoggingHandler, self).__init__(*args, **kwargs)
self.messages = collections.defaultdict(list)
def emit(self, record):
self.messages[record.levelname].append(record.getMessage())
logger = logging.getLogger('gdal_logging_test')
logger.setLevel(logging.DEBUG)
logger.propagate = False
handler = MockLoggingHandler(level=logging.DEBUG)
logger.addHandler(handler)
prev_debug = gdal.GetConfigOption("CPL_DEBUG")
try:
gdal.ConfigurePythonLogging(logger_name='gdal_logging_test', enable_debug=True)
assert gdal.GetConfigOption("CPL_DEBUG") == "ON", "should have enabled debug"
gdal.Debug("test1", "debug1")
gdal.Error(gdal.CE_Debug, gdal.CPLE_FileIO, "debug2")
gdal.Error(gdal.CE_None, gdal.CPLE_AppDefined, "info1")
gdal.Error(gdal.CE_Warning, gdal.CPLE_AssertionFailed, "warning1")
gdal.Error(gdal.CE_Failure, 99999, "error1")
expected = {
'DEBUG': ["test1: debug1", "FileIO: debug2"],
'INFO': ["AppDefined: info1"],
'WARNING': ["AssertionFailed: warning1"],
'ERROR': ["99999: error1"],
}
assert handler.messages == expected, "missing log messages"
gdal.SetErrorHandler('CPLDefaultErrorHandler')
handler.messages.clear()
gdal.SetConfigOption('CPL_DEBUG', "OFF")
gdal.ConfigurePythonLogging(logger_name='gdal_logging_test')
assert gdal.GetConfigOption("CPL_DEBUG") == "OFF", \
"shouldn't have enabled debug"
# these get suppressed by CPL_DEBUG
gdal.Debug("test1", "debug3")
# these don't
gdal.Error(gdal.CE_Debug, gdal.CPLE_None, "debug4")
assert handler.messages['DEBUG'] == ['debug4'], "unexpected log messages"
finally:
gdal.SetErrorHandler('CPLDefaultErrorHandler')
gdal.SetConfigOption('CPL_DEBUG', prev_debug)
logger.removeHandler(handler)
###############################################################################
# Test SetErrorHandler
def test_misc_15():
messages0 = []
def handle0(ecls, ecode, emsg):
messages0.append((ecls, ecode, emsg))
messages1 = []
def handle1(ecls, ecode, emsg):
messages1.append((ecls, ecode, emsg))
prev_debug = gdal.GetConfigOption("CPL_DEBUG")
try:
gdal.SetErrorHandler(handle0)
gdal.SetConfigOption('CPL_DEBUG', "ON")
gdal.Debug('foo', 'bar')
gdal.Error(gdal.CE_Debug, gdal.CPLE_FileIO, "debug2")
gdal.Error(gdal.CE_None, gdal.CPLE_AppDefined, "info1")
gdal.Error(gdal.CE_Warning, gdal.CPLE_AssertionFailed, "warning1")
gdal.Error(gdal.CE_Failure, 99999, "error1")
expected0 = [
(gdal.CE_Debug, 0, 'foo: bar'),
(gdal.CE_Debug, gdal.CPLE_FileIO, "debug2"),
(gdal.CE_None, gdal.CPLE_AppDefined, "info1"),
(gdal.CE_Warning, gdal.CPLE_AssertionFailed, "warning1"),
(gdal.CE_Failure, 99999, "error1"),
]
assert expected0 == messages0, "SetErrorHandler: mismatched log messages"
messages0[:] = []
# Check Push
gdal.PushErrorHandler(handle1)
gdal.SetConfigOption("CPL_DEBUG", "OFF")
gdal.Error(gdal.CE_Debug, gdal.CPLE_FileIO, "debug2")
gdal.Error(gdal.CE_None, gdal.CPLE_AppDefined, "info1")
gdal.Error(gdal.CE_Warning, gdal.CPLE_AssertionFailed, "warning1")
gdal.Error(gdal.CE_Failure, 99999, "error1")
assert len(messages0) == 0, "PushErrorHandler: unexpected log messages"
assert len(messages1) == 4, "PushErrorHandler: missing log messages"
# and pop restores original behaviour
gdal.PopErrorHandler()
messages1[:] = []
gdal.Error(gdal.CE_Debug, gdal.CPLE_FileIO, "debug2")
gdal.Error(gdal.CE_None, gdal.CPLE_AppDefined, "info1")
gdal.Error(gdal.CE_Warning, gdal.CPLE_AssertionFailed, "warning1")
gdal.Error(gdal.CE_Failure, 99999, "error1")
assert len(messages0) == 4, "PopErrorHandler: missing log messages"
assert len(messages1) == 0, "PopErrorHandler: unexpected log messages"
finally:
gdal.SetErrorHandler('CPLDefaultErrorHandler')
gdal.SetConfigOption('CPL_DEBUG', prev_debug)
###############################################################################
def test_misc_cleanup():
try:
shutil.rmtree('tmp/tmp')
except OSError:
pass
| 37.151194
| 206
| 0.548051
|
9004dbf47d4a26d57528dd103dacfb4b8b5d4449
| 687
|
py
|
Python
|
Preprocessing/data_preprocessing_template.py
|
mayurjainf007/Preprocesssing-Words
|
1c83e2f6fc1d14919fb0b45ddca6267b4d679f6b
|
[
"MIT"
] | null | null | null |
Preprocessing/data_preprocessing_template.py
|
mayurjainf007/Preprocesssing-Words
|
1c83e2f6fc1d14919fb0b45ddca6267b4d679f6b
|
[
"MIT"
] | null | null | null |
Preprocessing/data_preprocessing_template.py
|
mayurjainf007/Preprocesssing-Words
|
1c83e2f6fc1d14919fb0b45ddca6267b4d679f6b
|
[
"MIT"
] | null | null | null |
# Data Preprocessing Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
# Splitting the dataset into the Training set and Test set
#from sklearn.cross_validation import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
| 29.869565
| 93
| 0.77147
|
7bca1108d23e43907d3f6e3c3d7cc2d0ea749bca
| 1,273
|
py
|
Python
|
model/review_playstore.py
|
alfredolp30/bot-reviews
|
5329b555574b76a9195c4b04f93fa7ec256c3707
|
[
"MIT"
] | 1
|
2022-01-23T11:29:34.000Z
|
2022-01-23T11:29:34.000Z
|
model/review_playstore.py
|
alfredolp30/bot-reviews
|
5329b555574b76a9195c4b04f93fa7ec256c3707
|
[
"MIT"
] | null | null | null |
model/review_playstore.py
|
alfredolp30/bot-reviews
|
5329b555574b76a9195c4b04f93fa7ec256c3707
|
[
"MIT"
] | null | null | null |
from datetime import datetime
class ReviewPlayStore:
def __init__(self, id: str, appId: str, appName: str, appVersion: str, url: str, author: str, date: datetime, description: str, rating: int, iconUrl: str, device: str, deviceProductName: str, androidOsVersion: str):
self.id = id
self.appId = appId
self.appName = appName
self.appVersion = appVersion
self.url = url
self.author = author
self.date = date
self.description = description
self.rating = rating
self.iconUrl = iconUrl
self.device = device
self.deviceProductName = deviceProductName
self.androidOsVersion = androidOsVersion
def asJson(self) -> dict:
return {
'id': self.id,
'appId': self.appId,
'appName': self.appName,
'appVersion': self.appVersion,
'url': self.url,
'author': self.author,
'date': int(self.date.timestamp()),
'description': self.description,
'rating': self.rating,
'iconUrl': self.iconUrl,
'device': self.device,
'deviceProductName': self.deviceProductName,
'androidOsVersion': self.androidOsVersion
}
| 35.361111
| 219
| 0.584446
|
1d0f245283b27627a57af08cc32885cf74a00e45
| 2,511
|
py
|
Python
|
setup.py
|
5l1v3r1/AtomShields
|
e75f25393b4a7a315ec96bf9b8e654cb2200866a
|
[
"Apache-2.0"
] | 13
|
2019-01-28T03:48:59.000Z
|
2021-02-03T12:57:11.000Z
|
setup.py
|
ElevenPaths/AtomShields
|
e75f25393b4a7a315ec96bf9b8e654cb2200866a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ElevenPaths/AtomShields
|
e75f25393b4a7a315ec96bf9b8e654cb2200866a
|
[
"Apache-2.0"
] | 4
|
2019-03-28T15:51:15.000Z
|
2021-02-01T00:51:51.000Z
|
# -*- coding: utf-8 -*-
u"""
Copyright 2018 ElevenPaths - Telefonica Digital España
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import find_packages
from distutils.core import setup
from distutils.command.install import install as _install
def read_file(filename):
with open(filename) as f:
return f.read()
# (╯ರ ~ ರ)╯︵ ┻━┻
# For compatibility: Install dependencies directly via pip
import pip
pip.main(["install"] + read_file('requirements.txt').splitlines())
package_name = 'atomshields'
version = read_file('VERSION').strip()
setup(
name = package_name,
version = version,
install_requires=read_file('requirements.txt').splitlines(),
packages = find_packages(),
author = 'ElevenPaths',
description = "Security testing framework for repositories and source code.",
long_description=open('README.rst').read(),
author_email = 'diego.fernandez@11paths.com, david.amrani@11paths.com',
url = 'https://github.com/ElevenPaths/AtomShields',
project_urls={
"Documentation": "https://atomshields.readthedocs.io",
"Source Code": "https://github.com/ElevenPaths/AtomShields",
},
download_url = 'https://github.com/ElevenPaths/AtomShields/tarball/' + version,
keywords = 'security, source code, analysis',
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing'
],
)
# Setup AtomShields
import os
from atomshields.scanner import AtomShieldsScanner
AtomShieldsScanner.setup()
if not os.path.isfile(AtomShieldsScanner.CONFIG_PATH):
AtomShieldsScanner.generateConfig(show = False)
| 33.48
| 81
| 0.725209
|
b8329d02eac6a00bba9190a63467ebf4b09b54cd
| 1,136
|
py
|
Python
|
tests/test_util.py
|
cldf/cltoolkit
|
10c2692bd5370d042fc70494a0b948fea8e6a58d
|
[
"MIT"
] | 2
|
2021-08-08T00:16:58.000Z
|
2021-12-31T19:55:40.000Z
|
tests/test_util.py
|
cldf/cltoolkit
|
10c2692bd5370d042fc70494a0b948fea8e6a58d
|
[
"MIT"
] | 6
|
2021-08-02T09:33:51.000Z
|
2022-03-22T19:46:14.000Z
|
tests/test_util.py
|
cldf/cltoolkit
|
10c2692bd5370d042fc70494a0b948fea8e6a58d
|
[
"MIT"
] | null | null | null |
from lingpy.basictypes import lists
from cltoolkit.models import Form
from cltoolkit.util import (
identity,
jaccard,
iter_syllables,
valid_sounds,
DictTuple,
datasets_by_id,
)
def test_datasets_by_id(tests_dir):
assert len(datasets_by_id('wangbcd', base_dir=tests_dir)) == 1
def test_DictTuple():
class C:
id = 5
d = DictTuple(list('abcde'), key=identity)
assert 'a' in d
assert d['a'] == d[0]
assert d.get('x', 5) == 5
d = DictTuple([C()])
assert C() in d
assert 5 in d
def test_valid_sounds(clts):
sounds = [clts.bipa[x] for x in ["_", "+", "a:", "b", "+", "_", "+", "c", "_", "_"]]
assert valid_sounds(sounds)[0] == "aː"
assert valid_sounds([]) == []
assert valid_sounds([clts.bipa['a'], clts.bipa['_'], clts.bipa['b']]) == ['a', '+', 'b']
def test_identity():
assert identity("x") == "x"
def test_jaccard():
assert jaccard(set([1, 2]), set([1, 2])) == 1
assert jaccard(set([]), set([])) == 0
def test_syllables():
form = Form(id="test", sounds=lists("t a k + t a k"))
assert len(list(iter_syllables(form))) == 2
| 22.27451
| 92
| 0.581866
|
5c5f284c24559477af23902218fc4effabd8b8b2
| 818
|
py
|
Python
|
utils/logger.py
|
ardihikaru/learn-to-cluster
|
d7a5ea0946f7b402f8878bfd608bf3e0dc9a26ca
|
[
"MIT"
] | 620
|
2019-04-16T01:06:59.000Z
|
2022-03-27T15:15:45.000Z
|
utils/logger.py
|
ardihikaru/learn-to-cluster
|
d7a5ea0946f7b402f8878bfd608bf3e0dc9a26ca
|
[
"MIT"
] | 83
|
2019-04-29T08:55:16.000Z
|
2022-03-11T09:27:16.000Z
|
utils/logger.py
|
ardihikaru/learn-to-cluster
|
d7a5ea0946f7b402f8878bfd608bf3e0dc9a26ca
|
[
"MIT"
] | 141
|
2019-04-16T08:53:02.000Z
|
2022-03-14T08:49:37.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
def create_logger(name='global_logger', log_file=None):
""" use different log level for file and stream
"""
logger = logging.getLogger(name)
formatter = logging.Formatter('[%(asctime)s] %(message)s')
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
logger.addHandler(sh)
if log_file is not None:
fh = logging.FileHandler(log_file)
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
return logger
if __name__ == '__main__':
logger = create_logger('test')
logger = create_logger('test', 'log.txt')
logger.info('output to file and stream')
logger.debug('output to file')
| 24.787879
| 62
| 0.662592
|
f82b81fe3b5b99f550cbdba8236159603661dc0b
| 2,477
|
py
|
Python
|
pytorch3d/ops/mesh_face_areas_normals.py
|
hangg7/pytorcg3d
|
f7f363eeb8efeba0927f674c83ab927ad8ce3e32
|
[
"BSD-3-Clause"
] | 1
|
2020-07-13T12:40:42.000Z
|
2020-07-13T12:40:42.000Z
|
pytorch3d/ops/mesh_face_areas_normals.py
|
hangg7/pytorch3d
|
f7f363eeb8efeba0927f674c83ab927ad8ce3e32
|
[
"BSD-3-Clause"
] | null | null | null |
pytorch3d/ops/mesh_face_areas_normals.py
|
hangg7/pytorch3d
|
f7f363eeb8efeba0927f674c83ab927ad8ce3e32
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable
class _MeshFaceAreasNormals(Function):
"""
Torch autograd Function wrapper for face areas & normals C++/CUDA implementations.
"""
@staticmethod
def forward(ctx, verts, faces):
"""
Args:
ctx: Context object used to calculate gradients.
verts: FloatTensor of shape (V, 3), representing the packed
batch verts tensor.
faces: LongTensor of shape (F, 3), representing the packed
batch faces tensor
Returns:
areas: FloatTensor of shape (F,) with the areas of each face
normals: FloatTensor of shape (F,3) with the normals of each face
"""
if not (verts.dim() == 2):
raise ValueError('verts need to be of shape Vx3.')
if not (verts.shape[1] == 3):
raise ValueError('verts need to be of shape Vx3.')
if not (faces.dim() == 2):
raise ValueError('faces need to be of shape Fx3.')
if not (faces.shape[1] == 3):
raise ValueError('faces need to be of shape Fx3.')
if not (faces.dtype == torch.int64):
raise ValueError('faces need to be of type torch.int64.')
# TODO(gkioxari) Change cast to floats once we add support for doubles.
if not (verts.dtype == torch.float32):
verts = verts.float()
ctx.save_for_backward(verts, faces)
areas, normals = _C.face_areas_normals_forward(verts, faces)
return areas, normals
@staticmethod
@once_differentiable
def backward(ctx, grad_areas, grad_normals):
grad_areas = grad_areas.contiguous()
grad_normals = grad_normals.contiguous()
verts, faces = ctx.saved_tensors
# TODO(gkioxari) Change cast to floats once we add support for doubles.
if not (grad_areas.dtype == torch.float32):
grad_areas = grad_areas.float()
if not (grad_normals.dtype == torch.float32):
grad_normals = grad_normals.float()
grad_verts = _C.face_areas_normals_backward(
grad_areas, grad_normals, verts, faces
)
return grad_verts, None
# pyre-fixme[16]: `_MeshFaceAreasNormals` has no attribute `apply`.
mesh_face_areas_normals = _MeshFaceAreasNormals.apply
| 38.703125
| 86
| 0.636657
|
51c95ea7ea66f2af5ac9a3edbdfca354e113c678
| 7,135
|
py
|
Python
|
model.py
|
tuan3w/cnn_vocoder
|
7855be5174bb61a38c4f7c7c482365d460f93a2a
|
[
"MIT"
] | 80
|
2018-10-09T02:56:10.000Z
|
2021-07-12T10:08:27.000Z
|
model.py
|
HoltTechnologyCorporation/cnn_vocoder
|
7855be5174bb61a38c4f7c7c482365d460f93a2a
|
[
"MIT"
] | 10
|
2018-10-09T02:57:46.000Z
|
2020-06-20T12:39:51.000Z
|
model.py
|
HoltTechnologyCorporation/cnn_vocoder
|
7855be5174bb61a38c4f7c7c482365d460f93a2a
|
[
"MIT"
] | 16
|
2018-10-09T02:56:29.000Z
|
2021-01-03T21:50:01.000Z
|
import torch
import torch.nn.functional as F
from torch import nn
class ResnetBlock(nn.Module):
"""Residual Block
Args:
in_channels (int): number of channels in input data
out_channels (int): number of channels in output
"""
def __init__(self, in_channels, out_channels, kernel_size=3, one_d=False):
super(ResnetBlock, self).__init__()
self.build_conv_block(in_channels, out_channels, one_d, kernel_size=kernel_size)
def build_conv_block(self, in_channels, out_channels, one_d, kernel_size=3):
padding = (kernel_size -1)//2
if not one_d:
conv = nn.Conv2d
norm = nn.BatchNorm2d
else:
conv = nn.Conv1d
norm = nn.BatchNorm1d
self.conv1 = nn.Sequential(
conv(in_channels, out_channels, kernel_size=kernel_size, padding=padding),
norm(out_channels),
nn.ELU()
)
self.conv2 = nn.Sequential(
conv(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
norm(out_channels),
)
if in_channels != out_channels:
self.down = nn.Sequential(
conv(in_channels, out_channels, kernel_size=1, bias=False),
norm(out_channels)
)
else:
self.down = None
self.act = nn.ELU()
def forward(self, x):
"""
Args:
x (Tensor): B x C x T
"""
residual = x
out = self.conv1(x)
out = self.conv2(out)
if self.down is not None:
residual = self.down(residual)
return self.act(out + residual)
class UpsamplingLayer(nn.Module):
"""Applies 1D upsampling operator over input tensor.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
residuals (int, optional): number of residual blocks. Default=0
"""
def __init__(self, in_channels, out_channels, residuals=0):
super(UpsamplingLayer, self).__init__()
# TODO: try umsampling with bilinear interpolation
self.upsample = nn.Upsample(scale_factor=2, mode='linear')
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1)
torch.nn.init.xavier_uniform_(self.conv.weight)
self.bn = nn.BatchNorm1d(out_channels)
self.act = nn.ELU()
if residuals != 0:
# resnet blocks
layers = []
for _ in range(residuals):
layers.append(
ResnetBlock(out_channels, out_channels, one_d=True)
)
self.res_blocks = nn.Sequential(*layers)
else:
self.res_blocks = None
def forward(self, x):
"""
Args:
x (Tensor): B x in_channels x T
Returns:
Tensor of shape (B, out_channels, T x 2)
"""
# upsample network
B, C, T = x.shape
# upsample
# x = x.unsqueeze(dim=3)
# x = F.upsample(x, size=(T*2, 1), mode='bilinear').squeeze(3)
x = self.upsample(x)
# x = self.pad(x)
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
# pass through resnet blocks to improve internal representations
# of data
if self.res_blocks != None:
x = self.res_blocks(x)
return x
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding)
self.bn = nn.BatchNorm2d(out_channels)
self.act = nn.ELU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
return x
class Head(nn.Module):
"""Head module
Args:
channels (list): list of #channels in each upsampling layer
pre_residuals (int, optional): number of residual blocks before upsampling. Default: 64
down_conv_channels (list): list of #channels in each down_conv blocks
up_residuals (int, optional): number of residual blocks in each upsampling module. Default: 0
"""
def __init__(self, channels,
pre_residuals=64,
pre_conv_channels=[64, 32, 16, 8, 4],
up_residuals=0,
post_residuals=2):
super(Head, self).__init__()
pre_convs = []
c0 = pre_conv_channels[0]
pre_convs.append(ConvBlock(1, c0, kernel_size=3, padding=1))
for _ in range(pre_residuals):
pre_convs.append(ResnetBlock(c0, c0))
for i in range(len(pre_conv_channels) -1):
in_c = pre_conv_channels[i]
out_c = pre_conv_channels[i + 1]
pre_convs.append(ResnetBlock(in_c, out_c))
for _ in range(pre_residuals):
pre_convs.append(ResnetBlock(out_c, out_c))
self.pre_conv = nn.Sequential(*pre_convs)
up_layers = []
for i in range(len(channels) - 1):
in_channels = channels[i]
out_channels = channels[i + 1]
layer = UpsamplingLayer(in_channels, out_channels, residuals=up_residuals)
up_layers.append(layer)
self.upsampling = nn.Sequential(*up_layers)
post_convs = []
last_channels = channels[-1]
for i in range(post_residuals):
post_convs.append(ResnetBlock(last_channels, last_channels, one_d=True, kernel_size=5))
self.post_conv = nn.Sequential(*post_convs)
def forward(self, x):
"""
forward pass
Args:
x (Tensor): B x C x T
Returns:
Tensor: B x C x (2^#channels * T)
"""
x = x.unsqueeze(1) # reshape to [B x 1 x C x T]
x = self.pre_conv(x)
s1, _, _, s4 = x.shape
x = x.reshape(s1, -1, s4)
x = self.upsampling(x)
x2 = self.post_conv(x)
return x, x2
DEFAULT_LAYERS_PARAMS = [80, 128, 128, 64, 64, 32, 16, 8, 1]
class CNNVocoder(nn.Module):
"""CNN Vocoder
Args:
n_heads (int): Number of heads
layer_channels (list): list of #channels of each layer
"""
def __init__(self, n_heads=3,
layer_channels=DEFAULT_LAYERS_PARAMS,
pre_conv_channels=[64, 32, 16, 8, 4],
pre_residuals=64,
up_residuals=0,
post_residuals=3):
super(CNNVocoder, self).__init__()
self.head = Head(layer_channels,
pre_conv_channels=pre_conv_channels,
pre_residuals=pre_residuals, up_residuals=up_residuals,
post_residuals=post_residuals)
self.linear = nn.Linear(layer_channels[-1], 1)
self.act_fn = nn.Softsign()
def forward(self, x):
b = x.shape[0]
pre, post = self.head(x)
rs0 = self.linear(pre.transpose(1, 2))
rs0 = self.act_fn(rs0).squeeze(-1)
rs1 = self.linear(post.transpose(1, 2))
rs1 = self.act_fn(rs1).squeeze(-1)
return rs0, rs1
| 33.032407
| 101
| 0.578416
|
ab1cf6ca44d16c957dd36043d44ab5ec9974c986
| 6,421
|
py
|
Python
|
download.py
|
qmeeus/SSGAN-Tensorflow
|
6f359581b188b4420a57f6746b226086454d9ab9
|
[
"MIT"
] | 2
|
2018-07-25T02:23:53.000Z
|
2018-09-11T05:31:09.000Z
|
download.py
|
qmeeus/SSGAN-Tensorflow
|
6f359581b188b4420a57f6746b226086454d9ab9
|
[
"MIT"
] | null | null | null |
download.py
|
qmeeus/SSGAN-Tensorflow
|
6f359581b188b4420a57f6746b226086454d9ab9
|
[
"MIT"
] | 1
|
2020-04-19T22:29:33.000Z
|
2020-04-19T22:29:33.000Z
|
from __future__ import print_function
import os
import tarfile
import subprocess
import argparse
import h5py
import numpy as np
parser = argparse.ArgumentParser(description='Download dataset for SSGAN.')
parser.add_argument('--datasets', metavar='N', type=str, nargs='+', choices=['MNIST', 'SVHN', 'CIFAR10'])
def prepare_h5py(train_image, train_label, test_image, test_label, data_dir, shape=None):
image = np.concatenate((train_image, test_image), axis=0).astype(np.uint8)
label = np.concatenate((train_label, test_label), axis=0).astype(np.uint8)
print('Preprocessing data...')
import progressbar
bar = progressbar.ProgressBar(maxval=100,
widgets=[progressbar.Bar('=', '[', ']'), ' ',
progressbar.Percentage()])
bar.start()
f = h5py.File(os.path.join(data_dir, 'data.hy'), 'w')
data_id = open(os.path.join(data_dir,'id.txt'), 'w')
for i in range(image.shape[0]):
if i%(image.shape[0]/100)==0:
bar.update(i/(image.shape[0]/100))
grp = f.create_group(str(i))
data_id.write(str(i)+'\n')
if shape:
grp['image'] = np.reshape(image[i], shape, order='F')
else:
grp['image'] = image[i]
label_vec = np.zeros(10)
label_vec[label[i]%10] = 1
grp['label'] = label_vec.astype(np.bool)
bar.finish()
f.close()
data_id.close()
return
def check_file(data_dir):
if os.path.exists(data_dir):
if os.path.isfile(os.path.join('data.hy')) and \
os.path.isfile(os.path.join('id.txt')):
return True
else:
os.mkdir(data_dir)
return False
def download_mnist(download_path):
data_dir = os.path.join(download_path, 'mnist')
if check_file(data_dir):
print('MNIST was downloaded.')
return
data_url = 'http://yann.lecun.com/exdb/mnist/'
keys = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
for k in keys:
url = (data_url+k).format(**locals())
target_path = os.path.join(data_dir, k)
cmd = ['curl', url, '-o', target_path]
print('Downloading ', k)
subprocess.call(cmd)
cmd = ['gzip', '-d', target_path]
print('Unzip ', k)
subprocess.call(cmd)
num_mnist_train = 60000
num_mnist_test = 10000
fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
train_image = loaded[16:].reshape((num_mnist_train,28,28,1)).astype(np.float)
fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
train_label = np.asarray(loaded[8:].reshape((num_mnist_train)).astype(np.float))
fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
test_image = loaded[16:].reshape((num_mnist_test,28,28,1)).astype(np.float)
fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
test_label = np.asarray(loaded[8:].reshape((num_mnist_test)).astype(np.float))
prepare_h5py(train_image, train_label, test_image, test_label, data_dir)
for k in keys:
cmd = ['rm', '-f', os.path.join(data_dir, k[:-3])]
subprocess.call(cmd)
def download_svhn(download_path):
data_dir = os.path.join(download_path, 'svhn')
import scipy.io as sio
# svhn file loader
def svhn_loader(url, path):
cmd = ['curl', url, '-o', path]
subprocess.call(cmd)
m = sio.loadmat(path)
return m['X'], m['y']
if check_file(data_dir):
print('SVHN was downloaded.')
return
data_url = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'
train_image, train_label = svhn_loader(data_url, os.path.join(data_dir, 'train_32x32.mat'))
data_url = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'
test_image, test_label = svhn_loader(data_url, os.path.join(data_dir, 'test_32x32.mat'))
prepare_h5py(np.transpose(train_image, (3, 0, 1, 2)), train_label,
np.transpose(test_image, (3, 0, 1, 2)), test_label, data_dir)
cmd = ['rm', '-f', os.path.join(data_dir, '*.mat')]
subprocess.call(cmd)
def download_cifar10(download_path):
data_dir = os.path.join(download_path, 'cifar10')
# cifar file loader
def unpickle(file):
import cPickle
with open(file, 'rb') as fo:
dict = cPickle.load(fo)
return dict
if check_file(data_dir):
print('CIFAR was downloaded.')
return
data_url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
k = 'cifar-10-python.tar.gz'
target_path = os.path.join(data_dir, k)
print(target_path)
cmd = ['curl', data_url, '-o', target_path]
print('Downloading CIFAR10')
subprocess.call(cmd)
tarfile.open(target_path, 'r:gz').extractall(data_dir)
num_cifar_train = 50000
num_cifar_test = 10000
target_path = os.path.join(data_dir, 'cifar-10-batches-py')
train_image = []
train_label = []
for i in range(5):
fd = os.path.join(target_path, 'data_batch_'+str(i+1))
dict = unpickle(fd)
train_image.append(dict['data'])
train_label.append(dict['labels'])
train_image = np.reshape(np.stack(train_image, axis=0), [num_cifar_train, 32*32*3])
train_label = np.reshape(np.array(np.stack(train_label, axis=0)), [num_cifar_train])
fd = os.path.join(target_path, 'test_batch')
dict = unpickle(fd)
test_image = np.reshape(dict['data'], [num_cifar_test, 32*32*3])
test_label = np.reshape(dict['labels'], [num_cifar_test])
prepare_h5py(train_image, train_label, test_image, test_label, data_dir, [32, 32, 3])
cmd = ['rm', '-f', os.path.join(data_dir, 'cifar-10-python.tar.gz')]
subprocess.call(cmd)
cmd = ['rm', '-rf', os.path.join(data_dir, 'cifar-10-batches-py')]
subprocess.call(cmd)
if __name__ == '__main__':
args = parser.parse_args()
path = './datasets'
if not os.path.exists(path): os.mkdir(path)
if 'MNIST' in args.datasets:
download_mnist('./datasets')
if 'SVHN' in args.datasets:
download_svhn('./datasets')
if 'CIFAR10' in args.datasets:
download_cifar10('./datasets')
| 33.794737
| 105
| 0.63121
|
2a50ddd5bf52a1c32f1f9bffc7cb5632e3b276bc
| 2,441
|
py
|
Python
|
phylogeny/panseqtrees/phenotype.py
|
mwhitesi/pangamr
|
84f6e03a189e9dc7b11bb04db6e0b5f134cdcbf4
|
[
"Apache-2.0"
] | null | null | null |
phylogeny/panseqtrees/phenotype.py
|
mwhitesi/pangamr
|
84f6e03a189e9dc7b11bb04db6e0b5f134cdcbf4
|
[
"Apache-2.0"
] | null | null | null |
phylogeny/panseqtrees/phenotype.py
|
mwhitesi/pangamr
|
84f6e03a189e9dc7b11bb04db6e0b5f134cdcbf4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Functions for working with Phenotype data
"""
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import argparse
import logging
import pandas as pd
import re
import pickle
__author__ = "Matthew Whiteside"
__copyright__ = "Copyright 2015, Public Health Agency of Canada"
__license__ = "APL"
__version__ = "2.0"
__maintainer__ = "Matthew Whiteside"
__email__ = "mwhiteside@canada.ca"
logger = None
def to_dict(tsvfile, id_col, pheno_col, name_col=None, phenotype_name=None, idformatter=None,
phenoformatter=None):
"""Convert from tsv file to dictionary structure expected in LocusTree
Args:
tsvfile:
"""
df = pd.read_table(tsvfile, dtype={id_col: str})
if name_col:
df = df[[id_col,name_col,pheno_col]]
else:
df = df[[id_col,pheno_col]]
if idformatter:
df[id_col] = df[id_col].apply(idformatter)
if phenoformatter:
df[pheno_col] = df[pheno_col].apply(phenoformatter)
if name_col == phenotype_name:
raise Exception("Missing both argument: name_col and phenotype_name. Must provide column with phenotype name "+
"(when input file contains multiple phenotypes) "+
"or the name of phenotype (input file contains single phenotype).")
if name_col:
# Multiple phenotypes
df[name_col] = df[name_col].astype("category")
phenodict = df.groupby(name_col).apply(
lambda df: df.groupby(id_col).apply(
lambda df2: df2[pheno_col].values[0]
).to_dict()
).to_dict()
else:
tmp = df.groupby(id_col).apply(lambda df2: df2[pheno_col].values[0]).to_dict()
phenodict = {phenotype_name: tmp}
return phenodict
def normalizefn(x):
return re.sub(r'\.',r'_dot_',str(x))
if __name__ == "__main__":
"""Convert tsv to dict and pickle
"""
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('panseqtrees.phenotype')
# Parse command-line args
parser = argparse.ArgumentParser()
parser.add_argument('output', help='Picke file location')
parser.add_argument('input', help='Phenotype .tsv file')
options = parser.parse_args()
phenodict = to_dict(options.input, 'genome_id', 'resistant_phenotype', name_col='antibiotic', idformatter=normalizefn)
with open(options.output, 'wb') as fh:
pickle.dump(phenodict, fh)
| 25.427083
| 122
| 0.664891
|
925477ecb3341f0e5603abc51391d406c244c3ff
| 8,090
|
py
|
Python
|
mega_nerf/datasets/filesystem_dataset.py
|
ksihat/mega-nerf
|
a4865f31e5b95637c79f89dfdda3fa289e226bf9
|
[
"MIT"
] | 1
|
2021-12-21T03:06:43.000Z
|
2021-12-21T03:06:43.000Z
|
mega_nerf/datasets/filesystem_dataset.py
|
cuulee/mega-nerf
|
b38ea40b6ca53ae4423fcfb354ac13cd794827a4
|
[
"MIT"
] | null | null | null |
mega_nerf/datasets/filesystem_dataset.py
|
cuulee/mega-nerf
|
b38ea40b6ca53ae4423fcfb354ac13cd794827a4
|
[
"MIT"
] | null | null | null |
import math
import os
import shutil
from concurrent.futures import Future, ThreadPoolExecutor
from itertools import cycle
from pathlib import Path
from typing import List, Optional, Dict, Tuple
import numpy as np
import torch
from npy_append_array import NpyAppendArray
from torch.utils.data import Dataset
from mega_nerf.datasets.dataset_utils import get_image_data
from mega_nerf.image_metadata import ImageMetadata
from mega_nerf.misc_utils import main_tqdm, main_print
class FilesystemDataset(Dataset):
def __init__(self, metadata_items: List[ImageMetadata], near: float, far: float, ray_altitude_range: List[float],
center_pixels: bool, device: torch.device, chunk_paths: List[Path], num_chunks: int,
scale_factor: int, disk_flush_size: int):
super(FilesystemDataset, self).__init__()
append_arrays = self._check_existing_paths(chunk_paths, near, far, ray_altitude_range, center_pixels,
scale_factor, len(metadata_items))
if append_arrays is not None:
main_print('Reusing {} chunks from previous run'.format(len(append_arrays)))
self._append_arrays = append_arrays
else:
self._append_arrays = []
self._write_chunks(metadata_items, near, far, ray_altitude_range, center_pixels, device, chunk_paths,
num_chunks, scale_factor, disk_flush_size)
self._append_arrays.sort(key=lambda x: x.filename)
self._chunk_index = cycle(range(len(self._append_arrays)))
self._loaded_rgbs = None
self._loaded_rays = None
self._loaded_image_indices = None
self._chunk_load_executor = ThreadPoolExecutor(max_workers=1)
self._chunk_future = self._chunk_load_executor.submit(self._load_chunk_inner)
self._chosen = None
def load_chunk(self) -> None:
chosen, loaded_chunk = self._chunk_future.result()
self._chosen = chosen
self._loaded_rgbs = loaded_chunk[:, :3]
self._loaded_rays = loaded_chunk[:, 3:11]
self._loaded_image_indices = loaded_chunk[:, 11]
self._chunk_future = self._chunk_load_executor.submit(self._load_chunk_inner)
def get_state(self) -> str:
return self._chosen
def set_state(self, chosen: str) -> None:
while self._chosen != chosen:
self.load_chunk()
def __len__(self) -> int:
return self._loaded_rgbs.shape[0]
def __getitem__(self, idx) -> Dict[str, torch.Tensor]:
return {
'rgbs': self._loaded_rgbs[idx],
'rays': self._loaded_rays[idx],
'image_indices': self._loaded_image_indices[idx]
}
def _load_chunk_inner(self) -> Tuple[str, torch.FloatTensor]:
chosen = self._append_arrays[next(self._chunk_index)]
return str(chosen.filename), torch.FloatTensor(np.load(chosen.filename))
def _write_chunks(self, metadata_items: List[ImageMetadata], near: float, far: float,
ray_altitude_range: List[float], center_pixels: bool, device: torch.device,
chunk_paths: List[Path], num_chunks: int, scale_factor: int, disk_flush_size: int) -> None:
assert ('RANK' not in os.environ) or int(os.environ['LOCAL_RANK']) == 0
path_frees = []
total_free = 0
for chunk_path in chunk_paths:
(chunk_path / 'chunks').mkdir(parents=True)
_, _, free = shutil.disk_usage(chunk_path)
total_free += free
path_frees.append(free)
index = 0
for chunk_path, path_free in zip(chunk_paths, path_frees):
allocated = int(path_free / total_free * num_chunks)
main_print('Allocating {} chunks to dataset path {}'.format(allocated, chunk_path))
for j in range(allocated):
self._append_arrays.append(NpyAppendArray(str(chunk_path / 'chunks' / '{}.npy'.format(index))))
index += 1
main_print('{} chunks allocated'.format(index))
write_futures = []
rgbs = []
rays = []
indices = []
in_memory_count = 0
with ThreadPoolExecutor(max_workers=len(self._append_arrays)) as executor:
for metadata_item in main_tqdm(metadata_items):
image_data = get_image_data(metadata_item, near, far, ray_altitude_range, center_pixels, device)
if image_data is None:
continue
image_rgbs, image_rays, image_indices = image_data
rgbs.append(image_rgbs)
rays.append(image_rays)
indices.append(image_indices)
in_memory_count += len(image_rgbs)
if in_memory_count >= disk_flush_size:
for write_future in write_futures:
write_future.result()
write_futures = self._write_to_disk(executor, torch.cat(rgbs), torch.cat(rays), torch.cat(indices))
rgbs = []
rays = []
indices = []
in_memory_count = 0
for write_future in write_futures:
write_future.result()
if in_memory_count > 0:
write_futures = self._write_to_disk(executor, torch.cat(rgbs), torch.cat(rays), torch.cat(indices))
for write_future in write_futures:
write_future.result()
for chunk_path in chunk_paths:
torch.save({
'images': len(metadata_items),
'scale_factor': scale_factor,
'near': near,
'far': far,
'center_pixels': center_pixels,
'ray_altitude_range': ray_altitude_range,
}, chunk_path / 'metadata.pt')
main_print('Finished writing chunks to dataset paths')
def _check_existing_paths(self, chunk_paths: List[Path], near: float, far: float, ray_altitude_range: List[float],
center_pixels: bool, scale_factor: int, images: int) -> Optional[List[NpyAppendArray]]:
append_arrays = []
num_exist = 0
for chunk_path in chunk_paths:
if chunk_path.exists():
dataset_metadata = torch.load(chunk_path / 'metadata.pt', map_location='cpu')
assert dataset_metadata['images'] == images
assert dataset_metadata['scale_factor'] == scale_factor
assert dataset_metadata['near'] == near
assert dataset_metadata['far'] == far
assert dataset_metadata['center_pixels'] == center_pixels
if ray_altitude_range is not None:
assert (torch.allclose(torch.FloatTensor(dataset_metadata['ray_altitude_range']),
torch.FloatTensor(ray_altitude_range)))
else:
assert dataset_metadata['ray_altitude_range'] is None
for child in list((chunk_path / 'chunks').iterdir()):
append_arrays.append(NpyAppendArray(child))
num_exist += 1
if num_exist > 0:
assert num_exist == len(chunk_paths)
return append_arrays
else:
return None
def _write_to_disk(self, executor: ThreadPoolExecutor, rgbs: torch.Tensor, rays: torch.Tensor,
image_indices: torch.Tensor) -> List[Future[None]]:
to_store = torch.cat([rgbs, rays, image_indices.unsqueeze(-1)], -1)
indices = torch.randperm(to_store.shape[0])
num_chunks = len(self._append_arrays)
chunk_size = math.ceil(to_store.shape[0] / num_chunks)
futures = []
def append(index: int) -> None:
self._append_arrays[index].append(
to_store[indices[index * chunk_size:(index + 1) * chunk_size]].numpy())
for i in range(num_chunks):
future = executor.submit(append, i)
futures.append(future)
return futures
| 41.701031
| 119
| 0.612608
|
bec356652aec26864ed724f13a942508b1c2aa07
| 610
|
py
|
Python
|
tests/test_log.py
|
shahzebsiddiqui/buildtest-1
|
6c47424b82ec1e92ce0930f99be4ba10da62515c
|
[
"MIT"
] | null | null | null |
tests/test_log.py
|
shahzebsiddiqui/buildtest-1
|
6c47424b82ec1e92ce0930f99be4ba10da62515c
|
[
"MIT"
] | 42
|
2021-04-20T11:11:06.000Z
|
2022-03-30T16:54:24.000Z
|
tests/test_log.py
|
shahzebsiddiqui/buildtest-1
|
6c47424b82ec1e92ce0930f99be4ba10da62515c
|
[
"MIT"
] | null | null | null |
import tempfile
from buildtest.log import init_logfile
from buildtest.utils.file import read_file
def test_BuildTestLogger():
logger = init_logfile()
# ensure we have effective level of 10 (DEBUG)
assert logger.getEffectiveLevel() == 10
tf = tempfile.NamedTemporaryFile()
assert logger.name == "buildtest"
# writing message at each log level
logger.debug("DEBUG MESSAGE")
logger.info("INFO MESSAGE")
logger.warning("WARNING MESSAGE!")
logger.error("ERROR MESSAGE!!")
logger.critical("CRITICAL MESSAGE!!!")
content = read_file(tf.name)
print(content)
| 23.461538
| 50
| 0.704918
|
f7a27c26b3f6a35e21b4a64881b1ccb13f86a476
| 1,063
|
py
|
Python
|
Python_PI/Clase20.py
|
Alex8Navas/PythonPI
|
5f1eff48e8a28f364f5f0dbf25d7a4968a0025bd
|
[
"CC0-1.0"
] | null | null | null |
Python_PI/Clase20.py
|
Alex8Navas/PythonPI
|
5f1eff48e8a28f364f5f0dbf25d7a4968a0025bd
|
[
"CC0-1.0"
] | null | null | null |
Python_PI/Clase20.py
|
Alex8Navas/PythonPI
|
5f1eff48e8a28f364f5f0dbf25d7a4968a0025bd
|
[
"CC0-1.0"
] | null | null | null |
# Clase 20. Curso Píldoras Informáticas.
# Control de Flujo. Generadores 2.
# Instrucción yield from: simplifica el código para bucles anidados.
# El asterisco es un número indeterminado de elementos dados en forma de tupla.
def Cities(*ciudades):
for i in ciudades:
yield i
Ciudades = Cities("Madrid", "Barcelona", "Sevilla", "León")
print(next(Ciudades))
print(next(Ciudades))
# Acceder a los subelementos o letras.
def Cities2(*ciudades):
for i in ciudades:
for j in i:
yield j
Ciudades2 = Cities2("Madrid", "Barcelona", "Sevilla", "León")
print(next(Ciudades2))
print(next(Ciudades2))
print(next(Ciudades2))
print(next(Ciudades2))
print(next(Ciudades2))
print(next(Ciudades2))
print("\nCon Yield From")
# Con yield from.
def Cities2(*ciudades):
for i in ciudades:
yield from i
Ciudades2 = Cities2("Madrid", "Barcelona", "Sevilla", "León")
print(next(Ciudades2))
print(next(Ciudades2))
print(next(Ciudades2))
print(next(Ciudades2))
print(next(Ciudades2))
print(next(Ciudades2))
| 21.26
| 81
| 0.69238
|
b75a4a6f53f22887fd34266246da256b0fba40db
| 16,818
|
py
|
Python
|
tests/m2m_through/tests.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/m2m_through/tests.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/m2m_through/tests.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime
from operator import attrgetter
from django.test import TestCase
from .models import (
CustomMembership, Employee, Event, Friendship, Group, Ingredient,
Invitation, Membership, Person, PersonSelfRefM2M, Recipe, RecipeIngredient,
Relationship,
)
class M2mThroughTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name='Bob')
cls.jim = Person.objects.create(name='Jim')
cls.jane = Person.objects.create(name='Jane')
cls.rock = Group.objects.create(name='Rock')
cls.roll = Group.objects.create(name='Roll')
def test_retrieve_intermediate_items(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
expected = ['Jane', 'Jim']
self.assertQuerysetEqual(
self.rock.members.all(),
expected,
attrgetter("name")
)
def test_get_on_intermediate_model(self):
Membership.objects.create(person=self.jane, group=self.rock)
queryset = Membership.objects.get(person=self.jane, group=self.rock)
self.assertEqual(
repr(queryset),
'<Membership: Jane is a member of Rock>'
)
def test_filter_on_intermediate_model(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
queryset = Membership.objects.filter(group=self.rock)
expected = [
'<Membership: Jim is a member of Rock>',
'<Membership: Jane is a member of Rock>',
]
self.assertQuerysetEqual(
queryset,
expected
)
def test_cannot_use_add_on_m2m_with_intermediary_model(self):
msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.add(self.bob)
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.create(name='Annie')
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_cannot_use_remove_on_m2m_with_intermediary_model(self):
Membership.objects.create(person=self.jim, group=self.rock)
msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.remove(self.jim)
self.assertQuerysetEqual(
self.rock.members.all(),
['Jim'],
attrgetter("name")
)
def test_cannot_use_setattr_on_m2m_with_intermediary_model(self):
msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model'
members = list(Person.objects.filter(name__in=['Bob', 'Jim']))
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.set(members)
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_clear_removes_all_the_m2m_relationships(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
self.rock.members.clear()
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_retrieve_reverse_intermediate_items(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jim, group=self.roll)
expected = ['Rock', 'Roll']
self.assertQuerysetEqual(
self.jim.group_set.all(),
expected,
attrgetter("name")
)
def test_cannot_use_add_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.add(self.bob)
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.create(name='Funk')
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_cannot_use_remove_on_reverse_m2m_with_intermediary_model(self):
Membership.objects.create(person=self.bob, group=self.rock)
msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.remove(self.rock)
self.assertQuerysetEqual(
self.bob.group_set.all(),
['Rock'],
attrgetter('name')
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model'
members = list(Group.objects.filter(name__in=['Rock', 'Roll']))
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.set(members)
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_clear_on_reverse_removes_all_the_m2m_relationships(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jim, group=self.roll)
self.jim.group_set.clear()
self.assertQuerysetEqual(
self.jim.group_set.all(),
[]
)
def test_query_model_by_attribute_name_of_related_model(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
Membership.objects.create(person=self.bob, group=self.roll)
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(person=self.jane, group=self.roll)
self.assertQuerysetEqual(
Group.objects.filter(members__name='Bob'),
['Roll'],
attrgetter("name")
)
def test_order_by_relational_field_through_model(self):
CustomMembership.objects.create(person=self.jim, group=self.rock)
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jane, group=self.roll)
CustomMembership.objects.create(person=self.jim, group=self.roll)
self.assertSequenceEqual(
self.rock.custom_members.order_by('custom_person_related_name'),
[self.jim, self.bob]
)
self.assertSequenceEqual(
self.roll.custom_members.order_by('custom_person_related_name'),
[self.jane, self.jim]
)
def test_query_first_model_by_intermediate_model_attribute(self):
Membership.objects.create(
person=self.jane, group=self.roll,
invite_reason="She was just awesome."
)
Membership.objects.create(
person=self.jim, group=self.roll,
invite_reason="He is good."
)
Membership.objects.create(person=self.bob, group=self.roll)
qs = Group.objects.filter(
membership__invite_reason="She was just awesome."
)
self.assertQuerysetEqual(
qs,
['Roll'],
attrgetter("name")
)
def test_query_second_model_by_intermediate_model_attribute(self):
Membership.objects.create(
person=self.jane, group=self.roll,
invite_reason="She was just awesome."
)
Membership.objects.create(
person=self.jim, group=self.roll,
invite_reason="He is good."
)
Membership.objects.create(person=self.bob, group=self.roll)
qs = Person.objects.filter(
membership__invite_reason="She was just awesome."
)
self.assertQuerysetEqual(
qs,
['Jane'],
attrgetter("name")
)
def test_query_model_by_related_model_name(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
Membership.objects.create(person=self.bob, group=self.roll)
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(person=self.jane, group=self.roll)
self.assertQuerysetEqual(
Person.objects.filter(group__name="Rock"),
['Jane', 'Jim'],
attrgetter("name")
)
def test_query_model_by_custom_related_name(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
Person.objects.filter(custom__name="Rock"),
['Bob', 'Jim'],
attrgetter("name")
)
def test_query_model_by_intermediate_can_return_non_unique_queryset(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(
person=self.jane, group=self.rock,
date_joined=datetime(2006, 1, 1)
)
Membership.objects.create(
person=self.bob, group=self.roll,
date_joined=datetime(2004, 1, 1))
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(
person=self.jane, group=self.roll,
date_joined=datetime(2004, 1, 1))
qs = Person.objects.filter(
membership__date_joined__gt=datetime(2004, 1, 1)
)
self.assertQuerysetEqual(
qs,
['Jane', 'Jim', 'Jim'],
attrgetter("name")
)
def test_custom_related_name_forward_empty_qs(self):
self.assertQuerysetEqual(
self.rock.custom_members.all(),
[]
)
def test_custom_related_name_reverse_empty_qs(self):
self.assertQuerysetEqual(
self.bob.custom.all(),
[]
)
def test_custom_related_name_forward_non_empty_qs(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
self.rock.custom_members.all(),
['Bob', 'Jim'],
attrgetter("name")
)
def test_custom_related_name_reverse_non_empty_qs(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
self.bob.custom.all(),
['Rock'],
attrgetter("name")
)
def test_custom_related_name_doesnt_conflict_with_fky_related_name(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
self.assertQuerysetEqual(
self.bob.custom_person_related_name.all(),
['<CustomMembership: Bob is a member of Rock>']
)
def test_through_fields(self):
"""
Relations with intermediary tables with multiple FKs
to the M2M's ``to`` model are possible.
"""
event = Event.objects.create(title='Rockwhale 2014')
Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jim)
Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jane)
self.assertQuerysetEqual(
event.invitees.all(),
['Jane', 'Jim'],
attrgetter('name')
)
class M2mThroughReferentialTests(TestCase):
def test_self_referential_empty_qs(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
self.assertQuerysetEqual(
tony.friends.all(),
[]
)
def test_self_referential_non_symmetrical_first_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
self.assertQuerysetEqual(
tony.friends.all(),
['Chris'],
attrgetter("name")
)
def test_self_referential_non_symmetrical_second_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
def test_self_referential_non_symmetrical_clear_first_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
chris.friends.clear()
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
# Since this isn't a symmetrical relation, Tony's friend link still exists.
self.assertQuerysetEqual(
tony.friends.all(),
['Chris'],
attrgetter("name")
)
def test_self_referential_symmetrical(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
Friendship.objects.create(
first=chris, second=tony, date_friended=datetime.now()
)
self.assertQuerysetEqual(
tony.friends.all(),
['Chris'],
attrgetter("name")
)
self.assertQuerysetEqual(
chris.friends.all(),
['Tony'],
attrgetter("name")
)
def test_through_fields_self_referential(self):
john = Employee.objects.create(name='john')
peter = Employee.objects.create(name='peter')
mary = Employee.objects.create(name='mary')
harry = Employee.objects.create(name='harry')
Relationship.objects.create(source=john, target=peter, another=None)
Relationship.objects.create(source=john, target=mary, another=None)
Relationship.objects.create(source=john, target=harry, another=peter)
self.assertQuerysetEqual(
john.subordinates.all(),
['peter', 'mary', 'harry'],
attrgetter('name')
)
class M2mThroughToFieldsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.pea = Ingredient.objects.create(iname='pea')
cls.potato = Ingredient.objects.create(iname='potato')
cls.tomato = Ingredient.objects.create(iname='tomato')
cls.curry = Recipe.objects.create(rname='curry')
RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.potato)
RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.pea)
RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.tomato)
def test_retrieval(self):
# Forward retrieval
self.assertSequenceEqual(self.curry.ingredients.all(), [self.pea, self.potato, self.tomato])
# Backward retrieval
self.assertEqual(self.tomato.recipes.get(), self.curry)
def test_choices(self):
field = Recipe._meta.get_field('ingredients')
self.assertEqual(
[choice[0] for choice in field.get_choices(include_blank=False)],
['pea', 'potato', 'tomato']
)
| 35.782979
| 101
| 0.61779
|
9135155b29988300006e71bbf2f02da6eb8333da
| 1,789
|
py
|
Python
|
src/ralph/operations/tests/factories.py
|
andrzej-jankowski/ralph
|
68ec18a66b8fe47ddf1c082c3ce2d82b2cd430dc
|
[
"Apache-2.0"
] | null | null | null |
src/ralph/operations/tests/factories.py
|
andrzej-jankowski/ralph
|
68ec18a66b8fe47ddf1c082c3ce2d82b2cd430dc
|
[
"Apache-2.0"
] | null | null | null |
src/ralph/operations/tests/factories.py
|
andrzej-jankowski/ralph
|
68ec18a66b8fe47ddf1c082c3ce2d82b2cd430dc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import factory
from factory.django import DjangoModelFactory
from ralph.accounts.tests.factories import UserFactory
from ralph.operations.models import (
Change,
Failure,
Incident,
Operation,
OperationStatus,
OperationType,
Problem
)
def get_operation_type(name):
return OperationType.objects.get(name=name)
def get_operation_status(name):
return OperationStatus.objects.get(name=name)
class OperationTypeFactory(DjangoModelFactory):
name = factory.Iterator(['Problem', 'Incident', 'Failure', 'Change'])
class Meta:
model = OperationType
django_get_or_create = ['name']
class OperationStatusFactory(DjangoModelFactory):
name = factory.Iterator(['Open', 'Closed', 'Resolved', 'In Progress'])
class Meta:
model = OperationStatus
django_get_or_create = ['name']
class OperationFactory(DjangoModelFactory):
title = factory.Sequence(lambda n: 'Operation #%d' % n)
status = factory.LazyAttribute(lambda obj: get_operation_status('Open'))
type = factory.LazyAttribute(lambda obj: get_operation_type('Change'))
assignee = factory.SubFactory(UserFactory)
class Meta:
model = Operation
class ChangeFactory(OperationFactory):
class Meta:
model = Change
class FailureFactory(OperationFactory):
type = factory.LazyAttribute(lambda obj: get_operation_type('Failure'))
class Meta:
model = Failure
class ProblemFactory(OperationFactory):
type = factory.LazyAttribute(lambda obj: get_operation_type('Problem'))
class Meta:
model = Problem
class IncidentFactory(OperationFactory):
type = factory.LazyAttribute(lambda obj: get_operation_type('Incident'))
class Meta:
model = Incident
| 22.935897
| 76
| 0.711571
|
c826338f1b570f3bd5a73608076443b707972da0
| 5,703
|
py
|
Python
|
geophoto/settings.py
|
amatmv/geophoto-backend
|
56e823e8aa152dc80e40a65a7311c5561c965ee0
|
[
"MIT"
] | 1
|
2019-11-04T22:03:09.000Z
|
2019-11-04T22:03:09.000Z
|
geophoto/settings.py
|
amatmv/GeoPhoto_Server
|
56e823e8aa152dc80e40a65a7311c5561c965ee0
|
[
"MIT"
] | null | null | null |
geophoto/settings.py
|
amatmv/GeoPhoto_Server
|
56e823e8aa152dc80e40a65a7311c5561c965ee0
|
[
"MIT"
] | null | null | null |
"""
Django settings for geophoto project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import datetime
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8kt^jyi#q2q9cx_eo%revxtam_$m&(&3(o=pw1#lw95-ba&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'192.168.43.161',
'127.0.0.1',
'localhost'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'corsheaders',
'geophoto_api',
'storages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'geophoto.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTH_USER_MODEL = 'geophoto_api.User'
WSGI_APPLICATION = 'geophoto.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geophoto',
'USER': 'geophoto',
'PASSWORD': 'geophoto',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
# When you enable API versioning, the request.version attribute will contain a string
# that corresponds to the version requested in the incoming client request.
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
# Permission settings
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
# Authentication settings
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# JWT settings
JWT_AUTH = {
'JWT_ENCODE_HANDLER':
'rest_framework_jwt.utils.jwt_encode_handler',
'JWT_DECODE_HANDLER':
'rest_framework_jwt.utils.jwt_decode_handler',
'JWT_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_payload_handler',
'JWT_PAYLOAD_GET_USER_ID_HANDLER':
'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
'JWT_RESPONSE_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_response_payload_handler',
'JWT_SECRET_KEY': SECRET_KEY,
'JWT_GET_USER_SECRET_KEY': None,
'JWT_PUBLIC_KEY': None,
'JWT_PRIVATE_KEY': None,
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=10),
'JWT_AUDIENCE': None,
'JWT_ISSUER': None,
'JWT_ALLOW_REFRESH': False,
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_AUTH_HEADER_PREFIX': 'Bearer',
'JWT_AUTH_COOKIE': None,
}
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIR = (
os.path.join(BASE_DIR, 'static')
)
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
# Configuracio del bucket S3
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_S3_BASE_URL = 'https://s3-eu-west-3.amazonaws.com/'
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_S3_FILE_OVERWRITE = False
AWS_DEFAULT_ACL = None
| 27.418269
| 91
| 0.712257
|
d8b7ad8b51e958c3a36ff141a9426379d7588845
| 797
|
py
|
Python
|
teamspirit/profiles/migrations/0013_auto_20200914_1528.py
|
etienne86/oc_p13_team_spirit
|
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
|
[
"MIT"
] | null | null | null |
teamspirit/profiles/migrations/0013_auto_20200914_1528.py
|
etienne86/oc_p13_team_spirit
|
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
|
[
"MIT"
] | null | null | null |
teamspirit/profiles/migrations/0013_auto_20200914_1528.py
|
etienne86/oc_p13_team_spirit
|
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-09-14 13:28
from django.db import migrations, models
import teamspirit.profiles.managers
class Migration(migrations.Migration):
dependencies = [
('profiles', '0012_auto_20200909_1836'),
]
operations = [
migrations.AlterField(
model_name='personal',
name='id_file',
field=models.FileField(blank=True, null=True, upload_to=teamspirit.profiles.managers.rename_id_file, verbose_name="Pièce d'identité"),
),
migrations.AlterField(
model_name='personal',
name='medical_file',
field=models.FileField(blank=True, null=True, upload_to=teamspirit.profiles.managers.rename_medical_file, verbose_name='Certificat médical ou licence'),
),
]
| 31.88
| 164
| 0.664994
|
b428a5455cdd21b28df2d19edc9661537fd86964
| 1,676
|
py
|
Python
|
final_round.py
|
priyansha147/Music-Recommendation-System
|
ba1ac96ddf2789b48f7102b01e35c0919b0207aa
|
[
"MIT"
] | null | null | null |
final_round.py
|
priyansha147/Music-Recommendation-System
|
ba1ac96ddf2789b48f7102b01e35c0919b0207aa
|
[
"MIT"
] | null | null | null |
final_round.py
|
priyansha147/Music-Recommendation-System
|
ba1ac96ddf2789b48f7102b01e35c0919b0207aa
|
[
"MIT"
] | null | null | null |
import scipy.io as sio
from math import ceil,floor
from statistics import mean
from numpy import square,sqrt,absolute
matfile = sio.loadmat( 'changed_param_2.mat' )
old_mat = sio.loadmat( 'learned_all_param_2.mat' )
#for i in range( 15 ):
#print matfile['p'][[11 ,67 ,225,336,357,444,635,679],0],old_mat['p'][[11 ,67 ,225,336,357,444,635,679],0]
count = 0
mea_ceil = []
mea_floor = []
mea_old_ceil = []
mea_round = []
mea_old_round = []
mea_old_floor = []
for a in matfile['new_ratings']:
if int( a ) > 0:
original_rating = matfile[count,0]
old_rating = old_mat['p'][count,0]
mea_ceil.append( absolute( int( a ) - ceil( original_rating ) ) )
mea_round.append( absolute( int( a ) - round( original_rating ) ) )
mea_floor.append( absolute( int( a ) - floor( original_rating ) ) )
mea_old_ceil.append( absolute( int( a ) - ceil( old_rating ) ) )
mea_old_floor.append( absolute( int( a ) - floor( old_rating ) ) )
mea_old_round.append( absolute( int( a ) - round( old_rating ) ) )
#print count,a,original_rating,old_rating
count = count + 1
print mean( mea_ceil ),mean( mea_floor ), mean(mea_round)
print mean( mea_old_ceil ),mean( mea_old_floor ),mean(mea_old_round)
print sqrt( mean( square( mea_ceil ) ) ),sqrt( mean( square( mea_floor ) ) ),sqrt( mean( square( mea_round ) ) )
print sqrt( mean( square( mea_old_ceil ) ) ),sqrt( mean( square( mea_old_floor ) ) ),sqrt( mean( square( mea_old_round ) ) )
print mea_ceil.count( 0 )/len(mea_ceil),mea_floor.count( 0 )/len(mea_floor),mea_round.count( 0 )/len(mea_round)
print mea_old_ceil.count( 0 )/len(mea_old_ceil),mea_old_floor.count( 0 )/len(mea_old_floor),mea_old_round.count( 0 )/len(mea_old_round)
| 39.904762
| 135
| 0.701074
|
f55098fbb6ebca16b705ea2a7249ebe65a3aa395
| 8,673
|
py
|
Python
|
wildfire.py
|
MoveInc/TanFire
|
f67200d38df7781fe65b1215962da8e3fffba3f0
|
[
"MIT"
] | 1
|
2017-09-29T01:37:59.000Z
|
2017-09-29T01:37:59.000Z
|
wildfire.py
|
MoveInc/TanFire
|
f67200d38df7781fe65b1215962da8e3fffba3f0
|
[
"MIT"
] | null | null | null |
wildfire.py
|
MoveInc/TanFire
|
f67200d38df7781fe65b1215962da8e3fffba3f0
|
[
"MIT"
] | 1
|
2021-04-02T21:38:53.000Z
|
2021-04-02T21:38:53.000Z
|
import ConfigParser, urllib, urllib2, time, datetime, threading, os, requests
import xml.etree.ElementTree as ET
config = ConfigParser.ConfigParser()
config.read('config.cfg')
#Set debug level
if config.get('config','debug') == 'yes': debug = True
else: debug = False
if config.get('config','advanced_debug') == 'yes': advancedDebug = True
else: advancedDebug = False
#Processes Dictionary of unique hashes returning a Dictionary of unique hashes with their WildFire results. The "list" input is only used to know where to copy a new file from.
def WildFire(list, unique, tanium_handler):
if debug: print ("\n\nMODULE WILDFIRE")
if debug: print ("FUNCTION wildfire.WildFire")
if debug: print (" Incuded hashes: " + str(len(list)))
if debug: print (" Unique included hashes: " + str(len(unique)))
new = {}
updated = {}
upload = {}
uploaded = {}
cached = {}
not_found = {}
wf_hashes = {}
uploaded_count = 0
#Read in WF results from local file cache
cached = Cache()
#If unique hash is not in cache add it to the new dictionary
for hash in unique:
if not hash in cached:
new[hash] = ''
#Check new hashes against WF
updated, upload = Check(new, 'no')
wf_upload = config.get('config','wf_upload')
if wf_upload != 'yes':
if debug: print ("\nFUNCTION wildfire.WildFire: Skipping file upload")
elif len(upload) > 0:
upload_list = Copy(list, upload, tanium_handler)
time.sleep(60)
uploaded_count = Upload(upload_list)
wf_wait_time = float(config.get('config','wf_wait_time'))
if debug: print ("\nFUNCTION wildfire.WildFire: Sleeping " + str(wf_wait_time) + " seconds.")
time.sleep(wf_wait_time)
uploaded, not_found = Check(upload, 'yes')
#Combine updated & uploaded Dictionaries then update the local file cache
updated.update(uploaded)
Update_Cache(updated)
#Combine cached and updated Dictionaries into wf_hashes and compute stats
wf_hashes.update(cached)
wf_hashes.update(updated)
wf_stats = {'wf_cache':len(unique)-len(new), 'wf_new':len(new), 'wf_uploaded':uploaded_count}
#Download malware reports
Download_Reports(wf_hashes)
return(wf_hashes, wf_stats)
#Read in WF results from local file cache
def Cache():
if debug: print ("\nFUNCTION wildfire.Cache")
file = open('wf_cache.txt')
hashes = {}
for line in file:
hash = line.rstrip()
list = hash.split(',')#Hash, Malware Status
hashes[list[0]] = [list[1], 'no', 'no']
file.close()
if debug: print (" Total hashes in cache: " + str(len(hashes)))
return(hashes)
#Update local cache file with new WF results
def Update_Cache(updated):
if debug: print ("\nFUNCTION wildfire.UpdateCache")
if debug: print (" Hashes to add to cache: " + str(len(updated)))
if len(updated)>0:
file = open('wf_cache.txt', 'a')
for hash in updated:
malware = updated[hash][0]
if (malware == 'yes' or malware == 'no' or malware == 'grayware'):
line = hash + ',' + malware + '\n'
file.write(line)
file.close()
#Check new hashes against WF
def Check(new, wf_upload):
if debug: print ("\nFUNCTION wildfire.Check")
if debug: print (" Hashes to check: " + str(len(new)))
updated = {}
upload = {}
for hash in new:
#Sample File: https://wildfire.paloaltonetworks.com/publicapi/test/pe
#malware no: 3ee766cf1827c5afa1ac3cccdd29d629
#malware yes: 2c4bb9f9cf82f797eba0e2cf26fc5275
#grayware: 455d55000d14b5cdd9e7e6773887a24b
#hash not found: 65ea57712340c09b1b0c427b4848ae05
try:
time.sleep(1)
malware = ''
apikey = config.get('config','wf_apikey')
url = config.get('config','wf_url')
values = {'hash' : hash,
'format' : 'xml',
'apikey' : apikey }
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
results = response.read()
root = ET.fromstring(results)
#Return malware status from XML
malware = root[1][0].text
updated[hash] = [malware, 'yes', wf_upload]
except (urllib2.HTTPError) as malware:
upload[hash] = 'not found'
if advancedDebug: print (' ' + hash + ', ' + str(malware))
return(updated, upload)
#Copy files from source systems to central share. Share needs to be writable by Authenticated Computers.
def Copy(list, upload, tanium_handler):
if debug: print("\nFUNCTION wildfire.Copy")
if debug: print(" Files to copy: " + str(len(upload)))
upload_list = []
unique = {}
for i in list:
hash = i[4]
if hash in upload:
if not hash in unique:
unique[hash] = ''
upload_list.append(i)
length = len(upload_list)
x = 0
threads = []
while x < length:
try:
file = upload_list[x]
endpoint = file[0]
path = file[2] + "\\" + file[1]
#Check if list will be out of bounds
if x+1 < length:
next_endpoint = upload_list[x+1][0]
#If the next entry is for the same Endpoint append the file path so only one copy file package action is run per endpoint.
while endpoint == next_endpoint and x+1 < length:
x += 1
file = upload_list[x]
add_path = file[2] + "\\" + file[1]
path += '\,' + add_path
if x+1 < length:
next_endpoint = upload_list[x+1][0]
#Use threading to call copy file package so they can be run in paralell due to the Tanium targeting question taking 2 minuets to complete. https://pymotw.com/2/threading/
t = threading.Thread(target=Tanium_Copy, args=(tanium_handler,endpoint,path))
t.setDaemon(True)
threads.append(t)
time.sleep(5)
t.start()
x+=1
except:
print ("wildfire.Copy function FAILED")
return(upload_list)
#Execute Tanium's Copy File package
def Tanium_Copy(handler,endpoint,path):
if debug: print ("\nFUNCTION Tanium_Copy")
try:
if debug: print (' ' + endpoint + ': ' + path)
share_name = config.get('config','share_name')
kwargs = {}
kwargs["run"] = True
kwargs["action_filters"] = u'Computer Name, that contains:' + endpoint
kwargs["package"] = u'Copy Tools - Copy Files to Central Location{$1=SMB,$2=' + share_name + ',$3=0,$4=0,$5=' + path + ',$6=No,$7=0,$8=files}'
#This will take 2 minutes for tanium to complete the question
handler.deploy_action(**kwargs)
#response = handler.deploy_action(**kwargs)
if debug: print ("\nFUNCTION copyFileTanium END " + endpoint)
except:
print ("wildfire.Tanium_Copy function FAILED")
#Upload files for analysis to WildFire
def Upload(upload_list):
if debug: print ("\nFUNCTION wildfire.upload")
if debug: print (" Files to upload: " + str(len(upload_list)))
uploaded_count = 0
url = config.get('config','wf_submit')
now = datetime.datetime.now()
apikey = config.get('config','wf_apikey')
max_size = int(config.get('config','wf_size'))
local_share_path = config.get('config','local_share_path')
for file in upload_list:
try:
path = file[2] + "\\" + file[1]
computer = file[0]
name = computer.split('.', 1)[0]
folder = str(now.year) + '-' + '{:02d}'.format(now.month) + '-' + '{:02d}'.format(now.day) + '-' + name
path = local_share_path + "\\" + folder + path[2:]
path = path.replace("\\\\","\\")
#Verify the file exists and is less than the max size before uploading
exists = os.path.isfile(path)
size = os.path.getsize(path) < max_size
if(exists and size):
if advancedDebug: print "Uploading " + computer + ": " + path + " - " + file[2]
files = {'file': open(path, 'rb')}
time.sleep(3)
r = requests.post(url, files=files, data={'apikey':apikey})
#Count hashes of files uploaded to WildFire
uploaded_count += 1
if debug:
print (path)
print (file[2]) #Hash
print (r)
except:
print ("wildfire.Upload function FAILED for " + computer + ": " + path)
return(uploaded_count)
#Download WildFire PDF reports for all malware hashes
def Download_Reports(wf_hashes):
if debug: print ("\nFUNCTION wildfire.Download_Reports")
apikey = config.get('config','wf_apikey')
url = config.get('config','wf_url')
report_count = 0
for hash in wf_hashes:
try:
md5 = hash
wf_malware = wf_hashes[md5][0]
filename = md5 + '.pdf'
exists = os.path.isfile('reports\\' + filename)
if wf_malware == 'yes' and not exists:
values = {'hash' : md5,
'format' : 'pdf',
'apikey' : apikey }
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
CHUNK = 16 * 1024
with open('reports\\' + filename, 'wb') as f:
while True:
chunk = response.read(CHUNK)
if not chunk:
break
f.write(chunk)
report_count += 1
except:
print (" Download_Reports failed for: " + md5)
if debug: print (" Malware reports downloaded: " + str(report_count))
| 31.197842
| 176
| 0.665514
|
34808211e1f3241b4c49e8d0c0b0cefd22ab2ba6
| 1,016
|
py
|
Python
|
src/dqn_lunar_lander.py
|
michalnand/reinforcement_learning_tutorial
|
fa7d2443821c3bae236df126e65b3c8d12d0d438
|
[
"MIT"
] | 2
|
2021-07-30T07:46:46.000Z
|
2021-07-30T13:26:42.000Z
|
src/dqn_lunar_lander.py
|
michalnand/reinforcement_learning_tutorial
|
fa7d2443821c3bae236df126e65b3c8d12d0d438
|
[
"MIT"
] | null | null | null |
src/dqn_lunar_lander.py
|
michalnand/reinforcement_learning_tutorial
|
fa7d2443821c3bae236df126e65b3c8d12d0d438
|
[
"MIT"
] | null | null | null |
import time
import gym
from agents.agent_dqn import *
from models.lunar_lander_model_dqn import *
#environment wrapper, reward scaling
class SetRewardRange(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def step(self, action):
obs, reward, done, info = self.env.step(action)
reward = reward / 100.0
return obs, reward, done, info
#create environment
env = gym.make("LunarLander-v2")
env = SetRewardRange(env)
env.reset()
#create DQN agent
agent = AgentDQN(env, ModelDQN)
'''
#train, uncomment for run training
for iteration in range(1000000):
agent.main()
if iteration%256 == 0:
print("iterations = ", iteration, " score = ", agent.score_episode)
env.render()
#save model
agent.save("./models/trained/lunar_lander_")
'''
#load model
agent.load("./models/trained/lunar_lander_")
agent.epsilon = 0.02
#show how's running
while True:
agent.main()
env.render()
time.sleep(0.01)
| 19.538462
| 75
| 0.673228
|
63ccc24655eb935a94a86c132acf0ca81e91e6ac
| 188
|
py
|
Python
|
Moves/TiltForward.py
|
johan--/PoMoCo_RobCook
|
0ca95286b0f21803ed3a33cbad6d11fce4d7172a
|
[
"MIT",
"Unlicense"
] | 10
|
2015-07-14T05:23:56.000Z
|
2021-08-07T16:46:42.000Z
|
Moves/TiltForward.py
|
rpcook/PoMoCo
|
08f5170006bafabc2d70d5a681b62f7448afdbd2
|
[
"Unlicense",
"MIT"
] | 6
|
2015-05-02T23:10:38.000Z
|
2015-05-02T23:15:39.000Z
|
Moves/TiltForward.py
|
rpcook/PoMoCo
|
08f5170006bafabc2d70d5a681b62f7448afdbd2
|
[
"Unlicense",
"MIT"
] | 10
|
2015-03-13T08:10:43.000Z
|
2021-08-08T04:12:42.000Z
|
import time
# Move: Tilt Forward
hexy.LF.setFootY(floor/4)
hexy.LM.setFootY(floor/2)
hexy.LB.setFootY(floor)
hexy.RF.setFootY(floor/4)
hexy.RM.setFootY(floor/2)
hexy.RB.setFootY(floor)
| 15.666667
| 25
| 0.760638
|
089f6ead4522f0b5a93472e64d62e48766dd94d8
| 1,117
|
py
|
Python
|
login_test.py
|
rochman-l/trello_python_tests
|
610549bf845dbd2475a49117a383aed464a85aa3
|
[
"Apache-2.0"
] | null | null | null |
login_test.py
|
rochman-l/trello_python_tests
|
610549bf845dbd2475a49117a383aed464a85aa3
|
[
"Apache-2.0"
] | null | null | null |
login_test.py
|
rochman-l/trello_python_tests
|
610549bf845dbd2475a49117a383aed464a85aa3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from selenium.webdriver.chrome.webdriver import WebDriver
import unittest
class test_login(unittest.TestCase):
def setUp(self):
self.driver = WebDriver()
self.driver.implicitly_wait(10)
def test_login_atlassian_account(self):
driver = self.driver
self.open_home_page(driver)
self.login(driver, user = 'rochman.elena@gmail.com', password = '12345.com')
def open_home_page(self, driver):
driver.get("https://trello.com/")
def login(self, driver, user, password):
driver.find_element_by_css_selector("[href='/login']").click()
driver.find_element_by_name("user").click()
driver.find_element_by_name("user").clear()
driver.find_element_by_name("user").send_keys(user)
driver.find_element_by_id("login").click()
driver.find_element_by_css_selector("#password").click()
driver.find_element_by_css_selector("#password").clear()
driver.find_element_by_css_selector("#password").send_keys(password)
driver.find_element_by_css_selector("#login-submit").click()
| 37.233333
| 84
| 0.691137
|
d9e829ad59dafed82129cda3482353b120cadca7
| 3,074
|
py
|
Python
|
selfdrive/can/tests/test_packer_hyundai.py
|
matthewklinko/openpilot
|
b0563a59684d0901f99abbb58ac1fbd729ded1f9
|
[
"MIT"
] | 4
|
2019-02-12T03:06:31.000Z
|
2020-07-17T03:54:46.000Z
|
selfdrive/can/tests/test_packer_hyundai.py
|
matthewklinko/openpilot
|
b0563a59684d0901f99abbb58ac1fbd729ded1f9
|
[
"MIT"
] | 3
|
2020-09-08T07:21:59.000Z
|
2020-09-08T07:22:07.000Z
|
selfdrive/can/tests/test_packer_hyundai.py
|
matthewklinko/openpilot
|
b0563a59684d0901f99abbb58ac1fbd729ded1f9
|
[
"MIT"
] | 4
|
2019-05-21T19:02:46.000Z
|
2020-03-24T14:27:45.000Z
|
import unittest
import random
from selfdrive.can.tests.packer_old import CANPacker as CANPackerOld
from selfdrive.can.packer import CANPacker
import selfdrive.car.hyundai.hyundaican as hyundaican
from selfdrive.car.hyundai.values import CHECKSUM as hyundai_checksum
class TestPackerMethods(unittest.TestCase):
def setUp(self):
self.hyundai_cp_old = CANPackerOld("hyundai_kia_generic")
self.hyundai_cp = CANPacker("hyundai_kia_generic")
def test_correctness(self):
# Test all commands, randomize the params.
for _ in xrange(1000):
# Hyundai
car_fingerprint = hyundai_checksum["crc8"][0]
apply_steer = (random.randint(0, 2) % 2 == 0)
steer_req = (random.randint(0, 2) % 2 == 0)
cnt = random.randint(0, 65536)
enabled = (random.randint(0, 2) % 2 == 0)
lkas11 = {
"CF_Lkas_LdwsSysState": random.randint(0,65536),
"CF_Lkas_SysWarning": random.randint(0,65536),
"CF_Lkas_LdwsLHWarning": random.randint(0,65536),
"CF_Lkas_LdwsRHWarning": random.randint(0,65536),
"CF_Lkas_HbaLamp": random.randint(0,65536),
"CF_Lkas_FcwBasReq": random.randint(0,65536),
"CF_Lkas_ToiFlt": random.randint(0,65536),
"CF_Lkas_HbaSysState": random.randint(0,65536),
"CF_Lkas_FcwOpt": random.randint(0,65536),
"CF_Lkas_HbaOpt": random.randint(0,65536),
"CF_Lkas_FcwSysState": random.randint(0,65536),
"CF_Lkas_FcwCollisionWarning": random.randint(0,65536),
"CF_Lkas_FusionState": random.randint(0,65536),
"CF_Lkas_FcwOpt_USM": random.randint(0,65536),
"CF_Lkas_LdwsOpt_USM": random.randint(0,65536)
}
hud_alert = random.randint(0, 65536)
keep_stock = (random.randint(0, 2) % 2 == 0)
m_old = hyundaican.create_lkas11(self.hyundai_cp_old, car_fingerprint, apply_steer, steer_req, cnt, enabled,
lkas11, hud_alert, keep_stock)
m = hyundaican.create_lkas11(self.hyundai_cp, car_fingerprint, apply_steer, steer_req, cnt, enabled,
lkas11, hud_alert, keep_stock)
self.assertEqual(m_old, m)
clu11 = {
"CF_Clu_CruiseSwState": random.randint(0,65536),
"CF_Clu_CruiseSwMain": random.randint(0,65536),
"CF_Clu_SldMainSW": random.randint(0,65536),
"CF_Clu_ParityBit1": random.randint(0,65536),
"CF_Clu_VanzDecimal": random.randint(0,65536),
"CF_Clu_Vanz": random.randint(0,65536),
"CF_Clu_SPEED_UNIT": random.randint(0,65536),
"CF_Clu_DetentOut": random.randint(0,65536),
"CF_Clu_RheostatLevel": random.randint(0,65536),
"CF_Clu_CluInfo": random.randint(0,65536),
"CF_Clu_AmpInfo": random.randint(0,65536),
"CF_Clu_AliveCnt1": random.randint(0,65536),
}
button = random.randint(0, 65536)
m_old = hyundaican.create_clu11(self.hyundai_cp_old, clu11, button)
m = hyundaican.create_clu11(self.hyundai_cp, clu11, button)
self.assertEqual(m_old, m)
if __name__ == "__main__":
unittest.main()
| 43.295775
| 114
| 0.671438
|
e65fbaf3f09e2c7b9ea57f614d8ea7cd1de1fc9c
| 6,581
|
py
|
Python
|
python/WAMFactorGraphExample.py
|
kalyanvasudev/gpmp2
|
1ee99c743d978ab20dc804c8cd9cfa7813084957
|
[
"BSD-3-Clause"
] | null | null | null |
python/WAMFactorGraphExample.py
|
kalyanvasudev/gpmp2
|
1ee99c743d978ab20dc804c8cd9cfa7813084957
|
[
"BSD-3-Clause"
] | null | null | null |
python/WAMFactorGraphExample.py
|
kalyanvasudev/gpmp2
|
1ee99c743d978ab20dc804c8cd9cfa7813084957
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from gtsam import *
from gpmp2 import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D #<-- Note the capitalization!
from gpmp_utils.generate3Ddataset import generate3Ddataset
from gpmp_utils.signedDistanceField3D import signedDistanceField3D
from gpmp_utils.generateArm import generateArm
from gpmp_utils.plotMap3D import plotMap3D
from gpmp_utils.plotRobotModel import plotRobotModel
from gpmp_utils.set3DPlotRange import set3DPlotRange
from gpmp_utils.plotArm import plotArm
# dataset
dataset = generate3Ddataset('WAMDeskDataset')
origin = np.asarray([dataset.origin_x, dataset.origin_y, dataset.origin_z])
origin_point3 = Point3(origin)
cell_size = dataset.cell_size
# sdf
print('calculating signed distance field ...');
field = signedDistanceField3D(dataset.map, dataset.cell_size)
print('calculating signed distance field done')
# arm: WAM arm
arm = generateArm('WAMArm')
start_conf = np.asarray([-0.8,-1.70,1.64,1.29,1.1,-0.106,2.2])
end_conf = np.asarray([-0.0,0.94,0,1.6,0,-0.919,1.55])
start_vel = np.zeros(7)
end_vel = np.zeros(7)
# plot problem setting
figure0 = plt.figure(0)
axis0 = Axes3D(figure0)
axis0.set_title('Problem Settings')
set3DPlotRange(figure0, axis0, dataset)
plotRobotModel(figure0, axis0, arm, start_conf)
plotRobotModel(figure0, axis0, arm, end_conf)
plotMap3D(figure0, axis0, dataset.corner_idx, origin, cell_size)
## settings
total_time_sec = 2.0
total_time_step = 10
total_check_step = 100
delta_t = total_time_sec / total_time_step
check_inter = total_check_step / total_time_step - 1
# GP
Qc = np.identity(7)
Qc_model = noiseModel_Gaussian.Covariance(Qc)
# algo settings
cost_sigma = 0.02
epsilon_dist = 0.2
# noise model
fix_sigma = 0.0001
pose_fix_model = noiseModel_Isotropic.Sigma(7, fix_sigma)
vel_fix_model = noiseModel_Isotropic.Sigma(7, fix_sigma)
# init sdf
sdf = SignedDistanceField(origin_point3, cell_size, field.shape[0],
field.shape[1], field.shape[2])
for z in range(field.shape[2]):
sdf.initFieldData(z, field[:,:,z]) #TODO: check this line with its matlab counterpart
#% plot settings
plot_inter_traj = False
plot_inter = 4
if plot_inter_traj:
total_plot_step = total_time_step * (plot_inter + 1)
else:
total_plot_step = total_time_step
pause_time = total_time_sec / total_plot_step
## initial traj
init_values = initArmTrajStraightLine(start_conf, end_conf, total_time_step)
# plot initial traj
if plot_inter_traj:
plot_values = interpolateArmTraj(init_values, Qc_model, delta_t, plot_inter)
else:
plot_values = init_values
# plot init values
figure1 = plt.figure(1)
axis1 = Axes3D(figure1)
axis1.set_title('Initial Values')
# plot world
plotMap3D(figure1, axis1, dataset.corner_idx, origin, cell_size)
set3DPlotRange(figure1, axis1, dataset)
for i in range(total_plot_step):
conf = plot_values.atVector(symbol(ord('x'), i));
plotArm(figure1, axis1, arm.fk_model(), conf, 'b', 2)
plt.pause(pause_time)
## init optimization
graph = NonlinearFactorGraph()
graph_obs = NonlinearFactorGraph()
for i in range(total_time_step+1):
key_pos = symbol(ord('x'), i)
key_vel = symbol(ord('v'), i)
# priors
if i==0:
graph.push_back(PriorFactorVector(key_pos, start_conf, pose_fix_model))
graph.push_back(PriorFactorVector(key_vel, start_vel, vel_fix_model))
elif i==total_time_step:
graph.push_back(PriorFactorVector(key_pos, end_conf, pose_fix_model))
graph.push_back(PriorFactorVector(key_vel, end_vel, vel_fix_model))
# GP priors and cost factor
if i > 0:
key_pos1 = symbol(ord('x'), i-1)
key_pos2 = symbol(ord('x'), i)
key_vel1 = symbol(ord('v'), i-1)
key_vel2 = symbol(ord('v'), i)
graph.push_back(GaussianProcessPriorLinear(key_pos1, key_vel1,
key_pos2, key_vel2, delta_t, Qc_model))
# cost factor
graph.push_back(ObstacleSDFFactorArm(
key_pos, arm, sdf, cost_sigma, epsilon_dist))
graph_obs.push_back(ObstacleSDFFactorArm(
key_pos, arm, sdf, cost_sigma, epsilon_dist))
# GP cost factor
if check_inter > 0:
for j in range(1, check_inter+1):
tau = j * (total_time_sec / total_check_step)
graph.push_back(ObstacleSDFFactorGPArm(
key_pos1, key_vel1, key_pos2, key_vel2,
arm, sdf, cost_sigma, epsilon_dist,
Qc_model, delta_t, tau))
graph_obs.push_back(ObstacleSDFFactorGPArm(
key_pos1, key_vel1, key_pos2, key_vel2,
arm, sdf, cost_sigma, epsilon_dist,
Qc_model, delta_t, tau))
## optimize!
use_LM = False
use_trustregion_opt = True
if use_LM:
parameters = LevenbergMarquardtParams() # Todo: check why this fails
parameters.setVerbosity('ERROR')
#parameters.setVerbosityLM('LAMBDA');
parameters.setlambdaInitial(1000.0)
optimizer = LevenbergMarquardtOptimizer(graph, init_values, parameters)
elif use_trustregion_opt:
parameters = DoglegParams()
parameters.setVerbosity('ERROR')
optimizer = DoglegOptimizer(graph, init_values, parameters)
else:
parameters = GaussNewtonParams()
parameters.setVerbosity('ERROR')
optimizer = GaussNewtonOptimizer(graph, init_values, parameters)
print('Initial Error = %d\n', graph.error(init_values))
print('Initial Collision Cost: %d\n', graph_obs.error(init_values))
optimizer.optimizeSafely()
result = optimizer.values()
print('Error = %d\n', graph.error(result))
print('Collision Cost End: %d\n', graph_obs.error(result))
# plot results
if plot_inter_traj:
plot_values = interpolateArmTraj(result, Qc_model, delta_t, plot_inter)
else:
plot_values = result
# plot final values
figure2 = plt.figure(2)
axis2 = Axes3D(figure2)
axis2.set_title('Result Values')
plotMap3D(figure2, axis2, dataset.corner_idx, origin, cell_size)
set3DPlotRange(figure2, axis2, dataset)
for i in range(total_plot_step):
conf = plot_values.atVector(symbol(ord('x'), i))
plotArm(figure2, axis2, arm.fk_model(), conf, 'b', 2)
plt.pause(pause_time)
# plot final values
figure3 = plt.figure(3)
axis3 = Axes3D(figure3)
axis3.set_title('Result Values')
plotMap3D(figure3, axis3, dataset.corner_idx, origin, cell_size)
set3DPlotRange(figure3, axis3, dataset)
for i in range(total_plot_step):
conf = plot_values.atVector(symbol(ord('x'), i))
plotRobotModel(figure3, axis3, arm, conf)
plt.pause(pause_time)
plt.show()
| 31.792271
| 89
| 0.718128
|
93242fcbe48ee00db7896aad48c99e68ee145d74
| 1,189
|
py
|
Python
|
services/backend/project/__init__.py
|
darshant5/vuejs-flask-docker
|
b17ce0000e2d8a9288821448c8b0238630d84c52
|
[
"MIT"
] | 16
|
2020-05-03T19:58:58.000Z
|
2021-08-23T19:37:13.000Z
|
services/backend/project/__init__.py
|
darshant5/vuejs-flask-docker
|
b17ce0000e2d8a9288821448c8b0238630d84c52
|
[
"MIT"
] | 3
|
2021-09-01T20:34:17.000Z
|
2022-02-27T18:09:27.000Z
|
services/backend/project/__init__.py
|
darshant5/vuejs-flask-docker
|
b17ce0000e2d8a9288821448c8b0238630d84c52
|
[
"MIT"
] | 13
|
2020-05-03T22:00:30.000Z
|
2021-06-20T02:44:20.000Z
|
# services/aktiver/project/__init__.py
import os
from flask import Flask
from flask_admin import Admin
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_bcrypt import Bcrypt
#import sentry_sdk
#from sentry_sdk.integrations.flask import FlaskIntegration
# instantiate the extensions
db = SQLAlchemy()
cors = CORS()
bcrypt = Bcrypt()
admin = Admin(template_mode="bootstrap3")
def create_app(script_info=None):
# Initialize Sentry.io
#sentry_sdk.init(
# dsn="http://bb28b1aa56e8475e964719f4f3416e63@sentry:9000/1",
# integrations=[FlaskIntegration()]
#)
# instantiate the app
app = Flask(__name__)
# set config
app_settings = os.getenv("APP_SETTINGS")
app.config.from_object(app_settings)
# set up extensions
db.init_app(app)
cors.init_app(app, resources={r"*": {"origins": "*"}})
bcrypt.init_app(app)
if os.getenv("FLASK_ENV") == "development":
admin.init_app(app)
# register api
from project.api import api
api.init_app(app)
# shell context for flask cli
@app.shell_context_processor
def ctx():
return {"app": app, "db": db}
return app
| 21.618182
| 69
| 0.695542
|
c1dd16119721b27b417a5fecc9452db2418e68db
| 4,327
|
py
|
Python
|
src/pbrdna/barcode/trim_barcodes.py
|
bioinformaticsgeek/rDnaTools
|
54998b1f1305e80c8e4681c0473ff27021cdc9c0
|
[
"BSD-3-Clause"
] | 9
|
2015-10-02T10:57:19.000Z
|
2021-05-07T09:41:45.000Z
|
src/pbrdna/barcode/trim_barcodes.py
|
bioinformaticsgeek/rDnaTools
|
54998b1f1305e80c8e4681c0473ff27021cdc9c0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pbrdna/barcode/trim_barcodes.py
|
bioinformaticsgeek/rDnaTools
|
54998b1f1305e80c8e4681c0473ff27021cdc9c0
|
[
"BSD-3-Clause"
] | 14
|
2015-01-30T06:56:12.000Z
|
2020-12-28T17:44:32.000Z
|
#! /usr/bin/env python
import csv, sys, log
from collections import namedtuple
from pbcore.io.FastaIO import FastaReader, FastaWriter, FastaRecord
from pbcore.io.FastqIO import FastqReader, FastqWriter, FastqRecord
barcode = namedtuple('barcode', 'id strand seen5 seenA seen3 end5 endA end3 primer')
log = log.getLogger()
class BarcodeTrimmer( object ):
def __init__( self, input_file, barcode_file, prefix=None, filetype=None ):
self.input_file = input_file
self.barcode_file = barcode_file
self.prefix = prefix or get_prefix( input_file )
self.filetype = filetype or get_filetype( input_file )
self.positions = {}
def run( self ):
self.parse_barcode_data()
self.open_reader()
self.open_writer()
self.trim_sequences()
def parse_barcode_data( self ):
with open( self.barcode_file ) as handle:
for entry in map(barcode._make, csv.reader(handle, delimiter='\t')):
if entry.id == 'ID':
continue
start = None if entry.end5 == 'NA' else int(entry.end5)
end = None if entry.end3 == 'NA' else int(entry.end3)
self.positions[entry.id] = (start, end)
def open_reader( self ):
if self.filetype == 'fasta':
self.reader = FastaReader( self.input_file )
elif self.filetype == 'fastq':
self.reader = FastqReader( self.input_file )
def open_writer( self ):
if self.filetype == 'fasta':
output_file = '%s.trim.fasta' % self.prefix
self.writer = FastaWriter( output_file )
elif self.filetype == 'fastq':
output_file = '%s.trim.fastq' % self.prefix
self.writer = FastqWriter( output_file )
def trim_sequences( self ):
for record in self.reader:
try:
start, end = self.positions[record.name]
except:
msg = 'Unknown sequence record "%s"!' % record.name
log.error( msg )
raise ValueError( msg )
trimmed_record = trim_record( record, start, end )
self.writer.writeRecord( trimmed_record )
def trim_record( record, start, end ):
if isinstance(record, FastaRecord):
return trim_fasta_record( record, start, end )
elif isinstance(record, FastqRecord):
return trim_fastq_record( record, start, end )
else:
msg = 'Unrecognized record type "%s"' % type(record)
log.error( msg )
raise TypeError( msg )
def trim_fasta_record( record, start, end ):
if start is None and end is None:
trimmed_sequence = record.sequence
elif start is None:
trimmed_sequence = record.sequence[:end]
elif end is None:
trimmed_sequence = record.sequence[start:]
else:
trimmed_sequence = record.sequence[start:end]
return FastaRecord( record.name,
trimmed_sequence )
def trim_fastq_record( record, start, end ):
if start is None and end is None:
trimmed_sequence = record.sequence
trimmed_quality = record.quality
elif start is None:
trimmed_sequence = record.sequence[:end]
trimmed_quality = record.quality[:end]
elif end is None:
trimmed_sequence = record.sequence[start:]
trimmed_quality = record.quality[start:]
else:
trimmed_sequence = record.sequence[start:end]
trimmed_quality = record.quality[start:end]
return FastqRecord( record.name,
trimmed_sequence,
trimmed_quality )
def get_prefix( filename ):
return '.'.join( filename.split('.')[:-1] )
def get_filetype( filename ):
if (filename.lower().endswith( '.fa' ) or
filename.lower().endswith( '.fsa' ) or
filename.lower().endswith( '.fasta' )):
return 'fasta'
elif (filename.lower().endswith( '.fq' ) or
filename.lower().endswith( '.fastq' )):
return 'fastq'
else:
msg = 'Input file is not a recognized filetype!'
log.error( msg )
raise TypeError( msg )
if __name__ == '__main__':
log.basicConfig( level=log.INFO )
sequence_file = sys.argv[1]
barcode_file = sys.argv[2]
BarcodeTrimmer( sequence_file, barcode_file ).run()
| 35.178862
| 84
| 0.614745
|
bff0b44dcd50e1c00966fffa92895e2a39924086
| 6,069
|
py
|
Python
|
google/appengine/runtime/apiproxy.py
|
Arachnid/google_appengine
|
2e950619f5027f414131fafc3cc253af4875a0fe
|
[
"Apache-2.0"
] | 1
|
2016-05-09T12:41:59.000Z
|
2016-05-09T12:41:59.000Z
|
google/appengine/runtime/apiproxy.py
|
Arachnid/google_appengine
|
2e950619f5027f414131fafc3cc253af4875a0fe
|
[
"Apache-2.0"
] | null | null | null |
google/appengine/runtime/apiproxy.py
|
Arachnid/google_appengine
|
2e950619f5027f414131fafc3cc253af4875a0fe
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Makes API calls to various Google-provided services.
Provides methods for making calls into Google Apphosting services and APIs
from your application code. This code will only work properly from within
the Google Apphosting environment.
"""
import sys
from google.net.proto import ProtocolBuffer
from google.appengine import runtime
from google.appengine.api import apiproxy_rpc
from google3.apphosting.runtime import _apphosting_runtime___python__apiproxy
from google.appengine.runtime import apiproxy_errors
OK = 0
RPC_FAILED = 1
CALL_NOT_FOUND = 2
ARGUMENT_ERROR = 3
DEADLINE_EXCEEDED = 4
CANCELLED = 5
APPLICATION_ERROR = 6
OTHER_ERROR = 7
OVER_QUOTA = 8
REQUEST_TOO_LARGE = 9
CAPABILITY_DISABLED = 10
_ExceptionsMap = {
RPC_FAILED:
(apiproxy_errors.RPCFailedError,
"The remote RPC to the application server failed for the call %s.%s()."),
CALL_NOT_FOUND:
(apiproxy_errors.CallNotFoundError,
"The API package '%s' or call '%s()' was not found."),
ARGUMENT_ERROR:
(apiproxy_errors.ArgumentError,
"An error occurred parsing (locally or remotely) the arguments to %s.%s()."),
DEADLINE_EXCEEDED:
(apiproxy_errors.DeadlineExceededError,
"The API call %s.%s() took too long to respond and was cancelled."),
CANCELLED:
(apiproxy_errors.CancelledError,
"The API call %s.%s() was explicitly cancelled."),
OTHER_ERROR:
(apiproxy_errors.Error,
"An error occurred for the API request %s.%s()."),
OVER_QUOTA:
(apiproxy_errors.OverQuotaError,
"The API call %s.%s() required more quota than is available."),
REQUEST_TOO_LARGE:
(apiproxy_errors.RequestTooLargeError,
"The request to API call %s.%s() was too large."),
}
class RPC(apiproxy_rpc.RPC):
"""A RPC object, suitable for talking to remote services.
Each instance of this object can be used only once, and should not be reused.
Stores the data members and methods for making RPC calls via the APIProxy.
"""
def __init__(self, *args, **kargs):
"""Constructor for the RPC object. All arguments are optional, and
simply set members on the class. These data members will be
overriden by values passed to MakeCall.
"""
super(RPC, self).__init__(*args, **kargs)
self.__result_dict = {}
def _WaitImpl(self):
"""Waits on the API call associated with this RPC. The callback,
if provided, will be executed before Wait() returns. If this RPC
is already complete, or if the RPC was never started, this
function will return immediately.
Raises:
InterruptedError if a callback throws an uncaught exception.
"""
try:
rpc_completed = _apphosting_runtime___python__apiproxy.Wait(self)
except (runtime.DeadlineExceededError, apiproxy_errors.InterruptedError):
raise
except:
exc_class, exc, tb = sys.exc_info()
if (isinstance(exc, SystemError) and
exc.args[0] == 'uncaught RPC exception'):
raise
rpc = None
if hasattr(exc, "_appengine_apiproxy_rpc"):
rpc = exc._appengine_apiproxy_rpc
new_exc = apiproxy_errors.InterruptedError(exc, rpc)
raise new_exc.__class__, new_exc, tb
return True
def _MakeCallImpl(self):
assert isinstance(self.request, ProtocolBuffer.ProtocolMessage)
assert isinstance(self.response, ProtocolBuffer.ProtocolMessage)
e = ProtocolBuffer.Encoder()
self.request.Output(e)
self.__state = RPC.RUNNING
_apphosting_runtime___python__apiproxy.MakeCall(
self.package, self.call, e.buffer(), self.__result_dict,
self.__MakeCallDone, self, deadline=(self.deadline or -1))
def __MakeCallDone(self):
self.__state = RPC.FINISHING
if self.__result_dict['error'] == APPLICATION_ERROR:
self.__exception = apiproxy_errors.ApplicationError(
self.__result_dict['application_error'],
self.__result_dict['error_detail'])
elif self.__result_dict['error'] == CAPABILITY_DISABLED:
if self.__result_dict['error_detail']:
self.__exception = apiproxy_errors.CapabilityDisabledError(
self.__result_dict['error_detail'])
else:
self.__exception = apiproxy_errors.CapabilityDisabledError(
"The API call %s.%s() is temporarily unavailable." % (
self.package, self.call))
elif self.__result_dict['error'] in _ExceptionsMap:
exception_entry = _ExceptionsMap[self.__result_dict['error']]
self.__exception = exception_entry[0](
exception_entry[1] % (self.package, self.call))
else:
try:
self.response.ParseFromString(self.__result_dict['result_string'])
except Exception, e:
self.__exception = e
self.__Callback()
def CreateRPC():
"""Create a RPC instance. suitable for talking to remote services.
Each RPC instance can be used only once, and should not be reused.
Returns:
an instance of RPC object
"""
return RPC()
def MakeSyncCall(package, call, request, response):
"""Makes a synchronous (i.e. blocking) API call within the specified
package for the specified call method. request and response must be the
appropriately typed ProtocolBuffers for the API call. An exception is
thrown if an error occurs when communicating with the system.
Args:
See MakeCall above.
Raises:
See CheckSuccess() above.
"""
rpc = CreateRPC()
rpc.MakeCall(package, call, request, response)
rpc.Wait()
rpc.CheckSuccess()
| 32.983696
| 80
| 0.71478
|
a118be33fa0aee4d41163288a6d9327f8bdff895
| 8,865
|
py
|
Python
|
login.py
|
boopo/newCumtLogin
|
e873409c20eaae702e7f49c629521e3324a40613
|
[
"Apache-2.0"
] | 4
|
2020-12-16T14:23:32.000Z
|
2021-04-10T08:52:56.000Z
|
login.py
|
boopo/NewCumtLogin
|
e873409c20eaae702e7f49c629521e3324a40613
|
[
"Apache-2.0"
] | null | null | null |
login.py
|
boopo/NewCumtLogin
|
e873409c20eaae702e7f49c629521e3324a40613
|
[
"Apache-2.0"
] | 1
|
2022-03-24T08:19:35.000Z
|
2022-03-24T08:19:35.000Z
|
from time import time
import requests
from bs4 import BeautifulSoup
from encrypt import get_token
#from settings import username, password
url_login = 'http://authserver.cumt.edu.cn/authserver/login?service=http%3A//portal.cumt.edu.cn/casservice' # 登录
url_post = 'http://authserver.cumt.edu.cn/authserver/login?service=http%3A%2F%2Fportal.cumt.edu.cn%2Fcasservice' # 提交表单
url_balance = 'http://portal.cumt.edu.cn/ykt/balance' # 校园卡余额(未跳转)
url_balance_re1 = 'http://ykt.cumt.edu.cn:8088/ias/prelogin?sysid=FWDT' # 一卡通跳转1
url_balance_re2 = 'http://ykt.cumt.edu.cn/cassyno/index' # 一卡通跳转2
url_balance_history = 'http://ykt.cumt.edu.cn/Report/GetPersonTrjn' # 一卡通流水按时间查询
url_balance2 = 'http://portal.cumt.edu.cn/ykt/flow?flow_num=20' # 校园卡按逆序查询(未跳转)
url_balance_charge = 'http://ykt.cumt.edu.cn/User/Account_Pay' # 校园卡充值(慎用!!!)
url_library = 'http://portal.cumt.edu.cn/portal/api/v1/api/http/40' # 图书简单信息(未跳转)
url_library_re = 'http://121.248.104.188:8080/CASSSO/login.jsp' # 图书馆认证跳转
url_library_Loan = 'https://findcumt.libsp.com/find/loanInfo/loanList' # 图书馆当前借阅信息
url_library_loan_history = 'https://findcumt.libsp.com/find/loanInfo/loanHistoryList' # 图书馆历史借阅信息
url_library_favorite = 'https://findcumt.libsp.com/find/favorites/recordList' # 图书馆收藏列表
url_jwxt_login1 = 'http://jwxt.cumt.edu.cn/sso/jziotlogin' #教务系统跳转
url_jwxt_login2 = 'http://authserver.cumt.edu.cn/authserver/login?service=http%3A%2F%2Fjwxt.cumt.edu.cn%2Fsso%2Fjziotlogin'
url_jwxt_login3 = 'http://jwxt.cumt.edu.cn/sso/jziotlogin?ticket=ST-1138058-zPXeUMJe-H8kQpqweT0PHkbJt98Wisedu-New-IDS1'
headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 FireFox / 29.0",
"X-Requested-With": "XMLHttpRequest"
}
class newIds:
def __init__(self, username, password):
self.username = username
self.password = password
self.session = requests.session()
def login(self):
r = self.session.get(url=url_login, headers=headers)
soup = BeautifulSoup(r.text, 'html5lib')
salt = soup.find('input', id='pwdEncryptSalt')['value']
execution = soup.find('input', id='execution')['value']
salt_pwd = get_token(self.password, salt)
form_login = {
'username': self.username,
'password': salt_pwd,
'_eventId': 'submit',
'cllt': 'userNameLogin',
'execution': execution
}
rs = self.session.post(url=url_post, data=form_login, headers=headers, allow_redirects=False)
if rs.status_code == 302:
url_re = rs.headers['Location']
rss = self.session.get(url=url_re)
l1 = []
for a in self.session.cookies:
l1.append(a.value)
return True
else:
return False
def get_jwxt(self):
headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 FireFox / 29.0",
"X-Requested-With": "XMLHttpRequest"
}
r = self.session.get(url=url_jwxt_login2, headers=headers, allow_redirects= False)
if r.status_code == 302:
u1 = r.headers['Location']
r1 = self.session.get(url=u1, headers=headers)
l1 = []
for a in self.session.cookies:
l1.append(a.value)
print("教务系统所用Cookie"+l1[5])
print(l1)
else:
return False
def get_balance_simple(self):
r = self.session.get(url=url_balance, headers=headers)
return r.json()
def get_balance_history_simple(self):
r = self.session.get(url=url_balance2, headers=headers)
return r.json()
def get_balance_history_pro(self, sdata='2020-11-09', edate='2020-12-09', account='119192', page='1', rows='15'):
r = self.session.get(url=url_balance_re1, headers=headers)
soup = BeautifulSoup(r.text, 'html5lib')
token = soup.find('input', id='ssoticketid')['value']
form = {
"errorcode": 1,
"continueurl": '',
"ssoticketid": token
}
r1 = self.session.post(url=url_balance_re2, headers=headers, data=form)
l1 = []
for s in self.session.cookies:
l1.append(s)
# print("一卡通流水所用cookie", l1[7])
form_balance = {
"sdate": sdata,
"edate": edate,
"account": account,
"page": page,
"rows": rows
}
r2 = self.session.post(url=url_balance_history, headers=headers, data=form_balance)
return r2.json()
def get_library_simple(self):
r = self.session.get(url=url_library, headers=headers)
return r.json()
def get_library_token(self):
r = self.session.get(url=url_library_re, headers=headers, allow_redirects=False)
r2 = self.session.get(url=r.headers['Location'], headers=headers, allow_redirects=False)
r3 = self.session.get(url=r2.headers['Location'], headers=headers, allow_redirects=False)
r4 = self.session.get(url=r3.headers['Location'], headers=headers, allow_redirects=False)
print(r4.status_code)
print(r4.headers)
# 重定向出现问题,手动跳转获取token, 反正以后也得拆分。。。
# print('图书馆所用的jwtOpacAuth为:', r4.headers['Location'][43:-12])
return r4.headers['Location'][43:-12]
# account为卡号, tranamt单位为分,也就是100为1元
# 复用部分get_balance_history_pro, 应该用缓存来解决,但这只是个demo
def get_balance_charge(self, tranamt='100'):
# 获取Cookie
r = self.session.get(url=url_balance_re1, headers=headers)
soup = BeautifulSoup(r.text, 'html5lib')
token = soup.find('input', id='ssoticketid')['value']
form = {
"errorcode": 1,
"continueurl": '',
"ssoticketid": token
}
r1 = self.session.post(url=url_balance_re2, headers=headers, data=form)
l1 = []
for s in self.session.cookies:
l1.append(s)
# 获取卡号
r2 = self.session.get(url=url_balance, headers=headers)
account = r2.json()['data']['ZH']
# 以下为本次表单提交
header = {
"Referer": "http://ykt.cumt.edu.cn/Page/Page",
"Cookie": 'hallticket='+l1[6].value
}
form_charge = {
"account": account,
"acctype": "23%23%23",
"tranamt": tranamt,
"qpwd": "",
"paymethod": "2",
"paytype": "%E4%BD%BF%E7%94%A8%E7%BB%91%E5%AE%9A%E7%9A%84%E9%BB%98%E8%AE%A4%E8%B4%A6%E5%8F%B7",
"client_type": "web"
}
r3 = requests.post(url=url_balance_charge, headers=header, data=form_charge)
return r3.text
# 以下不需要request.session会话保持
# 请不要频繁请求图书馆,http连接池会满,可以令verify为Flase,或connection 为 close
# 有时候,学校的光缆被挖断后,会出现这种情况(手动狗头)
class libIds:
def __init__(self, jwt_token):
self.token = jwt_token
self.headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 FireFox / 29.0",
"X-Requested-With": "XMLHttpRequest",
"jwtOpacAuth": jwt_token,
"Referer": "https://findcumt.libsp.com/",
"Connection": "close"
}
def get_library_list(self, page='1', rows='20'): # page 页吗 rows 每页行数
form = {
"page": page,
"rows": rows
}
r = requests.post(url=url_library_Loan, headers=self.headers, json=form, verify=False)
return r.json()
def get_library_history_list(self, page='1', rows='20'):
form = {
"page": page,
"rows": rows
}
r = requests.post(url=url_library_loan_history, headers=self.headers, json=form, verify=False)
return r.json()
def get_library_favorite(self, page='1', rows='10'):
form = {
"favoritesId": "",
"page": page,
"rows": rows,
"searchField": "title",
"searchFieldContent": ""
}
r = requests.post(url=url_library_favorite, headers=self.headers, json=form, verify=False)
return r.json()
if __name__ == '__main__':
# username为学号, password为密码
a = newIds("08193109", "xxxxx")
a.login()
# a.get_jwxt()
# # 校园卡余额
# print(a.get_balance_simple())
#
# # 校园卡充值
# print(a.get_balance_charge())
#
# # 校园卡历史流水(按时间查)
# print(a.get_balance_history_pro())
#
# # 获取图书馆借阅信息(简约)
# print(a.get_library_simple())
#
# # 校园卡流水(按消费记录逆序)
# print(a.get_balance_simple())
# 获取图书馆的jwt_token
# token = a.get_library_token()
#
# b = libIds(token)
# #
# # # 获取目前的图书借阅信息
# # print(b.get_library_list())
#
# # 获取图书馆的历史借阅信息
# print(b.get_library_history_list())
# t4 = time()
# print(t4-t2)
#
# # 图书馆默认收藏信息
# print(b.get_library_favorite())
| 35.46
| 123
| 0.603158
|
b46ac68483f0f755f071354498c1d93e75816b25
| 6,306
|
py
|
Python
|
PyREMOT/tests/test_rmt_N2_CH4.py
|
sinagilassi/rmt-app
|
bbd5bb496f36116ecec15d75b4133a43a9233aaa
|
[
"MIT"
] | null | null | null |
PyREMOT/tests/test_rmt_N2_CH4.py
|
sinagilassi/rmt-app
|
bbd5bb496f36116ecec15d75b4133a43a9233aaa
|
[
"MIT"
] | null | null | null |
PyREMOT/tests/test_rmt_N2_CH4.py
|
sinagilassi/rmt-app
|
bbd5bb496f36116ecec15d75b4133a43a9233aaa
|
[
"MIT"
] | null | null | null |
# TEST
# STATIC MODELING
# ----------------
# REVIEW
# check unit
# flowrate [mol/s]
# rate formation [mol/m^3.s]
# import packages/modules
import numpy as np
import math
import json
from data import *
from core import constants as CONST
from rmt import rmtExe
from core.utilities import roundNum
from docs.rmtUtility import rmtUtilityClass as rmtUtil
# NOTE
### operating conditions ###
# pressure [Pa]
P = 3*1e5
# temperature [K]
T = 973
# operation period [s]
opT = 10
# NOTE
### reactions ###
# component all
compList = ["CH4", "C2H4", "H2"]
# reactions
# ignore: "R2": "CH4 <=> C + 2H2",
reactionSet = {
"R1": "2CH4 <=> C2H4 + 2H2",
}
# set feed mole fraction
MoFr_H2 = 0.05
MoFr_C2H4 = 0.05
MoFr_CH4 = 1 - (MoFr_H2 + MoFr_C2H4)
# inlet fixed bed superficial gas velocity [m/s]
SuGaVe = 0.01
# NOTE
### reactor ###
# voidage of the fixed bed
rea_por = 0.39
# solid fraction
rea_sol = 1-rea_por
# catalyst particle density (per catalyst volume) [kg/m^3]
cat_rho = 1982
# bulk density (per reactor volume) [kg/m^3]
bulk_rho = cat_rho*rea_sol
# fraction of solids
rea_solid = 1-rea_por
# reactor diameter [m]
rea_dia = 0.007
# reactor radius [m]
rea_rad = rea_dia/2
# reactor length [m]
rea_len = 1 # 0.011
# reactor cross sectional area [m^2]
rea_Ac = CONST.PI_CONST*(rea_rad**2)
# reactor volume [m^3]
rea_vol = (CONST.PI_CONST*(rea_rad**2)*rea_len)
# bulk density [kg/m^3]
bulk_rho0 = bulk_rho
bulk_rho1 = 260
# catalyst mass [kg]
cat_m = bulk_rho1*rea_vol
# reactor volume (real) [m^3]
rea_vol0 = rea_vol*rea_por
# catalyst heat capacity at constant pressure [J/kg·K]
cat_cp = 960
# catalyst thermal conductivity [J/s.m.K]
cat_ThCo = 0.22
# catalyst bed volume [m^3]
catBed_Vol = rea_vol*rea_solid
# NOTE
# reactor
# reactor volume [m^3]
ReVo = 5
# reactor length [m]
ReLe = rea_len
# reactor inner diameter [m]
# ReInDi = math.sqrt(ReVo/(ReLe*CONST.PI_CONST))
ReInDi = rea_dia
# particle dimeter [m]
PaDi = cat_d
# particle density [kg/m^3]
CaDe = cat_rho
# particle specific heat capacity [kJ/kg.K]
CaSpHeCa = cat_cp/1000
# catalyst bed dencity [kg/m^3]
CaBeDe = bulk_rho
# NOTE
### calculate ###
# mole fraction
MoFri0 = np.array([MoFr_CH4, MoFr_C2H4, MoFr_H2])
# concentration [kmol/m3]
ct0 = calConcentration(MoFri0, P, T, 'kmol/m^3')
# conversion
ct0_CONV = 1e3*ct0
# total concentration [kmol/m3]
ct0T = calTotalConcentration(ct0)
# inlet fixed bed interstitial gas velocity [m/s]
InGaVe = SuGaVe/bed_por
# flux [kmol/m2.s] -> total concentration x superficial velocity
Fl0 = ct0T*SuGaVe
# cross section of reactor x porosity [m^2]
rea_CSA = rmtUtil.reactorCrossSectionArea(bed_por, ReInDi)
# real flowrate @ P & T [m^3/s]
VoFlRa = InGaVe*rea_CSA
# flowrate at STP [m^3/s]
VoFlRaSTP = rmtUtil.volumetricFlowrateSTP(VoFlRa, P, T)
# molar flowrate @ ideal gas [mol/s]
MoFlRa0 = rmtUtil.VoFlRaSTPToMoFl(VoFlRaSTP)
# initial concentration[mol/m3]
Ct0 = MoFlRa0/VoFlRa
# molar flux
MoFl0 = MoFlRa0/(rea_CSA/bed_por)
# or
MoFl0_2 = Ct0*InGaVe*bed_por
# NOTE
# external heat
# overall heat transfer coefficient [J/m^2.s.K]
U = 50
# effective heat transfer area per unit of reactor volume [m^2/m^3]
a = 4/ReInDi
# medium temperature [K]
Tm = 0
# Ua
Ua = U*a
#
externalHeat = {
"OvHeTrCo": U,
"EfHeTrAr": a,
"MeTe": Tm
}
# gas mixture viscosity [Pa.s]
GaMiVi = 1e-5
# NOTE
# reaction rates
# initial values
# varis0 = {
# # loopVars
# # T,P,NoFri,SpCoi
# # other vars
# "bulk_rho1": bulk_rho1, # [kg/m^3]
# "krTref": 2.44e-5, # [variable]
# "EA": 18.96*1000, # [J/mol]
# "KxTref": 0.87, # [1/bar]
# "dH": 87.39*1000, # [J/mol]
# "Tref": 973.15, # [K]
# "RTref": lambda x: x['R_CONST']*x['Tref'], # [J/mol]
# "tetaEj": lambda x: x['EA']/x['RTref'],
# "tetakj": lambda x: math.log(x['krTref']),
# "kj": lambda x: math.exp(x['tetakj'])*math.exp(x['tetaEj']*(1 - (x['Tref']/x['T']))),
# "tetaKi": lambda x: math.log(x['KxTref']),
# "tetaHi": lambda x: x['dH']/x['RTref'],
# "Ki": lambda x: math.exp(x['tetaKi'])*math.exp(x['tetaHi']*(1 - (x['Tref']/x['T']))),
# "y_CH4": lambda x: x['MoFri'][0]*x['P']*1e-5, # [bar]
# "rA": lambda x: math.sqrt(x['Ki']*x['y_CH4']),
# "rB": lambda x: 1 + x['rA'],
# "rC": lambda x: x['kj']*x['rA']/(x['rB']**2)
# }
# reaction rates
# rates0 = {
# # [mol/m^3.s]
# "r1": lambda x: (x['kj']*x['rA']/(x['rB']**2))*x['bulk_rho1']/60
# }
# initial values
varis0 = {
# loopVars
# T,P,NoFri,SpCoi
# other vars
# [m^3/(mol*s)]
"k0": 0.0072*1e-1,
"y_CH4": lambda x: x['MoFri'][0],
"C_CH4": lambda x: x['SpCoi'][0]
}
# reaction rates
rates0 = {
# [mol/m^3.s]
"r1": lambda x: x['k0']*(x['C_CH4']**2)
}
# reaction rate
reactionRateSet = {
"VARS": varis0,
"RATES": rates0
}
# model: M2
# model: N2
# NOTE
# model input - feed
modelInput = {
"model": "N2",
"operating-conditions": {
"pressure": P,
"temperature": T,
"period": opT,
"process-type": "non-iso-thermal"
},
"feed": {
"mole-fraction": MoFri0,
"molar-flowrate": MoFlRa0,
"molar-flux": MoFl0,
"volumetric-flowrate": VoFlRa,
"concentration": ct0_CONV,
"mixture-viscosity": GaMiVi,
"components": {
"shell": compList,
"tube": [],
"medium": []
}
},
"reactions": reactionSet,
"reaction-rates": reactionRateSet,
"external-heat": externalHeat,
"reactor": {
"ReInDi": ReInDi,
"ReLe": ReLe,
"PaDi": PaDi,
"BeVoFr": bed_por,
"CaBeDe": bulk_rho,
"CaDe": CaDe,
"CaSpHeCa": CaSpHeCa
},
"solver-config": {
"ivp": "default"
}
}
# run exe
res = rmtExe(modelInput)
# print(f"modeling result: {res}")
# save modeling result
# with open('res.json', 'w') as f:
# json.dump(res, f)
# steady-state results
# concentration
# total concentration
# ssModelingData = res['resModel']['dataYs']
# save modeling result [txt]
# np.savetxt('ssModeling.txt', ssModelingData, fmt='%.10e')
# load
# c = np.loadtxt('ssModeling.txt', dtype=np.float64)
# print("c: ", c, " c Shape: ", c.shape)
# save binary file
# np.save('ResM1.npy', ssModelingData)
# load
# b2Load = np.load('res3.npy')
# print("b2Load: ", b2Load, b2Load.shape)
| 22.847826
| 91
| 0.615128
|
c1701afc000c2244a67f85be1821be0e11a7b711
| 2,889
|
py
|
Python
|
src/cityloops/urls.py
|
metabolism-of-cities/metabolism-of-cities-platform
|
6213de146b1bc7b7c2802531fdcda1e328c32c64
|
[
"MIT"
] | 4
|
2020-10-14T15:35:07.000Z
|
2022-01-13T15:31:16.000Z
|
src/cityloops/urls.py
|
metabolism-of-cities/metabolism-of-cities-platform
|
6213de146b1bc7b7c2802531fdcda1e328c32c64
|
[
"MIT"
] | null | null | null |
src/cityloops/urls.py
|
metabolism-of-cities/metabolism-of-cities-platform
|
6213de146b1bc7b7c2802531fdcda1e328c32c64
|
[
"MIT"
] | 2
|
2021-01-07T14:39:05.000Z
|
2022-01-18T12:31:50.000Z
|
from django.urls import path
from . import views
from data import views as data
from core import views as core
from ie.urls_baseline import baseline_urlpatterns
from ie.urls_staf_baseline import baseline_staf_urlpatterns
from ie.urls_education_baseline import baseline_education_urlpatterns
from django.views.generic.base import RedirectView
app_name = "cityloops"
urlpatterns = baseline_urlpatterns + baseline_education_urlpatterns + [
path("", data.progress, {"style": "grid"}, name="index"),
path("about/", views.about, name="about"),
path("partners/", views.partners, name="partners"),
path("team/", views.team, name="team"),
path("projects/", views.projects, name="projects"),
path("contact/", core.article, { "id":56 }, name="contact"),
path("videos/", views.videos),
path("methods/", core.article, { "id":49331 }),
path("reports/", core.article, { "id":51220 }),
path("instructions/", RedirectView.as_view(url="/courses/", permanent=False)),
path("overview/", data.progress, { "style": "grid"}, name="overview"),
path("eurostat/", data.eurostat, name="eurostat"),
path("eurostat/grid/", views.eurostat_grid, name="eurostat_grid"),
path("circular-city/", views.circular_city, name="circular_city"),
path("indicators/", views.indicators, name="indicators"),
path("cities-indicators/", views.cities_sectors, name="cities_sectors"),
path("cities-indicators/<slug:sector>/", views.cities_indicators, name="cities_indicators"),
path("city/<slug:slug>/", views.city, name="city"),
path("city/<slug:slug>/mockup/", views.dashboard_mockup, name="dashboard_mockup"),
path("city/<slug:slug>/indicators/", views.city_sectors, name="city_sectors"),
path("city/<slug:slug>/indicators/<slug:sector>/", views.city_indicators, name="city_indicators"),
path("city/<slug:slug>/indicators/<slug:sector>/form/", views.city_indicators_form, name="city_indicators_form"),
path("city/<slug:slug>/indicators/<slug:sector>/<int:id>/", views.city_indicator, name="city_indicator"),
path("city/<slug:slug>/indicators/<slug:sector>/<int:id>/form/", views.city_indicator_form, name="city_indicator_form"),
path("city/<slug:slug>/<slug:sector>/sca-report/", views.sca_report, name="sca_report"),
path("city/<slug:slug>/<slug:sector>/sca-report/form/", views.sca_report_form, name="sca_report_form"),
# copies of staf urls since they use cityloops specific tags
path("dashboards/<slug:space>/maps/", views.space_maps, name="space_maps"),
path("dashboards/<slug:space>/maps/overview/", views.space_map, name="space_map"),
# temporary plotly sankey testing
path("sankey/", views.sankey, name="sankey"),
# these are loaded after the cityloops maps, so the first URL used is the cityloops one
] + baseline_staf_urlpatterns + [
path("<slug:slug>/", core.article, name="article"),
]
| 56.647059
| 124
| 0.710627
|
19e9565a318aa6f883e1be60008c4c40f2e132a5
| 634
|
py
|
Python
|
tutos/conversations/serializers.py
|
UVG-Teams/Tutos-System
|
230dd9434f745c2e6e69e10f9908e9818c559d03
|
[
"MIT"
] | null | null | null |
tutos/conversations/serializers.py
|
UVG-Teams/Tutos-System
|
230dd9434f745c2e6e69e10f9908e9818c559d03
|
[
"MIT"
] | null | null | null |
tutos/conversations/serializers.py
|
UVG-Teams/Tutos-System
|
230dd9434f745c2e6e69e10f9908e9818c559d03
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from conversations.models import Conversation, Message
from users.serializers import UserSerializer
class ConversationSerializer(serializers.ModelSerializer):
user1 = UserSerializer()
user2 = UserSerializer()
class Meta:
model = Conversation
fields = (
'id',
'user1',
'user2',
)
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = (
'id',
'transmitter',
'message',
'date',
'conversation',
)
| 21.862069
| 58
| 0.582019
|
506a99b1eb7b3baa41d7cb3c8e975207ef7a9e1f
| 22,564
|
py
|
Python
|
improver_tests/test_set_up_test_cubes.py
|
ddlddl58/improver
|
37b5b12491a77feccb03e33813efe8ffdebfa25d
|
[
"BSD-3-Clause"
] | 1
|
2021-05-01T22:59:15.000Z
|
2021-05-01T22:59:15.000Z
|
improver_tests/test_set_up_test_cubes.py
|
ddlddl58/improver
|
37b5b12491a77feccb03e33813efe8ffdebfa25d
|
[
"BSD-3-Clause"
] | null | null | null |
improver_tests/test_set_up_test_cubes.py
|
ddlddl58/improver
|
37b5b12491a77feccb03e33813efe8ffdebfa25d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for cube setup functions
"""
import unittest
from datetime import datetime
import iris
import numpy as np
from iris.tests import IrisTest
from improver.grids import GLOBAL_GRID_CCRS, STANDARD_GRID_CCRS
from improver.metadata.check_datatypes import check_mandatory_standards
from improver.metadata.constants.time_types import TIME_COORDS
from improver.metadata.probabilistic import find_threshold_coordinate
from improver.utilities.temporal import iris_time_to_datetime
from .set_up_test_cubes import (
add_coordinate,
construct_scalar_time_coords,
construct_xy_coords,
set_up_percentile_cube,
set_up_probability_cube,
set_up_variable_cube,
)
class test_construct_xy_coords(IrisTest):
"""Test the construct_xy_coords method"""
def test_lat_lon(self):
"""Test coordinates created for a lat-lon grid"""
y_coord, x_coord = construct_xy_coords(4, 3, "latlon")
self.assertEqual(y_coord.name(), "latitude")
self.assertEqual(x_coord.name(), "longitude")
for crd in [y_coord, x_coord]:
self.assertEqual(crd.units, "degrees")
self.assertEqual(crd.dtype, np.float32)
self.assertEqual(crd.coord_system, GLOBAL_GRID_CCRS)
self.assertEqual(len(y_coord.points), 4)
self.assertEqual(len(x_coord.points), 3)
def test_lat_lon_values(self):
"""Test latitude and longitude point values are as expected"""
y_coord, x_coord = construct_xy_coords(3, 3, "latlon")
self.assertArrayAlmostEqual(x_coord.points, [-20.0, 0.0, 20.0])
self.assertArrayAlmostEqual(y_coord.points, [40.0, 60.0, 80.0])
def test_proj_xy(self):
"""Test coordinates created for an equal area grid"""
y_coord, x_coord = construct_xy_coords(4, 3, "equalarea")
self.assertEqual(y_coord.name(), "projection_y_coordinate")
self.assertEqual(x_coord.name(), "projection_x_coordinate")
for crd in [y_coord, x_coord]:
self.assertEqual(crd.units, "metres")
self.assertEqual(crd.dtype, np.float32)
self.assertEqual(crd.coord_system, STANDARD_GRID_CCRS)
self.assertEqual(len(y_coord.points), 4)
self.assertEqual(len(x_coord.points), 3)
class test_construct_scalar_time_coords(IrisTest):
"""Test the construct_scalar_time_coords method"""
def test_basic(self):
"""Test times can be set"""
coord_dims = construct_scalar_time_coords(
datetime(2017, 12, 1, 14, 0), None, datetime(2017, 12, 1, 9, 0)
)
time_coords = [item[0] for item in coord_dims]
for crd in time_coords:
self.assertIsInstance(crd, iris.coords.DimCoord)
self.assertEqual(time_coords[0].name(), "time")
self.assertEqual(
iris_time_to_datetime(time_coords[0])[0], datetime(2017, 12, 1, 14, 0)
)
self.assertEqual(time_coords[1].name(), "forecast_reference_time")
self.assertEqual(
iris_time_to_datetime(time_coords[1])[0], datetime(2017, 12, 1, 9, 0)
)
self.assertEqual(time_coords[2].name(), "forecast_period")
self.assertEqual(time_coords[2].points[0], 3600 * 5)
for crd in time_coords[:2]:
self.assertEqual(crd.dtype, np.int64)
self.assertEqual(crd.units, "seconds since 1970-01-01 00:00:00")
self.assertEqual(time_coords[2].units, "seconds")
self.assertEqual(time_coords[2].dtype, np.int32)
def test_error_negative_fp(self):
"""Test an error is raised if the calculated forecast period is
negative"""
msg = "Cannot set up cube with negative forecast period"
with self.assertRaisesRegex(ValueError, msg):
_ = construct_scalar_time_coords(
datetime(2017, 12, 1, 14, 0), None, datetime(2017, 12, 1, 16, 0)
)
def test_time_bounds(self):
"""Test creation of time coordinate with bounds"""
coord_dims = construct_scalar_time_coords(
datetime(2017, 12, 1, 14, 0),
(datetime(2017, 12, 1, 13, 0), datetime(2017, 12, 1, 14, 0)),
datetime(2017, 12, 1, 9, 0),
)
time_coord = coord_dims[0][0]
self.assertEqual(
iris_time_to_datetime(time_coord)[0], datetime(2017, 12, 1, 14, 0)
)
self.assertEqual(time_coord.bounds[0][0], time_coord.points[0] - 3600)
self.assertEqual(time_coord.bounds[0][1], time_coord.points[0])
def test_time_bounds_wrong_order(self):
"""Test time bounds are correctly applied even if supplied in the wrong
order"""
coord_dims = construct_scalar_time_coords(
datetime(2017, 12, 1, 14, 0),
(datetime(2017, 12, 1, 14, 0), datetime(2017, 12, 1, 13, 0)),
datetime(2017, 12, 1, 9, 0),
)
time_coord = coord_dims[0][0]
self.assertEqual(
iris_time_to_datetime(time_coord)[0], datetime(2017, 12, 1, 14, 0)
)
self.assertEqual(time_coord.bounds[0][0], time_coord.points[0] - 3600)
self.assertEqual(time_coord.bounds[0][1], time_coord.points[0])
def test_error_invalid_time_bounds(self):
"""Test an error is raised if the time point is not between the
specified bounds"""
msg = "not within bounds"
with self.assertRaisesRegex(ValueError, msg):
_ = construct_scalar_time_coords(
datetime(2017, 11, 10, 4, 0),
(datetime(2017, 12, 1, 13, 0), datetime(2017, 12, 1, 14, 0)),
datetime(2017, 11, 10, 0, 0),
)
class test_set_up_variable_cube(IrisTest):
"""Test the set_up_variable_cube base function"""
def setUp(self):
"""Set up simple temperature data array"""
self.data = np.linspace(275.0, 284.0, 12).reshape(3, 4).astype(np.float32)
self.data_3d = np.array([self.data, self.data, self.data])
def test_defaults(self):
"""Test default arguments produce cube with expected dimensions
and metadata"""
result = set_up_variable_cube(self.data)
# check type, data and attributes
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(result.standard_name, "air_temperature")
self.assertEqual(result.name(), "air_temperature")
self.assertEqual(result.units, "K")
self.assertArrayAlmostEqual(result.data, self.data)
self.assertEqual(result.attributes, {})
# check dimension coordinates
self.assertEqual(result.coord_dims("latitude"), (0,))
self.assertEqual(result.coord_dims("longitude"), (1,))
# check scalar time coordinates
for time_coord in ["time", "forecast_reference_time"]:
self.assertEqual(result.coord(time_coord).dtype, np.int64)
self.assertEqual(result.coord("forecast_period").dtype, np.int32)
expected_time = datetime(2017, 11, 10, 4, 0)
time_point = iris_time_to_datetime(result.coord("time"))[0]
self.assertEqual(time_point, expected_time)
expected_frt = datetime(2017, 11, 10, 0, 0)
frt_point = iris_time_to_datetime(result.coord("forecast_reference_time"))[0]
self.assertEqual(frt_point, expected_frt)
self.assertEqual(result.coord("forecast_period").units, "seconds")
self.assertEqual(result.coord("forecast_period").points[0], 14400)
check_mandatory_standards(result)
def test_non_standard_name(self):
"""Test non CF standard cube naming"""
result = set_up_variable_cube(self.data, name="temp_in_the_air")
self.assertEqual(result.name(), "temp_in_the_air")
def test_name_and_units(self):
"""Test ability to set data name and units"""
result = set_up_variable_cube(
self.data - 273.15, name="wet_bulb_temperature", units="degC"
)
self.assertArrayAlmostEqual(result.data, self.data - 273.15)
self.assertEqual(result.name(), "wet_bulb_temperature")
self.assertEqual(result.units, "degC")
def test_attributes(self):
"""Test ability to set attributes"""
attributes = {"source": "IMPROVER"}
result = set_up_variable_cube(self.data, attributes=attributes)
self.assertEqual(result.attributes, attributes)
def test_spatial_grid(self):
"""Test ability to set up non lat-lon grid"""
result = set_up_variable_cube(self.data, spatial_grid="equalarea")
self.assertEqual(result.coord_dims("projection_y_coordinate"), (0,))
self.assertEqual(result.coord_dims("projection_x_coordinate"), (1,))
def test_time_points(self):
"""Test ability to configure time and forecast reference time"""
expected_time = datetime(2018, 3, 1, 12, 0)
expected_frt = datetime(2018, 3, 1, 9, 0)
result = set_up_variable_cube(self.data, time=expected_time, frt=expected_frt)
time_point = iris_time_to_datetime(result.coord("time"))[0]
self.assertEqual(time_point, expected_time)
frt_point = iris_time_to_datetime(result.coord("forecast_reference_time"))[0]
self.assertEqual(frt_point, expected_frt)
self.assertEqual(result.coord("forecast_period").points[0], 10800)
self.assertFalse(result.coords("time", dim_coords=True))
def test_realizations_from_data(self):
"""Test realization coordinate is added for 3D data"""
result = set_up_variable_cube(self.data_3d)
self.assertArrayAlmostEqual(result.data, self.data_3d)
self.assertEqual(result.coord_dims("realization"), (0,))
self.assertArrayEqual(result.coord("realization").points, np.array([0, 1, 2]))
self.assertEqual(result.coord_dims("latitude"), (1,))
self.assertEqual(result.coord_dims("longitude"), (2,))
def test_realizations(self):
"""Test specific realization values"""
result = set_up_variable_cube(self.data_3d, realizations=np.array([0, 3, 4]))
self.assertArrayEqual(result.coord("realization").points, np.array([0, 3, 4]))
def test_error_unmatched_realizations(self):
"""Test error is raised if the realizations provided do not match the
data dimensions"""
msg = "Cannot generate 4 realizations"
with self.assertRaisesRegex(ValueError, msg):
_ = set_up_variable_cube(self.data_3d, realizations=np.arange(4))
def test_error_too_many_dimensions(self):
"""Test error is raised if input cube has more than 3 dimensions"""
data_4d = np.array([self.data_3d, self.data_3d])
msg = "Expected 2 or 3 dimensions on input data: got 4"
with self.assertRaisesRegex(ValueError, msg):
_ = set_up_variable_cube(data_4d)
def test_standard_grid_metadata_uk(self):
"""Test standard grid metadata is added if specified"""
result = set_up_variable_cube(self.data, standard_grid_metadata="uk_det")
self.assertEqual(result.attributes["mosg__grid_type"], "standard")
self.assertEqual(result.attributes["mosg__grid_version"], "1.3.0")
self.assertEqual(result.attributes["mosg__grid_domain"], "uk_extended")
self.assertEqual(result.attributes["mosg__model_configuration"], "uk_det")
def test_standard_grid_metadata_global(self):
"""Test standard grid metadata is added if specified"""
result = set_up_variable_cube(self.data_3d, standard_grid_metadata="gl_ens")
self.assertEqual(result.attributes["mosg__grid_type"], "standard")
self.assertEqual(result.attributes["mosg__grid_version"], "1.3.0")
self.assertEqual(result.attributes["mosg__grid_domain"], "global")
self.assertEqual(result.attributes["mosg__model_configuration"], "gl_ens")
class test_set_up_percentile_cube(IrisTest):
"""Test the set_up_percentile_cube function"""
def setUp(self):
"""Set up simple array of percentile-type data"""
self.data = np.array(
[
[[273.5, 275.1, 274.9], [274.2, 274.8, 274.1]],
[[274.2, 276.4, 275.5], [275.1, 276.8, 274.6]],
[[275.6, 278.1, 277.2], [276.4, 277.5, 275.3]],
],
dtype=np.float32,
)
self.percentiles = np.array([20, 50, 80])
def test_defaults(self):
"""Test default arguments produce cube with expected dimensions
and metadata"""
result = set_up_percentile_cube(self.data, self.percentiles)
perc_coord = result.coord("percentile")
self.assertArrayEqual(perc_coord.points, self.percentiles)
self.assertEqual(perc_coord.units, "%")
check_mandatory_standards(result)
def test_standard_grid_metadata(self):
"""Test standard grid metadata"""
result = set_up_percentile_cube(
self.data, self.percentiles, standard_grid_metadata="uk_ens"
)
self.assertEqual(result.attributes["mosg__grid_type"], "standard")
self.assertEqual(result.attributes["mosg__grid_version"], "1.3.0")
self.assertEqual(result.attributes["mosg__grid_domain"], "uk_extended")
self.assertEqual(result.attributes["mosg__model_configuration"], "uk_ens")
class test_set_up_probability_cube(IrisTest):
"""Test the set_up_probability_cube function"""
def setUp(self):
"""Set up array of exceedance probabilities"""
self.data = np.array(
[
[[1.0, 1.0, 0.9], [0.9, 0.9, 0.8]],
[[0.8, 0.8, 0.7], [0.7, 0.6, 0.4]],
[[0.6, 0.4, 0.3], [0.3, 0.2, 0.1]],
[[0.2, 0.1, 0.0], [0.1, 0.0, 0.0]],
],
dtype=np.float32,
)
self.thresholds = np.array([275.0, 275.5, 276.0, 276.5], dtype=np.float32)
def test_defaults(self):
"""Test default arguments produce cube with expected dimensions
and metadata"""
result = set_up_probability_cube(self.data, self.thresholds)
thresh_coord = find_threshold_coordinate(result)
self.assertEqual(
result.name(), "probability_of_air_temperature_above_threshold"
)
self.assertEqual(result.units, "1")
self.assertArrayEqual(thresh_coord.points, self.thresholds)
self.assertEqual(thresh_coord.name(), "air_temperature")
self.assertEqual(thresh_coord.var_name, "threshold")
self.assertEqual(thresh_coord.units, "K")
self.assertEqual(len(thresh_coord.attributes), 1)
self.assertEqual(thresh_coord.attributes["spp__relative_to_threshold"], "above")
check_mandatory_standards(result)
def test_relative_to_threshold(self):
"""Test ability to reset the "spp__relative_to_threshold" attribute"""
data = np.flipud(self.data)
result = set_up_probability_cube(
data, self.thresholds, spp__relative_to_threshold="below"
)
self.assertEqual(len(result.coord(var_name="threshold").attributes), 1)
self.assertEqual(
result.coord(var_name="threshold").attributes["spp__relative_to_threshold"],
"below",
)
def test_relative_to_threshold_set(self):
"""Test that an error is raised if the "spp__relative_to_threshold"
attribute has not been set when setting up a probability cube"""
msg = "The spp__relative_to_threshold attribute MUST be set"
with self.assertRaisesRegex(ValueError, msg):
set_up_probability_cube(
self.data, self.thresholds, spp__relative_to_threshold=None
)
def test_standard_grid_metadata(self):
"""Test standard grid metadata"""
result = set_up_probability_cube(
self.data, self.thresholds, standard_grid_metadata="uk_ens"
)
self.assertEqual(result.attributes["mosg__grid_type"], "standard")
self.assertEqual(result.attributes["mosg__grid_version"], "1.3.0")
self.assertEqual(result.attributes["mosg__grid_domain"], "uk_extended")
self.assertEqual(result.attributes["mosg__model_configuration"], "uk_ens")
class test_add_coordinate(IrisTest):
"""Test the add_coordinate utility"""
def setUp(self):
"""Set up new coordinate descriptors"""
self.height_points = np.arange(100.0, 1001.0, 100.0)
self.height_unit = "metres"
self.input_cube = set_up_variable_cube(
np.ones((3, 4), dtype=np.float32),
time=datetime(2017, 10, 10, 1, 0),
frt=datetime(2017, 10, 9, 21, 0),
)
def test_basic(self):
"""Test addition of a leading height coordinate"""
result = add_coordinate(
self.input_cube, self.height_points, "height", coord_units=self.height_unit
)
self.assertIsInstance(result, iris.cube.Cube)
self.assertSequenceEqual(result.shape, (10, 3, 4))
self.assertEqual(result.coord_dims("height"), (0,))
self.assertArrayAlmostEqual(result.coord("height").points, self.height_points)
self.assertEqual(result.coord("height").dtype, np.float32)
self.assertEqual(result.coord("height").units, self.height_unit)
check_mandatory_standards(result)
def test_adding_coordinate_with_attribute(self):
"""Test addition of a leading height coordinate with an appropriate
attribute."""
height_attribute = {"positive": "up"}
result = add_coordinate(
self.input_cube,
self.height_points,
"height",
coord_units=self.height_unit,
attributes=height_attribute,
)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(result.coord_dims("height"), (0,))
self.assertEqual(result.coord("height").attributes, height_attribute)
def test_reorder(self):
"""Test new coordinate can be placed in different positions"""
input_cube = set_up_variable_cube(np.ones((4, 3, 4), dtype=np.float32))
result = add_coordinate(
input_cube,
self.height_points,
"height",
coord_units=self.height_unit,
order=[1, 0, 2, 3],
)
self.assertSequenceEqual(result.shape, (4, 10, 3, 4))
self.assertEqual(result.coord_dims("height"), (1,))
def test_datatype(self):
"""Test coordinate datatype"""
result = add_coordinate(
self.input_cube,
self.height_points,
"height",
coord_units=self.height_unit,
dtype=np.int32,
)
self.assertEqual(result.coord("height").dtype, np.int32)
def test_datetime(self):
"""Test a leading time coordinate can be added successfully"""
datetime_points = [datetime(2017, 10, 10, 3, 0), datetime(2017, 10, 10, 4, 0)]
result = add_coordinate(
self.input_cube, datetime_points, "time", is_datetime=True
)
# check time is now the leading dimension
self.assertEqual(result.coord_dims("time"), (0,))
self.assertEqual(len(result.coord("time").points), 2)
# check forecast period has been updated
expected_fp_points = 3600 * np.array([6, 7], dtype=np.int64)
self.assertArrayAlmostEqual(
result.coord("forecast_period").points, expected_fp_points
)
def test_datetime_no_fp(self):
"""Test a leading time coordinate can be added successfully when there
is no forecast period on the input cube"""
self.input_cube.remove_coord("forecast_period")
datetime_points = [datetime(2017, 10, 10, 3, 0), datetime(2017, 10, 10, 4, 0)]
result = add_coordinate(
self.input_cube, datetime_points, "time", is_datetime=True
)
# check a forecast period coordinate has been added
expected_fp_points = 3600 * np.array([6, 7], dtype=np.int64)
self.assertArrayAlmostEqual(
result.coord("forecast_period").points, expected_fp_points
)
def test_time_points(self):
"""Test a time coordinate can be added using integer points rather
than datetimes, and that forecast period is correctly re-calculated"""
time_val = self.input_cube.coord("time").points[0]
time_points = np.array([time_val + 3600, time_val + 7200])
fp_val = self.input_cube.coord("forecast_period").points[0]
expected_fp_points = np.array([fp_val + 3600, fp_val + 7200])
result = add_coordinate(
self.input_cube,
time_points,
"time",
coord_units=TIME_COORDS["time"].units,
dtype=TIME_COORDS["time"].dtype,
)
self.assertArrayEqual(result.coord("time").points, time_points)
self.assertArrayEqual(
result.coord("forecast_period").points, expected_fp_points
)
if __name__ == "__main__":
unittest.main()
| 43.898833
| 88
| 0.657197
|
70736b4ee8f58587887f2a3e35437dfd52859c6a
| 1,265
|
py
|
Python
|
app/utils.py
|
rdp-jr/flask-mvc-starter-kit
|
187b874017a8f5bb0af65db3af387a6950c607e0
|
[
"MIT"
] | 1
|
2021-11-09T07:40:36.000Z
|
2021-11-09T07:40:36.000Z
|
app/utils.py
|
rdp-jr/flask-mvc-starter-kit
|
187b874017a8f5bb0af65db3af387a6950c607e0
|
[
"MIT"
] | null | null | null |
app/utils.py
|
rdp-jr/flask-mvc-starter-kit
|
187b874017a8f5bb0af65db3af387a6950c607e0
|
[
"MIT"
] | null | null | null |
from flask import render_template
import os
def register_error_handlers(app):
# 400 - Bad Request
@app.errorhandler(400)
def bad_request(e):
return render_template('errors/400.html'), 400
# 403 - Forbidden
@app.errorhandler(403)
def forbidden(e):
return render_template('errors/403.html'), 403
# 404 - Page Not Found
@app.errorhandler(404)
def page_not_found(e):
return render_template('/errors/404.html'), 404
# return "404"
# 405 - Method Not Allowed
@app.errorhandler(405)
def method_not_allowed(e):
return render_template('errors/405.html'), 405
# 500 - Internal Server Error
@app.errorhandler(500)
def server_error(e):
return render_template('errors/500.html'), 500
def register_blueprints(app):
"""
Automatically register routes (blueprints) using the route variable in each route file
"""
import pkgutil
import app.routes as routes
import importlib
pkgpath = os.path.dirname(routes.__file__)
blueprints = [name for _, name, _ in pkgutil.iter_modules([pkgpath])]
for blueprint in blueprints:
bp = importlib.import_module('app.routes.' + blueprint)
app.register_blueprint(bp.route)
| 27.5
| 90
| 0.667194
|
bd64ddc253345e1faa3f00dbcf8b6cc3b6b3812f
| 8,353
|
py
|
Python
|
sam/SAM.py
|
stereoboy/external_memory
|
f9572d5720216165af1ab46281f5c08357e151cd
|
[
"MIT"
] | null | null | null |
sam/SAM.py
|
stereoboy/external_memory
|
f9572d5720216165af1ab46281f5c08357e151cd
|
[
"MIT"
] | null | null | null |
sam/SAM.py
|
stereoboy/external_memory
|
f9572d5720216165af1ab46281f5c08357e151cd
|
[
"MIT"
] | null | null | null |
import common.utils as utils
import tensorflow as tf
#import tensorflow.contrib.rnn.python.ops.core_rnn_cell as rnn_cell
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl as rnn_cell
#from tensorflow.python.ops.rnn_cell import LSTMStateTuple
from tensorflow.python.ops.rnn_cell_impl import _RNNCell as RNNCell
import collections
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.ops import array_ops
_StateTuple = collections.namedtuple("StateTuple", ("c", "h"))
class Controller(RNNCell):
def __init__(self, x_size, h_size, w_mem, num_layers ):
"""Initialize the basic Controller RNN.
Args:
x_size: The dimension of input.
w_mem: memory word size
R: number of read heads
L: number of layers
"""
self.x_size = x_size
self.h_size = h_size
self.w_mem = w_mem
self.num_layers = num_layers
def __call__(self, x, reads, state, scope=None):
dtype = x.dtype
batch_size = x.get_shape()[0]
new_states = []
new_outs = []
h = tf.concat(values=[x] + reads, axis=1)
with tf.variable_scope("Controller"):
for l in range(self.num_layers):
if self.num_layers > 1:
s_prev, h_prev = state[l]
else:
print state
s_prev, h_prev = state
h = tf.concat(values=[x] + reads, axis=1)
with tf.variable_scope("cell_%d" % l):
ret = rnn_cell._linear([h_prev, h], 4*self.h_size, bias=True)
_i, _s_new, _f, _o = array_ops.split(value=ret, num_or_size_splits=4, axis=1)
i = sigmoid(_i)
f = sigmoid(_f)
s_new = f*s_prev + i*tanh(_s_new)
o = sigmoid(_o)
h_new = o*tanh(s_new)
if self.num_layers > 1:
new_outs.append(h_new)
new_states.append(_StateTuple(s_new, h_new))
else:
return (h_new, _StateTuple(s_new, h_new))
h = h_new
return (new_outs, tuple(new_states))
@property
def output_size(self):
return self.h_size
@property
def state_size(self):
if self.num_layers > 1:
return tuple( (self.h_size, self.h_size) for _ in xrange(self.num_layers))
else:
return (self.h_size, self.h_size)
def circular_convolution(w, s):
# build NxN matrix
def shift(s, i):
n = s.get_shape()[1]
#print s, n, i
#print [i, int(n-i)]
if i > 0 and i < n - 1:
left, right = array_ops.split(value=s, num_or_size_splits=[i, int(n-i)], axis=1)
s_ = tf.concat([right, left], axis=1)
return s_
else:
return s
if w.get_shape() != s.get_shape():
raise ValueError("w == s")
_S = []
n = w.get_shape()[1]
for i in xrange(n):
s_ = shift(s, i)
_S.append(s_)
S = tf.stack(_S, axis=1)
w_ = tf.expand_dims(w, axis=2)
w_ = tf.matmul(S, w_)
w = tf.squeeze(w_, axis=2)
return w
def focus_by_context(beta, k, memory):
def similarity(u, v):
#norm = tf.norm(u, axis=2, keep_dims=True)*tf.norm(v, axis=2, keep_dims=True)
u = u/tf.norm(u, axis=2, keep_dims=True)
v = v/tf.norm(v, axis=2, keep_dims=True)
#u = utils.print_debug(u, message="u:")
#v = utils.print_debug(v, message="v:")
#norm = utils.print_debug(norm, message="norm:")
ret = tf.matmul(u, v, transpose_b=True)
return ret
k_ = tf.expand_dims(k, axis=1) # expand dim batch x 1 x w_mem
#k_ = utils.print_debug(k_, message="k_:")
_w = similarity(memory, k_) # batch x n_mem x 1
#_w = utils.print_debug(_w, message="_w:")
_w = tf.squeeze(_w, axis=[2]) # batch x n_mem
_w = beta*_w
w = tf.nn.softmax(_w)
return w
def focus_by_location(w, w_prev, s, g, gamma):
w_ = g*w + (1-g)*w_prev
w_ = circular_convolution(w_, s)
w_ = tf.pow(w_, gamma)
w_ = w_/tf.reduce_sum(w_, axis=1, keep_dims=True)
return w_
def addressing(heads, memory, ws_prev):
ws = []
for w_prev, head in zip(ws_prev, heads):
k = head['k']
beta = head['beta']
g = head['g']
s = head['s']
gamma = head['gamma']
#gamma = utils.print_debug(gamma, message="gamma:")
w = focus_by_context(beta, k, memory)
w = focus_by_location(w, w_prev, s, g, gamma)
ws.append(w)
return ws
def process_read(memory, ws):
reads = []
for w in ws:
w = tf.expand_dims(w, axis=1)
_read = tf.matmul(w, memory)
read = tf.squeeze(_read, axis=[1])
reads.append(read)
return reads
def update_memory(memory, add_vec, erase_vec, write_ws):
for w in write_ws:
w_ = tf.expand_dims(w, axis=2) # batch x n_mem x 1
a = tf.expand_dims(add_vec, axis=1) # batch x 1 x w_mem
e = tf.expand_dims(erase_vec, axis=1) # batch x 1 x w_mem
A = tf.matmul(w_, a) # batch x n_mem x w_mem
E = tf.matmul(w_, e) # batch x n_mem x w_mem
memory = memory* (1 - E) + A
return memory
def init_weight(batch_size, n, dtype):
_ret = tf.constant(1.0, dtype=dtype, shape=[batch_size, n])
ret = _ret/tf.reduce_sum(_ret, axis=1, keep_dims=True)
return ret
def init_vector(batch_size, dim, dtype):
ret = tf.constant(0.0, dtype=dtype, shape=[batch_size, dim])
return ret
def debug_scope():
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="/*")
print "============================"
for item in var_list:
print item.name
class SAMCell(object):
def __init__(self, y_size, batch_size, Controller, R, W, n_mem, dtype):
self.Controller = Controller
self.y_size = y_size
self.w_mem = self.Controller.w_mem
self.R = R
self.W = W
self.batch_size = batch_size
self.n_mem = n_mem
self.head_split_list = [self.w_mem, 1, 1, self.n_mem, 1] # for k, beta, g, s, gamma
head_size = sum(self.head_split_list)
self.xi_split_list = [head_size]*(R + W) + [self.w_mem] + [self.w_mem]
def __call__(self, x):
dtype = x.dtype
batch_size = x.get_shape()[0]
num_step = x.get_shape()[1]
xi_size = sum(self.xi_split_list)
# unroll LSTM RNN
outputs = []
with tf.variable_scope("NTM"):
self.memory = tf.constant(0.001, dtype=dtype, shape=[batch_size, self.n_mem, self.w_mem])
state = self.Controller.zero_state(batch_size, dtype=dtype)
reads = [init_vector(batch_size, self.w_mem, dtype) for _ in xrange(self.R)]
read_ws = [init_weight(batch_size, self.n_mem, dtype) for _ in xrange(self.R)]
write_ws = [init_weight(batch_size, self.n_mem, dtype) for _ in xrange(self.W)]
for t in xrange(num_step):
print "step:", t
if t > 0:
tf.get_variable_scope().reuse_variables()
cell_out, state = self.Controller(x[:, t, :], reads, state)
with tf.variable_scope("Nu"):
nu = rnn_cell._linear(cell_out, self.y_size, bias=False)
with tf.variable_scope("Xi"):
xi = rnn_cell._linear(cell_out, xi_size, bias=False)
_head_params = tf.split(value=xi, num_or_size_splits=self.xi_split_list, axis=1)
# extract add_vec, erase_vec
_add_vec, _erase_vec = _head_params[-2:]
add_vec = tf.sigmoid(_add_vec)
erase_vec = tf.sigmoid(_erase_vec)
# extract head parameters from controller outputs
read_heads = []
write_heads = []
for i, params in enumerate(_head_params[:-2]):
head = {}
_k, _beta, _g, _s, _gamma = tf.split(value=params, num_or_size_splits=self.head_split_list, axis=1)
head['k'] = _k
head['beta'] = utils.oneplus(_beta)
head['g'] = tf.sigmoid(_g)
head['s'] = tf.nn.softmax(_s)
head['gamma'] = utils.oneplus(_gamma)
if i < self.R:
read_heads.append(head)
else:
write_heads.append(head)
read_ws = addressing(read_heads, self.memory, read_ws)
write_ws = addressing(write_heads, self.memory, write_ws)
reads = process_read(self.memory, read_ws)
self.memory = update_memory(self.memory, add_vec, erase_vec, write_ws)
with tf.variable_scope("Out"):
y = nu + rnn_cell._linear(reads, self.y_size, bias=False)
outputs.append(y)
if t==0:
debug_out = y
debug_scope()
#debug_out = xi
#output = tf.reshape(tf.concat(values=outputs, axis=1), [-1, self.y_size])
output = tf.stack(values=outputs, axis=1)
return output, debug_out
| 30.155235
| 109
| 0.626122
|
158d978f8f370db30b77519a1de0ecee499f9bdb
| 11,081
|
py
|
Python
|
mmdet/models/roi_heads/standard_roi_head_with_text.py
|
asenina/mmdetection
|
951b23a7ecee7fa79caf7f80d71491b7f555a261
|
[
"Apache-2.0"
] | 4
|
2020-01-19T08:00:31.000Z
|
2020-02-14T03:25:45.000Z
|
mmdet/models/roi_heads/standard_roi_head_with_text.py
|
asenina/mmdetection
|
951b23a7ecee7fa79caf7f80d71491b7f555a261
|
[
"Apache-2.0"
] | 3
|
2021-03-12T12:06:37.000Z
|
2021-07-28T11:21:33.000Z
|
mmdet/models/roi_heads/standard_roi_head_with_text.py
|
asenina/mmdetection
|
951b23a7ecee7fa79caf7f80d71491b7f555a261
|
[
"Apache-2.0"
] | 1
|
2020-04-21T01:44:04.000Z
|
2020-04-21T01:44:04.000Z
|
import string
import numpy as np
import torch
from mmdet.core import bbox2roi, build_assigner, build_sampler
from mmdet.integration.nncf.utils import is_in_nncf_tracing
from ..builder import HEADS, build_head, build_roi_extractor
from .standard_roi_head import StandardRoIHead
from .test_mixins import dummy_pad
@HEADS.register_module()
class StandardRoIHeadWithText(StandardRoIHead):
"""Simplest base roi head including one bbox head, one mask head and one text head.
"""
def __init__(self, text_roi_extractor, text_head, text_thr,
alphabet=' ' + string.ascii_lowercase + string.digits,
mask_text_features=False,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.with_text = True
self.init_text_head(text_roi_extractor, text_head)
self.alphabet = alphabet
self.text_thr = text_thr
self.mask_text_features = mask_text_features
if self.train_cfg:
self.text_bbox_assigner = build_assigner(self.train_cfg.text_assigner)
self.text_bbox_sampler = build_sampler(self.train_cfg.text_sampler)
self.area_per_symbol_thr = self.train_cfg.get('area_per_symbol_thr', 0)
def init_text_head(self, text_roi_extractor, text_head):
self.text_roi_extractor = build_roi_extractor(text_roi_extractor)
self.text_head = build_head(text_head)
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_texts=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposals (list[Tensors]): list of region proposals.
gt_bboxes (list[Tensor]): each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_texts (None | list[numpy.ndarray]) : true encoded texts for each box
used if the architecture supports a text task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
assert len(gt_texts) == len(gt_bboxes), f'{gt_texts} {gt_bboxes}'
losses = super().forward_train(
x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks)
if self.with_text:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
text_sampling_results = []
for i in range(num_imgs):
assign_result = self.text_bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.text_bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels=gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
text_sampling_results.append(sampling_result)
text_results = self._text_forward_train(x, text_sampling_results, gt_texts, gt_masks)
if text_results['loss_text'] is not None:
losses.update(text_results)
return losses
def _text_forward(self, x, rois=None, pos_inds=None, bbox_feats=None, matched_gt_texts=None, det_masks=None):
assert ((rois is not None) ^
(pos_inds is not None and bbox_feats is not None))
if rois is not None:
text_feats = self.text_roi_extractor(
x[:self.text_roi_extractor.num_inputs], rois)
if self.with_shared_head:
text_feats = self.shared_head(text_feats)
else:
assert bbox_feats is not None
text_feats = bbox_feats[pos_inds]
if self.mask_text_features and det_masks:
hard_masks = det_masks > 0.5
hard_masks = torch.unsqueeze(hard_masks, 1)
hard_masks = hard_masks.repeat(1, text_feats.shape[1], 1, 1)
text_feats = text_feats * hard_masks
text_results = self.text_head.forward(text_feats, matched_gt_texts)
if self.training:
return dict(loss_text=text_results)
else:
return dict(text_results=text_results)
def _text_forward_train(self, x, sampling_results, gt_texts, gt_masks):
if not self.share_roi_extractor:
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
with torch.no_grad():
matched_gt_texts = []
for text, res in zip(gt_texts, sampling_results):
assigned_gt_indices = res.pos_assigned_gt_inds.cpu().numpy()
matched_texts = text[assigned_gt_indices]
assert len(matched_texts) == len(assigned_gt_indices)
matched_gt_texts.extend(matched_texts)
if pos_rois.shape[0] == 0:
return dict(loss_mask=None)
areas = (pos_rois[:, 3] - pos_rois[:, 1]) * (pos_rois[:, 4] - pos_rois[:, 2])
areas = areas.detach().cpu().numpy().reshape(-1)
# since EOS symbol added to text, subtract it
text_lengths = np.array([max(len(text) - 1, 1) for text in matched_gt_texts])
area_per_symbol = areas / text_lengths
matched_gt_texts = [text if aps >= self.area_per_symbol_thr else
[] for text, aps in zip(matched_gt_texts, area_per_symbol)]
mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, self.train_cfg)
text_results = self._text_forward(x, pos_rois, matched_gt_texts=matched_gt_texts, det_masks=mask_targets)
else:
raise NotImplementedError()
return text_results
def simple_test_text(self,
x,
img_metas,
det_bboxes,
det_masks,
rescale=False):
# image shape of the first image in the batch (only one)
ori_shape = img_metas['ori_shape']
scale_factor = img_metas['scale_factor']
if (torch.onnx.is_in_onnx_export() or is_in_nncf_tracing()) and det_bboxes.shape[0] == 0:
# If there are no detection there is nothing to do for a mask head.
# But during ONNX export we should run mask head
# for it to appear in the graph.
# So add one zero / dummy ROI that will be mapped
# to an Identity op in the graph.
det_bboxes = dummy_pad(det_bboxes, (0, 0, 0, 1))
if det_bboxes.shape[0] == 0:
decoded_texts = torch.empty([0, 0, 0],
dtype=det_bboxes.dtype,
device=det_bboxes.device)
confidences = torch.empty([0, 0, 0],
dtype=det_bboxes.dtype,
device=det_bboxes.device)
distributions = []
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
if rescale and not isinstance(scale_factor, float):
scale_factor = torch.from_numpy(scale_factor).to(
det_bboxes.device)
_bboxes = (
det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
text_rois = bbox2roi([_bboxes])
text_results = self._text_forward(x, text_rois, det_masks=det_masks)
if torch.onnx.is_in_onnx_export() or is_in_nncf_tracing():
return text_results
text_results = text_results['text_results'].permute(1, 0, 2)
text_results = torch.nn.functional.softmax(text_results, dim=-1)
confidences = []
decoded_texts = []
distributions = []
for text in text_results:
predicted_confidences, encoded = text.topk(1)
predicted_confidences = predicted_confidences.cpu().numpy()
encoded = encoded.cpu().numpy().reshape(-1)
decoded = ''
confidence = 1
for l, c in zip(encoded, predicted_confidences):
confidence *= c
if l == 1:
break
decoded += self.alphabet[l]
confidences.append(confidence)
assert self.alphabet[0] == self.alphabet[1] == ' '
distribution = np.transpose(text.cpu().numpy())[2:, :len(decoded) + 1]
distributions.append(distribution)
decoded_texts.append(decoded if confidence >= self.text_thr else '')
return decoded_texts, confidences, distributions
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
postprocess=True):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=False)
det_masks = [None for _ in det_bboxes]
if self.with_mask:
det_masks = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=False)
det_texts = [self.simple_test_text(x, img_metas[0], det_bboxes[0], det_masks[0])]
if postprocess:
results = []
for i in range(len(det_bboxes)):
bbox_results, segm_results = self.postprocess(det_bboxes[i], det_labels[i], det_masks[i], img_metas[i], rescale=rescale)
results.append((bbox_results, segm_results, det_texts[i]))
return results
else:
if det_masks is None or None in det_masks:
return det_bboxes, det_labels
else:
return det_bboxes, det_labels, det_masks, det_texts
| 43.798419
| 136
| 0.582077
|
74dcab5458d42db5fb0c81c64fff231bc75b20cc
| 889
|
py
|
Python
|
Course3/Lab4/validations.py
|
juanmabass/it-cert-automation-practice
|
afaddd78b92ccb393143d4d15d1a633e5cfe4250
|
[
"Apache-2.0"
] | null | null | null |
Course3/Lab4/validations.py
|
juanmabass/it-cert-automation-practice
|
afaddd78b92ccb393143d4d15d1a633e5cfe4250
|
[
"Apache-2.0"
] | null | null | null |
Course3/Lab4/validations.py
|
juanmabass/it-cert-automation-practice
|
afaddd78b92ccb393143d4d15d1a633e5cfe4250
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import re
def validate_user(username, minlen):
"""Checks if the received username matches the required conditions."""
if type(username) != str:
raise TypeError("username must be a string")
if minlen < 1:
raise ValueError("minlen must be at least 1")
# Usernames can't be shorter than minlen
if len(username) < minlen:
return False
# Usernames can only use letters, numbers, dots and underscores
if not re.match('^[a-z0-9._]*$', username):
return False
# Usernames can't begin with a number
if username[0].isalpha():
return False
return True
print(validate_user("blue.kale", 3)) # True
print(validate_user(".blue.kale", 3)) # Currently True, should be False
print(validate_user("red_quinoa", 4)) # True
print(validate_user("_red_quinoa", 4)) # Currently True, should be False
| 29.633333
| 74
| 0.669291
|
45e21f210ef7dfdbf0cc0196e4cb334ccaa1c3e9
| 100,905
|
py
|
Python
|
Patches.py
|
lallaria/OoT-Randomizer
|
443ade6d1198af1c78b52c3544fb9f0bcfbdf63d
|
[
"MIT"
] | null | null | null |
Patches.py
|
lallaria/OoT-Randomizer
|
443ade6d1198af1c78b52c3544fb9f0bcfbdf63d
|
[
"MIT"
] | null | null | null |
Patches.py
|
lallaria/OoT-Randomizer
|
443ade6d1198af1c78b52c3544fb9f0bcfbdf63d
|
[
"MIT"
] | null | null | null |
import random
import struct
import itertools
import re
import zlib
from World import World
from Rom import Rom
from Spoiler import Spoiler
from LocationList import business_scrubs
from Hints import writeGossipStoneHints, buildBossRewardHints, \
buildGanonText, getSimpleHintNoPrefix
from Utils import data_path
from Messages import read_messages, update_message_by_id, read_shop_items, \
write_shop_items, remove_unused_messages, make_player_message, \
add_item_messages, repack_messages, shuffle_messages, \
get_message_by_id
from OcarinaSongs import replace_songs
from MQ import patch_files, File, update_dmadata, insert_space, add_relocations
from SaveContext import SaveContext
def patch_rom(spoiler:Spoiler, world:World, rom:Rom):
with open(data_path('generated/rom_patch.txt'), 'r') as stream:
for line in stream:
address, value = [int(x, 16) for x in line.split(',')]
rom.write_int32(address, value)
rom.scan_dmadata_update()
# Write Randomizer title screen logo
with open(data_path('title.bin'), 'rb') as stream:
writeAddress = 0x01795300
titleBytesComp = stream.read()
titleBytesDiff = zlib.decompress(titleBytesComp)
originalBytes = rom.original.buffer[writeAddress: writeAddress+ len(titleBytesDiff)]
titleBytes = bytearray([a ^ b for a, b in zip(titleBytesDiff, originalBytes)])
rom.write_bytes(writeAddress, titleBytes)
# Fixes the typo of keatan mask in the item select screen
with open(data_path('keaton.bin'), 'rb') as stream:
writeAddress = 0x8A7C00
keatonBytesComp = stream.read()
keatonBytesDiff = zlib.decompress(keatonBytesComp)
originalBytes = rom.original.buffer[writeAddress: writeAddress+ len(keatonBytesDiff)]
keatonBytes = bytearray([a ^ b for a, b in zip(keatonBytesDiff, originalBytes)])
rom.write_bytes(writeAddress, keatonBytes)
# Load Triforce model into a file
triforce_obj_file = File({ 'Name': 'object_gi_triforce' })
triforce_obj_file.copy(rom)
with open(data_path('triforce.bin'), 'rb') as stream:
obj_data = stream.read()
rom.write_bytes(triforce_obj_file.start, obj_data)
triforce_obj_file.end = triforce_obj_file.start + len(obj_data)
update_dmadata(rom, triforce_obj_file)
# Add it to the extended object table
add_to_extended_object_table(rom, 0x193, triforce_obj_file)
# Build a Double Defense model from the Heart Container model
dd_obj_file = File({
'Name': 'object_gi_hearts',
'Start': '014D9000',
'End': '014DA590',
})
dd_obj_file.copy(rom)
# Update colors for the Double Defense variant
rom.write_bytes(dd_obj_file.start + 0x1294, [0xFF, 0xCF, 0x0F]) # Exterior Primary Color
rom.write_bytes(dd_obj_file.start + 0x12B4, [0xFF, 0x46, 0x32]) # Exterior Env Color
rom.write_bytes(dd_obj_file.start + 0x1474, [0xFF, 0xFF, 0xFF]) # Interior Primary Color
rom.write_bytes(dd_obj_file.start + 0x1494, [0xFF, 0xFF, 0xFF]) # Interior Env Color
update_dmadata(rom, dd_obj_file)
# Add it to the extended object table
add_to_extended_object_table(rom, 0x194, dd_obj_file)
# Force language to be English in the event a Japanese rom was submitted
rom.write_byte(0x3E, 0x45)
rom.force_patch.append(0x3E)
# Increase the instance size of Bombchus prevent the heap from becoming corrupt when
# a Dodongo eats a Bombchu. Does not fix stale pointer issues with the animation
rom.write_int32(0xD6002C, 0x1F0)
# Can always return to youth
rom.write_byte(0xCB6844, 0x35)
rom.write_byte(0x253C0E2, 0x03) # Moves sheik from pedestal
# Fix Ice Cavern Alcove Camera
if not world.dungeon_mq['Ice Cavern']:
rom.write_byte(0x2BECA25,0x01);
rom.write_byte(0x2BECA2D,0x01);
# Fix GS rewards to be static
rom.write_int32(0xEA3934, 0)
rom.write_bytes(0xEA3940, [0x10, 0x00])
# Fix horseback archery rewards to be static
rom.write_byte(0xE12BA5, 0x00)
rom.write_byte(0xE12ADD, 0x00)
# Fix deku theater rewards to be static
rom.write_bytes(0xEC9A7C, [0x00, 0x00, 0x00, 0x00]) #Sticks
rom.write_byte(0xEC9CD5, 0x00) #Nuts
# Fix deku scrub who sells stick upgrade
rom.write_bytes(0xDF8060, [0x00, 0x00, 0x00, 0x00])
# Fix deku scrub who sells nut upgrade
rom.write_bytes(0xDF80D4, [0x00, 0x00, 0x00, 0x00])
# Fix rolling goron as child reward to be static
rom.write_bytes(0xED2960, [0x00, 0x00, 0x00, 0x00])
# Fix proximity text boxes (Navi) (Part 1)
rom.write_bytes(0xDF8B84, [0x00, 0x00, 0x00, 0x00])
# Fix final magic bean to cost 99
rom.write_byte(0xE20A0F, 0x63)
rom.write_bytes(0x94FCDD, [0x08, 0x39, 0x39])
# Remove locked door to Boss Key Chest in Fire Temple
if not world.keysanity and not world.dungeon_mq['Fire Temple']:
rom.write_byte(0x22D82B7, 0x3F)
# Remove the unused locked door in water temple
if not world.dungeon_mq['Water Temple']:
rom.write_byte(0x25B8197, 0x3F)
if world.bombchus_in_logic:
rom.write_int32(rom.sym('BOMBCHUS_IN_LOGIC'), 1)
# Change graveyard graves to not allow grabbing on to the ledge
rom.write_byte(0x0202039D, 0x20)
rom.write_byte(0x0202043C, 0x24)
# Fix Castle Courtyard to check for meeting Zelda, not Zelda fleeing, to block you
rom.write_bytes(0xCD5E76, [0x0E, 0xDC])
rom.write_bytes(0xCD5E12, [0x0E, 0xDC])
# Cutscene for all medallions never triggers when leaving shadow or spirit temples(hopefully stops warp to colossus on shadow completion with boss reward shuffle)
rom.write_byte(0xACA409, 0xAD)
rom.write_byte(0xACA49D, 0xCE)
# Speed Zelda's Letter scene
rom.write_bytes(0x290E08E, [0x05, 0xF0])
rom.write_byte(0xEFCBA7, 0x08)
rom.write_byte(0xEFE7C7, 0x05)
#rom.write_byte(0xEFEAF7, 0x08)
#rom.write_byte(0xEFE7C7, 0x05)
rom.write_bytes(0xEFE938, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xEFE948, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xEFE950, [0x00, 0x00, 0x00, 0x00])
# Speed Zelda escaping from Hyrule Castle
Block_code = [0x00, 0x00, 0x00, 0x01, 0x00, 0x21, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02]
rom.write_bytes(0x1FC0CF8, Block_code)
# songs as items flag
songs_as_items = world.shuffle_song_items or \
world.start_with_fast_travel or \
world.distribution.song_as_items
# Speed learning Zelda's Lullaby
rom.write_int32s(0x02E8E90C, [0x000003E8, 0x00000001]) # Terminator Execution
if songs_as_items:
rom.write_int16s(None, [0x0073, 0x001, 0x0002, 0x0002]) # ID, start, end, end
else:
rom.write_int16s(None, [0x0073, 0x003B, 0x003C, 0x003C]) # ID, start, end, end
rom.write_int32s(0x02E8E91C, [0x00000013, 0x0000000C]) # Textbox, Count
if songs_as_items:
rom.write_int16s(None, [0xFFFF, 0x0000, 0x0010, 0xFFFF, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
else:
rom.write_int16s(None, [0x0017, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x00D4, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
# Speed learning Sun's Song
if songs_as_items:
rom.write_int32(0x0332A4A4, 0xFFFFFFFF) # Header: frame_count
else:
rom.write_int32(0x0332A4A4, 0x0000003C) # Header: frame_count
rom.write_int32s(0x0332A868, [0x00000013, 0x00000008]) # Textbox, Count
rom.write_int16s(None, [0x0018, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x00D3, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
# Speed learning Saria's Song
if songs_as_items:
rom.write_int32(0x020B1734, 0xFFFFFFFF) # Header: frame_count
else:
rom.write_int32(0x020B1734, 0x0000003C) # Header: frame_count
rom.write_int32s(0x20B1DA8, [0x00000013, 0x0000000C]) # Textbox, Count
rom.write_int16s(None, [0x0015, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x00D1, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int32s(0x020B19C0, [0x0000000A, 0x00000006]) # Link, Count
rom.write_int16s(0x020B19C8, [0x0011, 0x0000, 0x0010, 0x0000]) #action, start, end, ????
rom.write_int16s(0x020B19F8, [0x003E, 0x0011, 0x0020, 0x0000]) #action, start, end, ????
rom.write_int32s(None, [0x80000000, # ???
0x00000000, 0x000001D4, 0xFFFFF731, # start_XYZ
0x00000000, 0x000001D4, 0xFFFFF712]) # end_XYZ
# Speed learning Epona's Song
rom.write_int32s(0x029BEF60, [0x000003E8, 0x00000001]) # Terminator Execution
if songs_as_items:
rom.write_int16s(None, [0x005E, 0x0001, 0x0002, 0x0002]) # ID, start, end, end
else:
rom.write_int16s(None, [0x005E, 0x000A, 0x000B, 0x000B]) # ID, start, end, end
rom.write_int32s(0x029BECB0, [0x00000013, 0x00000002]) # Textbox, Count
if songs_as_items:
rom.write_int16s(None, [0xFFFF, 0x0000, 0x0009, 0xFFFF, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
else:
rom.write_int16s(None, [0x00D2, 0x0000, 0x0009, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0xFFFF, 0x000A, 0x003C, 0xFFFF, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
# Speed learning Song of Time
rom.write_int32s(0x0252FB98, [0x000003E8, 0x00000001]) # Terminator Execution
if songs_as_items:
rom.write_int16s(None, [0x0035, 0x0001, 0x0002, 0x0002]) # ID, start, end, end
else:
rom.write_int16s(None, [0x0035, 0x003B, 0x003C, 0x003C]) # ID, start, end, end
rom.write_int32s(0x0252FC80, [0x00000013, 0x0000000C]) # Textbox, Count
if songs_as_items:
rom.write_int16s(None, [0xFFFF, 0x0000, 0x0010, 0xFFFF, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
else:
rom.write_int16s(None, [0x0019, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x00D5, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int32(0x01FC3B84, 0xFFFFFFFF) # Other Header?: frame_count
# Speed learning Song of Storms
if songs_as_items:
rom.write_int32(0x03041084, 0xFFFFFFFF) # Header: frame_count
else:
rom.write_int32(0x03041084, 0x0000000A) # Header: frame_count
rom.write_int32s(0x03041088, [0x00000013, 0x00000002]) # Textbox, Count
rom.write_int16s(None, [0x00D6, 0x0000, 0x0009, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0xFFFF, 0x00BE, 0x00C8, 0xFFFF, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
# Speed learning Minuet of Forest
if songs_as_items:
rom.write_int32(0x020AFF84, 0xFFFFFFFF) # Header: frame_count
else:
rom.write_int32(0x020AFF84, 0x0000003C) # Header: frame_count
rom.write_int32s(0x020B0800, [0x00000013, 0x0000000A]) # Textbox, Count
rom.write_int16s(None, [0x000F, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x0073, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int32s(0x020AFF88, [0x0000000A, 0x00000005]) # Link, Count
rom.write_int16s(0x020AFF90, [0x0011, 0x0000, 0x0010, 0x0000]) #action, start, end, ????
rom.write_int16s(0x020AFFC1, [0x003E, 0x0011, 0x0020, 0x0000]) #action, start, end, ????
rom.write_int32s(0x020B0488, [0x00000056, 0x00000001]) # Music Change, Count
rom.write_int16s(None, [0x003F, 0x0021, 0x0022, 0x0000]) #action, start, end, ????
rom.write_int32s(0x020B04C0, [0x0000007C, 0x00000001]) # Music Fade Out, Count
rom.write_int16s(None, [0x0004, 0x0000, 0x0000, 0x0000]) #action, start, end, ????
# Speed learning Bolero of Fire
if songs_as_items:
rom.write_int32(0x0224B5D4, 0xFFFFFFFF) # Header: frame_count
else:
rom.write_int32(0x0224B5D4, 0x0000003C) # Header: frame_count
rom.write_int32s(0x0224D7E8, [0x00000013, 0x0000000A]) # Textbox, Count
rom.write_int16s(None, [0x0010, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x0074, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int32s(0x0224B5D8, [0x0000000A, 0x0000000B]) # Link, Count
rom.write_int16s(0x0224B5E0, [0x0011, 0x0000, 0x0010, 0x0000]) #action, start, end, ????
rom.write_int16s(0x0224B610, [0x003E, 0x0011, 0x0020, 0x0000]) #action, start, end, ????
rom.write_int32s(0x0224B7F0, [0x0000002F, 0x0000000E]) # Sheik, Count
rom.write_int16s(0x0224B7F8, [0x0000]) #action
rom.write_int16s(0x0224B828, [0x0000]) #action
rom.write_int16s(0x0224B858, [0x0000]) #action
rom.write_int16s(0x0224B888, [0x0000]) #action
# Speed learning Serenade of Water
if songs_as_items:
rom.write_int32(0x02BEB254, 0xFFFFFFFF) # Header: frame_count
else:
rom.write_int32(0x02BEB254, 0x0000003C) # Header: frame_count
rom.write_int32s(0x02BEC880, [0x00000013, 0x00000010]) # Textbox, Count
rom.write_int16s(None, [0x0011, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x0075, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int32s(0x02BEB258, [0x0000000A, 0x0000000F]) # Link, Count
rom.write_int16s(0x02BEB260, [0x0011, 0x0000, 0x0010, 0x0000]) #action, start, end, ????
rom.write_int16s(0x02BEB290, [0x003E, 0x0011, 0x0020, 0x0000]) #action, start, end, ????
rom.write_int32s(0x02BEB530, [0x0000002F, 0x00000006]) # Sheik, Count
rom.write_int16s(0x02BEB538, [0x0000, 0x0000, 0x018A, 0x0000]) #action, start, end, ????
rom.write_int32s(None, [0x1BBB0000, # ???
0xFFFFFB10, 0x8000011A, 0x00000330, # start_XYZ
0xFFFFFB10, 0x8000011A, 0x00000330]) # end_XYZ
rom.write_int32s(0x02BEC848, [0x00000056, 0x00000001]) # Music Change, Count
rom.write_int16s(None, [0x0059, 0x0021, 0x0022, 0x0000]) #action, start, end, ????
# Speed learning Nocturne of Shadow
rom.write_int32s(0x01FFE458, [0x000003E8, 0x00000001]) # Other Scene? Terminator Execution
rom.write_int16s(None, [0x002F, 0x0001, 0x0002, 0x0002]) # ID, start, end, end
rom.write_int32(0x01FFFDF4, 0x0000003C) # Header: frame_count
rom.write_int32s(0x02000FD8, [0x00000013, 0x0000000E]) # Textbox, Count
if songs_as_items:
rom.write_int16s(None, [0xFFFF, 0x0000, 0x0010, 0xFFFF, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
else:
rom.write_int16s(None, [0x0013, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x0077, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int32s(0x02000128, [0x000003E8, 0x00000001]) # Terminator Execution
if songs_as_items:
rom.write_int16s(None, [0x0032, 0x0001, 0x0002, 0x0002]) # ID, start, end, end
else:
rom.write_int16s(None, [0x0032, 0x003A, 0x003B, 0x003B]) # ID, start, end, end
# Speed learning Requiem of Spirit
rom.write_int32(0x0218AF14, 0x0000003C) # Header: frame_count
rom.write_int32s(0x0218C574, [0x00000013, 0x00000008]) # Textbox, Count
if songs_as_items:
rom.write_int16s(None, [0xFFFF, 0x0000, 0x0010, 0xFFFF, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
else:
rom.write_int16s(None, [0x0012, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x0076, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int32s(0x0218B478, [0x000003E8, 0x00000001]) # Terminator Execution
if songs_as_items:
rom.write_int16s(None, [0x0030, 0x0001, 0x0002, 0x0002]) # ID, start, end, end
else:
rom.write_int16s(None, [0x0030, 0x003A, 0x003B, 0x003B]) # ID, start, end, end
rom.write_int32s(0x0218AF18, [0x0000000A, 0x0000000B]) # Link, Count
rom.write_int16s(0x0218AF20, [0x0011, 0x0000, 0x0010, 0x0000]) #action, start, end, ????
rom.write_int32s(None, [0x40000000, # ???
0xFFFFFAF9, 0x00000008, 0x00000001, # start_XYZ
0xFFFFFAF9, 0x00000008, 0x00000001, # end_XYZ
0x0F671408, 0x00000000, 0x00000001]) # normal_XYZ
rom.write_int16s(0x0218AF50, [0x003E, 0x0011, 0x0020, 0x0000]) #action, start, end, ????
# Speed learning Prelude of Light
if songs_as_items:
rom.write_int32(0x0252FD24, 0xFFFFFFFF) # Header: frame_count
else:
rom.write_int32(0x0252FD24, 0x0000003C) # Header: frame_count
rom.write_int32s(0x02531320, [0x00000013, 0x0000000E]) # Textbox, Count
rom.write_int16s(None, [0x0014, 0x0000, 0x0010, 0x0002, 0x088B, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int16s(None, [0x0078, 0x0011, 0x0020, 0x0000, 0xFFFF, 0xFFFF]) # ID, start, end, type, alt1, alt2
rom.write_int32s(0x0252FF10, [0x0000002F, 0x00000009]) # Sheik, Count
rom.write_int16s(0x0252FF18, [0x0006, 0x0000, 0x0000, 0x0000]) #action, start, end, ????
rom.write_int32s(0x025313D0, [0x00000056, 0x00000001]) # Music Change, Count
rom.write_int16s(None, [0x003B, 0x0021, 0x0022, 0x0000]) #action, start, end, ????
# Speed scene after Deku Tree
rom.write_bytes(0x2077E20, [0x00, 0x07, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
rom.write_bytes(0x2078A10, [0x00, 0x0E, 0x00, 0x1F, 0x00, 0x20, 0x00, 0x20])
Block_code = [0x00, 0x80, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0x00, 0x1E, 0x00, 0x28, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
rom.write_bytes(0x2079570, Block_code)
# Speed scene after Dodongo's Cavern
rom.write_bytes(0x2221E88, [0x00, 0x0C, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3C])
rom.write_bytes(0x2223308, [0x00, 0x81, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00])
# Speed scene after Jabu Jabu's Belly
rom.write_bytes(0xCA3530, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0x2113340, [0x00, 0x0D, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3C])
rom.write_bytes(0x2113C18, [0x00, 0x82, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00])
rom.write_bytes(0x21131D0, [0x00, 0x01, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x3C])
# Speed scene after Forest Temple
rom.write_bytes(0xD4ED68, [0x00, 0x45, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3C])
rom.write_bytes(0xD4ED78, [0x00, 0x3E, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00])
rom.write_bytes(0x207B9D4, [0xFF, 0xFF, 0xFF, 0xFF])
# Speed scene after Fire Temple
rom.write_bytes(0x2001848, [0x00, 0x1E, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
rom.write_bytes(0xD100B4, [0x00, 0x62, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3C])
rom.write_bytes(0xD10134, [0x00, 0x3C, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00])
# Speed scene after Water Temple
rom.write_bytes(0xD5A458, [0x00, 0x15, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3C])
rom.write_bytes(0xD5A3A8, [0x00, 0x3D, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00])
rom.write_bytes(0x20D0D20, [0x00, 0x29, 0x00, 0xC7, 0x00, 0xC8, 0x00, 0xC8])
# Speed scene after Shadow Temple
rom.write_bytes(0xD13EC8, [0x00, 0x61, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3C])
rom.write_bytes(0xD13E18, [0x00, 0x41, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00])
# Speed scene after Spirit Temple
rom.write_bytes(0xD3A0A8, [0x00, 0x60, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3C])
rom.write_bytes(0xD39FF0, [0x00, 0x3F, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00])
# Speed Nabooru defeat scene
rom.write_bytes(0x2F5AF84, [0x00, 0x00, 0x00, 0x05])
rom.write_bytes(0x2F5C7DA, [0x00, 0x01, 0x00, 0x02])
rom.write_bytes(0x2F5C7A2, [0x00, 0x03, 0x00, 0x04])
rom.write_byte(0x2F5B369, 0x09)
rom.write_byte(0x2F5B491, 0x04)
rom.write_byte(0x2F5B559, 0x04)
rom.write_byte(0x2F5B621, 0x04)
rom.write_byte(0x2F5B761, 0x07)
# Speed scene with all medallions
rom.write_bytes(0x2512680, [0x00, 0x74, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
# Speed collapse of Ganon's Tower
rom.write_bytes(0x33FB328, [0x00, 0x76, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
# Speed Phantom Ganon defeat scene
rom.write_bytes(0xC944D8, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xC94548, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xC94730, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xC945A8, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xC94594, [0x00, 0x00, 0x00, 0x00])
# Speed Twinrova defeat scene
rom.write_bytes(0xD678CC, [0x24, 0x01, 0x03, 0xA2, 0xA6, 0x01, 0x01, 0x42])
rom.write_bytes(0xD67BA4, [0x10, 0x00])
# Speed scenes during final battle
# Ganondorf battle end
rom.write_byte(0xD82047, 0x09)
# Zelda descends
rom.write_byte(0xD82AB3, 0x66)
rom.write_byte(0xD82FAF, 0x65)
rom.write_int16s(0xD82D2E, [0x041F])
rom.write_int16s(0xD83142, [0x006B])
rom.write_bytes(0xD82DD8, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xD82ED4, [0x00, 0x00, 0x00, 0x00])
rom.write_byte(0xD82FDF, 0x33)
# After tower collapse
rom.write_byte(0xE82E0F, 0x04)
# Ganon intro
rom.write_bytes(0xE83D28, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xE83B5C, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xE84C80, [0x10, 0x00])
# Speed completion of the trials in Ganon's Castle
rom.write_int16s(0x31A8090, [0x006B, 0x0001, 0x0002, 0x0002]) #Forest
rom.write_int16s(0x31A9E00, [0x006E, 0x0001, 0x0002, 0x0002]) #Fire
rom.write_int16s(0x31A8B18, [0x006C, 0x0001, 0x0002, 0x0002]) #Water
rom.write_int16s(0x31A9430, [0x006D, 0x0001, 0x0002, 0x0002]) #Shadow
rom.write_int16s(0x31AB200, [0x0070, 0x0001, 0x0002, 0x0002]) #Spirit
rom.write_int16s(0x31AA830, [0x006F, 0x0001, 0x0002, 0x0002]) #Light
# Speed obtaining Fairy Ocarina
rom.write_bytes(0x2151230, [0x00, 0x72, 0x00, 0x3C, 0x00, 0x3D, 0x00, 0x3D])
Block_code = [0x00, 0x4A, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0x00, 0x3C, 0x00, 0x81, 0xFF, 0xFF]
rom.write_bytes(0x2151240, Block_code)
rom.write_bytes(0x2150E20, [0xFF, 0xFF, 0xFA, 0x4C])
if world.shuffle_ocarinas:
symbol = rom.sym('OCARINAS_SHUFFLED')
rom.write_byte(symbol,0x01)
# Speed Zelda Light Arrow cutscene
rom.write_bytes(0x2531B40, [0x00, 0x28, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
rom.write_bytes(0x2532FBC, [0x00, 0x75])
rom.write_bytes(0x2532FEA, [0x00, 0x75, 0x00, 0x80])
rom.write_byte(0x2533115, 0x05)
rom.write_bytes(0x2533141, [0x06, 0x00, 0x06, 0x00, 0x10])
rom.write_bytes(0x2533171, [0x0F, 0x00, 0x11, 0x00, 0x40])
rom.write_bytes(0x25331A1, [0x07, 0x00, 0x41, 0x00, 0x65])
rom.write_bytes(0x2533642, [0x00, 0x50])
rom.write_byte(0x253389D, 0x74)
rom.write_bytes(0x25338A4, [0x00, 0x72, 0x00, 0x75, 0x00, 0x79])
rom.write_bytes(0x25338BC, [0xFF, 0xFF])
rom.write_bytes(0x25338C2, [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF])
rom.write_bytes(0x25339C2, [0x00, 0x75, 0x00, 0x76])
rom.write_bytes(0x2533830, [0x00, 0x31, 0x00, 0x81, 0x00, 0x82, 0x00, 0x82])
# Speed Bridge of Light cutscene
rom.write_bytes(0x292D644, [0x00, 0x00, 0x00, 0xA0])
rom.write_bytes(0x292D680, [0x00, 0x02, 0x00, 0x0A, 0x00, 0x6C, 0x00, 0x00])
rom.write_bytes(0x292D6E8, [0x00, 0x27])
rom.write_bytes(0x292D718, [0x00, 0x32])
rom.write_bytes(0x292D810, [0x00, 0x02, 0x00, 0x3C])
rom.write_bytes(0x292D924, [0xFF, 0xFF, 0x00, 0x14, 0x00, 0x96, 0xFF, 0xFF])
#Speed Pushing of All Pushable Objects
rom.write_bytes(0xDD2B86, [0x40, 0x80]) #block speed
rom.write_bytes(0xDD2D26, [0x00, 0x01]) #block delay
rom.write_bytes(0xDD9682, [0x40, 0x80]) #milk crate speed
rom.write_bytes(0xDD981E, [0x00, 0x01]) #milk crate delay
rom.write_bytes(0xCE1BD0, [0x40, 0x80, 0x00, 0x00]) #amy puzzle speed
rom.write_bytes(0xCE0F0E, [0x00, 0x01]) #amy puzzle delay
rom.write_bytes(0xC77CA8, [0x40, 0x80, 0x00, 0x00]) #fire block speed
rom.write_bytes(0xC770C2, [0x00, 0x01]) #fire block delay
rom.write_bytes(0xCC5DBC, [0x29, 0xE1, 0x00, 0x01]) #forest basement puzzle delay
rom.write_bytes(0xDBCF70, [0x2B, 0x01, 0x00, 0x00]) #spirit cobra mirror startup
rom.write_bytes(0xDBCF70, [0x2B, 0x01, 0x00, 0x01]) #spirit cobra mirror delay
rom.write_bytes(0xDBA230, [0x28, 0x41, 0x00, 0x19]) #truth spinner speed
rom.write_bytes(0xDBA3A4, [0x24, 0x18, 0x00, 0x00]) #truth spinner delay
#Speed Deku Seed Upgrade Scrub Cutscene
rom.write_bytes(0xECA900, [0x24, 0x03, 0xC0, 0x00]) #scrub angle
rom.write_bytes(0xECAE90, [0x27, 0x18, 0xFD, 0x04]) #skip straight to giving item
rom.write_bytes(0xECB618, [0x25, 0x6B, 0x00, 0xD4]) #skip straight to digging back in
rom.write_bytes(0xECAE70, [0x00, 0x00, 0x00, 0x00]) #never initialize cs camera
rom.write_bytes(0xE5972C, [0x24, 0x08, 0x00, 0x01]) #timer set to 1 frame for giving item
# Remove remaining owls
rom.write_bytes(0x1FE30CE, [0x01, 0x4B])
rom.write_bytes(0x1FE30DE, [0x01, 0x4B])
rom.write_bytes(0x1FE30EE, [0x01, 0x4B])
rom.write_bytes(0x205909E, [0x00, 0x3F])
rom.write_byte(0x2059094, 0x80)
# Darunia won't dance
rom.write_bytes(0x22769E4, [0xFF, 0xFF, 0xFF, 0xFF])
# Zora moves quickly
rom.write_bytes(0xE56924, [0x00, 0x00, 0x00, 0x00])
# Speed Jabu Jabu swallowing Link
rom.write_bytes(0xCA0784, [0x00, 0x18, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
# Ruto no longer points to Zora Sapphire
rom.write_bytes(0xD03BAC, [0xFF, 0xFF, 0xFF, 0xFF])
# Ruto never disappears from Jabu Jabu's Belly
rom.write_byte(0xD01EA3, 0x00)
#Shift octorock in jabu forward
rom.write_bytes(0x275906E, [0xFF, 0xB3, 0xFB, 0x20, 0xF9, 0x56])
#Move fire/forest temple switches down 1 unit to make it easier to press
rom.write_bytes(0x24860A8, [0xFC, 0xF4]) #forest basement 1
rom.write_bytes(0x24860C8, [0xFC, 0xF4]) #forest basement 2
rom.write_bytes(0x24860E8, [0xFC, 0xF4]) #forest basement 3
rom.write_bytes(0x236C148, [0x11, 0x93]) #fire hammer room
# Speed up Epona race start
rom.write_bytes(0x29BE984, [0x00, 0x00, 0x00, 0x02])
rom.write_bytes(0x29BE9CA, [0x00, 0x01, 0x00, 0x02])
# Speed start of Horseback Archery
#rom.write_bytes(0x21B2064, [0x00, 0x00, 0x00, 0x02])
#rom.write_bytes(0x21B20AA, [0x00, 0x01, 0x00, 0x02])
# Speed up Epona escape
rom.write_bytes(0x1FC8B36, [0x00, 0x2A])
# Speed up draining the well
rom.write_bytes(0xE0A010, [0x00, 0x2A, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02])
rom.write_bytes(0x2001110, [0x00, 0x2B, 0x00, 0xB7, 0x00, 0xB8, 0x00, 0xB8])
# Speed up opening the royal tomb for both child and adult
rom.write_bytes(0x2025026, [0x00, 0x01])
rom.write_bytes(0x2023C86, [0x00, 0x01])
rom.write_byte(0x2025159, 0x02)
rom.write_byte(0x2023E19, 0x02)
#Speed opening of Door of Time
rom.write_bytes(0xE0A176, [0x00, 0x02])
rom.write_bytes(0xE0A35A, [0x00, 0x01, 0x00, 0x02])
# Speed up Lake Hylia Owl Flight
rom.write_bytes(0x20E60D2, [0x00, 0x01])
# Speed up Death Mountain Trail Owl Flight
rom.write_bytes(0x223B6B2, [0x00, 0x01])
# Poacher's Saw no longer messes up Deku Theater
rom.write_bytes(0xAE72CC, [0x00, 0x00, 0x00, 0x00])
# Change Prelude CS to check for medallion
rom.write_bytes(0x00C805E6, [0x00, 0xA6])
rom.write_bytes(0x00C805F2, [0x00, 0x01])
# Change Nocturne CS to check for medallions
rom.write_bytes(0x00ACCD8E, [0x00, 0xA6])
rom.write_bytes(0x00ACCD92, [0x00, 0x01])
rom.write_bytes(0x00ACCD9A, [0x00, 0x02])
rom.write_bytes(0x00ACCDA2, [0x00, 0x04])
# Change King Zora to move even if Zora Sapphire is in inventory
rom.write_bytes(0x00E55BB0, [0x85, 0xCE, 0x8C, 0x3C])
rom.write_bytes(0x00E55BB4, [0x84, 0x4F, 0x0E, 0xDA])
# Remove extra Forest Temple medallions
rom.write_bytes(0x00D4D37C, [0x00, 0x00, 0x00, 0x00])
# Remove extra Fire Temple medallions
rom.write_bytes(0x00AC9754, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0x00D0DB8C, [0x00, 0x00, 0x00, 0x00])
# Remove extra Water Temple medallions
rom.write_bytes(0x00D57F94, [0x00, 0x00, 0x00, 0x00])
# Remove extra Spirit Temple medallions
rom.write_bytes(0x00D370C4, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0x00D379C4, [0x00, 0x00, 0x00, 0x00])
# Remove extra Shadow Temple medallions
rom.write_bytes(0x00D116E0, [0x00, 0x00, 0x00, 0x00])
# Change Mido, Saria, and Kokiri to check for Deku Tree complete flag
# bitwise pointer for 0x80
kokiriAddresses = [0xE52836, 0xE53A56, 0xE51D4E, 0xE51F3E, 0xE51D96, 0xE51E1E, 0xE51E7E, 0xE51EDE, 0xE51FC6, 0xE51F96, 0xE293B6, 0xE29B8E, 0xE62EDA, 0xE630D6, 0xE633AA, 0xE6369E]
for kokiri in kokiriAddresses:
rom.write_bytes(kokiri, [0x8C, 0x0C])
# Kokiri
rom.write_bytes(0xE52838, [0x94, 0x48, 0x0E, 0xD4])
rom.write_bytes(0xE53A58, [0x94, 0x49, 0x0E, 0xD4])
rom.write_bytes(0xE51D50, [0x94, 0x58, 0x0E, 0xD4])
rom.write_bytes(0xE51F40, [0x94, 0x4B, 0x0E, 0xD4])
rom.write_bytes(0xE51D98, [0x94, 0x4B, 0x0E, 0xD4])
rom.write_bytes(0xE51E20, [0x94, 0x4A, 0x0E, 0xD4])
rom.write_bytes(0xE51E80, [0x94, 0x59, 0x0E, 0xD4])
rom.write_bytes(0xE51EE0, [0x94, 0x4E, 0x0E, 0xD4])
rom.write_bytes(0xE51FC8, [0x94, 0x49, 0x0E, 0xD4])
rom.write_bytes(0xE51F98, [0x94, 0x58, 0x0E, 0xD4])
# Saria
rom.write_bytes(0xE293B8, [0x94, 0x78, 0x0E, 0xD4])
rom.write_bytes(0xE29B90, [0x94, 0x68, 0x0E, 0xD4])
# Mido
rom.write_bytes(0xE62EDC, [0x94, 0x6F, 0x0E, 0xD4])
rom.write_bytes(0xE630D8, [0x94, 0x4F, 0x0E, 0xD4])
rom.write_bytes(0xE633AC, [0x94, 0x68, 0x0E, 0xD4])
rom.write_bytes(0xE636A0, [0x94, 0x48, 0x0E, 0xD4])
# Change adult Kokiri Forest to check for Forest Temple complete flag
rom.write_bytes(0xE5369E, [0xB4, 0xAC])
rom.write_bytes(0xD5A83C, [0x80, 0x49, 0x0E, 0xDC])
# Change adult Goron City to check for Fire Temple complete flag
rom.write_bytes(0xED59DC, [0x80, 0xC9, 0x0E, 0xDC])
# Change Pokey to check DT complete flag
rom.write_bytes(0xE5400A, [0x8C, 0x4C])
rom.write_bytes(0xE5400E, [0xB4, 0xA4])
if world.open_forest != 'closed':
rom.write_bytes(0xE5401C, [0x14, 0x0B])
# Fix Shadow Temple to check for different rewards for scene
rom.write_bytes(0xCA3F32, [0x00, 0x00, 0x25, 0x4A, 0x00, 0x10])
# Fix Spirit Temple to check for different rewards for scene
rom.write_bytes(0xCA3EA2, [0x00, 0x00, 0x25, 0x4A, 0x00, 0x08])
# Fix Biggoron to check a different flag.
rom.write_byte(0xED329B, 0x72)
rom.write_byte(0xED43E7, 0x72)
rom.write_bytes(0xED3370, [0x3C, 0x0D, 0x80, 0x12])
rom.write_bytes(0xED3378, [0x91, 0xB8, 0xA6, 0x42, 0xA1, 0xA8, 0xA6, 0x42])
rom.write_bytes(0xED6574, [0x00, 0x00, 0x00, 0x00])
# Remove the check on the number of days that passed for claim check.
rom.write_bytes(0xED4470, [0x00, 0x00, 0x00, 0x00])
rom.write_bytes(0xED4498, [0x00, 0x00, 0x00, 0x00])
# Fixed reward order for Bombchu Bowling
rom.write_bytes(0xE2E698, [0x80, 0xAA, 0xE2, 0x64])
rom.write_bytes(0xE2E6A0, [0x80, 0xAA, 0xE2, 0x4C])
rom.write_bytes(0xE2D440, [0x24, 0x19, 0x00, 0x00])
# Offset kakariko carpenter starting position
rom.write_bytes(0x1FF93A4, [0x01, 0x8D, 0x00, 0x11, 0x01, 0x6C, 0xFF, 0x92, 0x00, 0x00, 0x01, 0x78, 0xFF, 0x2E, 0x00, 0x00, 0x00, 0x03, 0xFD, 0x2B, 0x00, 0xC8, 0xFF, 0xF9, 0xFD, 0x03, 0x00, 0xC8, 0xFF, 0xA9, 0xFD, 0x5D, 0x00, 0xC8, 0xFE, 0x5F]) # re order the carpenter's path
rom.write_byte(0x1FF93D0, 0x06) # set the path points to 6
rom.write_bytes(0x20160B6, [0x01, 0x8D, 0x00, 0x11, 0x01, 0x6C]) # set the carpenter's start position
# Give hp after first ocarina minigame round
rom.write_bytes(0xDF2204, [0x24, 0x03, 0x00, 0x02])
# Allow owl to always carry the kid down Death Mountain
rom.write_bytes(0xE304F0, [0x24, 0x0E, 0x00, 0x01])
# Fix Vanilla Dodongo's Cavern Gossip Stone to not use a permanent flag for the fairy
if not world.dungeon_mq['Dodongos Cavern']:
rom.write_byte(0x1F281FE, 0x38)
# Fix "...???" textbox outside Child Colossus Fairy to use the right flag and disappear once the wall is destroyed
rom.write_byte(0x21A026F, 0xDD)
# Remove the "...???" textbox outside the Crater Fairy (change it to an actor that does nothing)
rom.write_int16s(0x225E7DC, [0x00B5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF])
# Forbid Sun's Song from a bunch of cutscenes
Suns_scenes = [0x2016FC9, 0x2017219, 0x20173D9, 0x20174C9, 0x2017679, 0x20C1539, 0x20C15D9, 0x21A0719, 0x21A07F9, 0x2E90129, 0x2E901B9, 0x2E90249, 0x225E829, 0x225E939, 0x306D009]
for address in Suns_scenes:
rom.write_byte(address,0x01)
# Allow Warp Songs in additional places
rom.write_byte(0xB6D3D2, 0x00) # Gerudo Training Grounds
rom.write_byte(0xB6D42A, 0x00) # Inside Ganon's Castle
# Allow Farore's Wind in dungeons where it's normally forbidden
rom.write_byte(0xB6D3D3, 0x00) # Gerudo Training Grounds
rom.write_byte(0xB6D42B, 0x00) # Inside Ganon's Castle
# Remove disruptive text from Gerudo Training Grounds and early Shadow Temple (vanilla)
Wonder_text = [0x27C00BC, 0x27C00CC, 0x27C00DC, 0x27C00EC, 0x27C00FC, 0x27C010C, 0x27C011C, 0x27C012C, 0x27CE080,
0x27CE090, 0x2887070, 0x2887080, 0x2887090, 0x2897070, 0x28C7134, 0x28D91BC, 0x28A60F4, 0x28AE084,
0x28B9174, 0x28BF168, 0x28BF178, 0x28BF188, 0x28A1144, 0x28A6104, 0x28D0094]
for address in Wonder_text:
rom.write_byte(address, 0xFB)
# Speed dig text for Dampe
rom.write_bytes(0x9532F8, [0x08, 0x08, 0x08, 0x59])
# Make item descriptions into a single box
Short_item_descriptions = [0x92EC84, 0x92F9E3, 0x92F2B4, 0x92F37A, 0x92F513, 0x92F5C6, 0x92E93B, 0x92EA12]
for address in Short_item_descriptions:
rom.write_byte(address,0x02)
et_original = rom.read_bytes(0xB6FBF0, 4 * 0x0614)
exit_updates = []
def copy_entrance_record(source_index, destination_index, count=4):
ti = source_index * 4
rom.write_bytes(0xB6FBF0 + destination_index * 4, et_original[ti:ti+(4 * count)])
def generate_exit_lookup_table():
# Assumes that the last exit on a scene's exit list cannot be 0000
exit_table = {
0x0028: [0xAC95C2] #Jabu with the fish is entered from a cutscene hardcode
}
def add_scene_exits(scene_start, offset = 0):
current = scene_start + offset
exit_list_start_off = 0
exit_list_end_off = 0
command = 0
while command != 0x14:
command = rom.read_byte(current)
if command == 0x18: # Alternate header list
header_list = scene_start + (rom.read_int32(current + 4) & 0x00FFFFFF)
for alt_id in range(0,3):
header_offset = rom.read_int32(header_list) & 0x00FFFFFF
if header_offset != 0:
add_scene_exits(scene_start, header_offset)
header_list += 4
if command == 0x13: # Exit List
exit_list_start_off = rom.read_int32(current + 4) & 0x00FFFFFF
if command == 0x0F: # Lighting list, follows exit list
exit_list_end_off = rom.read_int32(current + 4) & 0x00FFFFFF
current += 8
if exit_list_start_off == 0 or exit_list_end_off == 0:
return
# calculate the exit list length
list_length = (exit_list_end_off - exit_list_start_off) // 2
last_id = rom.read_int16(scene_start + exit_list_end_off - 2)
if last_id == 0:
list_length -= 1
# update
addr = scene_start + exit_list_start_off
for _ in range(0, list_length):
index = rom.read_int16(addr)
if index not in exit_table:
exit_table[index] = []
exit_table[index].append(addr)
addr += 2
scene_table = 0x00B71440
for scene in range(0x00, 0x65):
scene_start = rom.read_int32(scene_table + (scene * 0x14));
add_scene_exits(scene_start)
return exit_table
def set_entrance_updates(entrances):
for entrance in entrances:
new_entrance = entrance.data
replaced_entrance = entrance.replaces.data
exit_updates.append((new_entrance['index'], replaced_entrance['index']))
for address in new_entrance.get('addresses', []):
rom.write_int16(address, replaced_entrance['index'])
if "blue_warp" in new_entrance:
if "blue_warp" in replaced_entrance:
blue_out_data = replaced_entrance["blue_warp"]
else:
blue_out_data = replaced_entrance["index"]
# Blue warps have multiple hardcodes leading to them. The good news is
# the blue warps (excluding deku sprout and lake fill special cases) each
# have a nice consistent 4-entry in the table we can just shuffle. So just
# catch all the hardcode with entrance table rewrite. This covers the
# Forest temple and Water temple blue warp revisits. Deku sprout remains
# vanilla as it never took you to the exit and the lake fill is handled
# above by removing the cutscene completely. Child has problems with Adult
# blue warps, so always use the return entrance if a child.
copy_entrance_record(blue_out_data + 2, new_entrance["blue_warp"] + 2, 2)
copy_entrance_record(replaced_entrance["index"], new_entrance["blue_warp"], 2)
exit_table = generate_exit_lookup_table()
if world.entrance_shuffle:
# Disable the fog state entirely to avoid fog glitches
rom.write_byte(rom.sym('NO_FOG_STATE'), 1)
if world.disable_trade_revert:
# Disable trade quest timers and prevent trade items from ever reverting
rom.write_byte(rom.sym('DISABLE_TIMERS'), 0x01)
rom.write_int16s(0xB6D460, [0x0030, 0x0035, 0x0036]) # Change trade items revert table to prevent all reverts
if world.shuffle_overworld_entrances:
rom.write_byte(rom.sym('OVERWORLD_SHUFFLED'), 1)
# Prevent the ocarina cutscene from leading straight to hyrule field
rom.write_byte(rom.sym('OCARINAS_SHUFFLED'), 1)
# Combine all fence hopping LLR exits to lead to the main LLR exit
for k in [0x028A, 0x028E, 0x0292]: # Southern, Western, Eastern Gates
exit_table[0x01F9] += exit_table[k] # Hyrule Field entrance from Lon Lon Ranch (main land entrance)
del exit_table[k]
exit_table[0x01F9].append(0xD52722) # 0x0476, Front Gate
# Combine the water exits between Hyrule Field and Zora River to lead to the land entrance instead of the water entrance
exit_table[0x00EA] += exit_table[0x01D9] # Hyrule Field -> Zora River
exit_table[0x0181] += exit_table[0x0311] # Zora River -> Hyrule Field
del exit_table[0x01D9]
del exit_table[0x0311]
# Change Impa escorts to bring link at the hyrule castle grounds entrance from market, instead of hyrule field
rom.write_int16(0xACAA2E, 0x0138) # 1st Impa escort
rom.write_int16(0xD12D6E, 0x0138) # 2nd+ Impa escort
if world.shuffle_dungeon_entrances:
rom.write_byte(rom.sym('DUNGEONS_SHUFFLED'), 1)
# Connect lake hylia fill exit to revisit exit (Hylia blue will then be rewired below)
rom.write_int16(0xAC995A, 0x060C)
# Remove deku sprout and drop player at SFM after forest (SFM blue will then be rewired by ER below)
rom.write_int16(0xAC9F96, 0x0608)
#Tell the well water we are always a child.
rom.write_int32(0xDD5BF4, 0x00000000)
#Tell Sheik at Ice Cavern we are always an Adult
rom.write_int32(0xC7B9C0, 0x00000000)
rom.write_int32(0xC7BAEC, 0x00000000)
rom.write_int32(0xc7BCA4, 0x00000000)
#Make the Adult well blocking stone dissappear if the well has been drained by
#checking the well drain event flag instead of links age. This actor doesn't need a
#code check for links age as the stone is absent for child via the scene alternate
#lists. So replace the age logic with drain logic.
rom.write_int32(0xE2887C, rom.read_int32(0xE28870)) #relocate this to nop delay slot
rom.write_int32(0xE2886C, 0x95CEB4B0) # lhu
rom.write_int32(0xE28870, 0x31CE0080) # andi
remove_entrance_blockers(rom)
#Purge temp flags on entrance to spirit from colossus through the front
#door.
rom.write_byte(0x021862E3, 0xC2)
# Set entrances to update, except grotto entrances which are handled on their own at a later point
set_entrance_updates(filter(lambda entrance: entrance.type != 'Grotto', world.get_shuffled_entrances()))
for k, v in [(k,v) for k, v in exit_updates if k in exit_table]:
for addr in exit_table[k]:
rom.write_int16(addr, v)
# Fix text for Pocket Cucco.
rom.write_byte(0xBEEF45, 0x0B)
# Fix stupid alcove cameras in Ice Cavern -- thanks to krim and mzx for the help
rom.write_byte(0x2BECA25,0x01);
rom.write_byte(0x2BECA2D,0x01);
configure_dungeon_info(rom, world)
hash_icons = 0
for i,icon in enumerate(spoiler.file_hash):
hash_icons |= (icon << (5 * i))
rom.write_int32(rom.sym('cfg_file_select_hash'), hash_icons)
save_context = SaveContext()
# Initial Save Data
if not world.useful_cutscenes:
save_context.write_bits(0x00D4 + 0x03 * 0x1C + 0x04 + 0x0, 0x08) # Forest Temple switch flag (Poe Sisters cutscene)
save_context.write_bits(0x00D4 + 0x05 * 0x1C + 0x04 + 0x1, 0x01) # Water temple switch flag (Ruto)
save_context.write_bits(0x00D4 + 0x51 * 0x1C + 0x04 + 0x2, 0x08) # Hyrule Field switch flag (Owl)
save_context.write_bits(0x00D4 + 0x55 * 0x1C + 0x04 + 0x0, 0x80) # Kokiri Forest switch flag (Owl)
save_context.write_bits(0x00D4 + 0x56 * 0x1C + 0x04 + 0x2, 0x40) # Sacred Forest Meadow switch flag (Owl)
save_context.write_bits(0x00D4 + 0x5B * 0x1C + 0x04 + 0x2, 0x01) # Lost Woods switch flag (Owl)
save_context.write_bits(0x00D4 + 0x5B * 0x1C + 0x04 + 0x3, 0x80) # Lost Woods switch flag (Owl)
save_context.write_bits(0x00D4 + 0x5C * 0x1C + 0x04 + 0x0, 0x80) # Desert Colossus switch flag (Owl)
save_context.write_bits(0x00D4 + 0x5F * 0x1C + 0x04 + 0x3, 0x20) # Hyrule Castle switch flag (Owl)
save_context.write_bits(0x0ED4, 0x10) # "Met Deku Tree"
save_context.write_bits(0x0ED5, 0x20) # "Deku Tree Opened Mouth"
save_context.write_bits(0x0ED6, 0x08) # "Rented Horse From Ingo"
save_context.write_bits(0x0ED6, 0x10) # "Spoke to Mido After Deku Tree's Death"
save_context.write_bits(0x0EDA, 0x08) # "Began Nabooru Battle"
save_context.write_bits(0x0EDC, 0x80) # "Entered the Master Sword Chamber"
save_context.write_bits(0x0EDD, 0x20) # "Pulled Master Sword from Pedestal"
save_context.write_bits(0x0EE0, 0x80) # "Spoke to Kaepora Gaebora by Lost Woods"
save_context.write_bits(0x0EE7, 0x20) # "Nabooru Captured by Twinrova"
save_context.write_bits(0x0EE7, 0x10) # "Spoke to Nabooru in Spirit Temple"
save_context.write_bits(0x0EED, 0x20) # "Sheik, Spawned at Master Sword Pedestal as Adult"
save_context.write_bits(0x0EED, 0x01) # "Nabooru Ordered to Fight by Twinrova"
save_context.write_bits(0x0EED, 0x80) # "Watched Ganon's Tower Collapse / Caught by Gerudo"
save_context.write_bits(0x0EF9, 0x01) # "Greeted by Saria"
save_context.write_bits(0x0F0A, 0x04) # "Spoke to Ingo Once as Adult"
save_context.write_bits(0x0F0F, 0x40) # "Met Poe Collector in Ruined Market"
if not world.useful_cutscenes:
save_context.write_bits(0x0F1A, 0x04) # "Met Darunia in Fire Temple"
save_context.write_bits(0x0ED7, 0x01) # "Spoke to Child Malon at Castle or Market"
save_context.write_bits(0x0ED7, 0x20) # "Spoke to Child Malon at Ranch"
save_context.write_bits(0x0ED7, 0x40) # "Invited to Sing With Child Malon"
save_context.write_bits(0x0F09, 0x10) # "Met Child Malon at Castle or Market"
save_context.write_bits(0x0F09, 0x20) # "Child Malon Said Epona Was Scared of You"
save_context.write_bits(0x0F21, 0x04) # "Ruto in JJ (M3) Talk First Time"
save_context.write_bits(0x0F21, 0x02) # "Ruto in JJ (M2) Meet Ruto"
save_context.write_bits(0x0EE2, 0x01) # "Began Ganondorf Battle"
save_context.write_bits(0x0EE3, 0x80) # "Began Bongo Bongo Battle"
save_context.write_bits(0x0EE3, 0x40) # "Began Barinade Battle"
if not world.useful_cutscenes:
save_context.write_bits(0x0EE3, 0x20) # "Began Twinrova Battle"
save_context.write_bits(0x0EE3, 0x10) # "Began Morpha Battle"
save_context.write_bits(0x0EE3, 0x08) # "Began Volvagia Battle"
save_context.write_bits(0x0EE3, 0x04) # "Began Phantom Ganon Battle"
save_context.write_bits(0x0EE3, 0x02) # "Began King Dodongo Battle"
save_context.write_bits(0x0EE3, 0x01) # "Began Gohma Battle"
save_context.write_bits(0x0EE8, 0x01) # "Entered Deku Tree"
save_context.write_bits(0x0EE9, 0x80) # "Entered Temple of Time"
save_context.write_bits(0x0EE9, 0x40) # "Entered Goron City"
save_context.write_bits(0x0EE9, 0x20) # "Entered Hyrule Castle"
save_context.write_bits(0x0EE9, 0x10) # "Entered Zora's Domain"
save_context.write_bits(0x0EE9, 0x08) # "Entered Kakariko Village"
save_context.write_bits(0x0EE9, 0x02) # "Entered Death Mountain Trail"
save_context.write_bits(0x0EE9, 0x01) # "Entered Hyrule Field"
save_context.write_bits(0x0EEA, 0x04) # "Entered Ganon's Castle (Exterior)"
save_context.write_bits(0x0EEA, 0x02) # "Entered Death Mountain Crater"
save_context.write_bits(0x0EEA, 0x01) # "Entered Desert Colossus"
save_context.write_bits(0x0EEB, 0x80) # "Entered Zora's Fountain"
save_context.write_bits(0x0EEB, 0x40) # "Entered Graveyard"
save_context.write_bits(0x0EEB, 0x20) # "Entered Jabu-Jabu's Belly"
save_context.write_bits(0x0EEB, 0x10) # "Entered Lon Lon Ranch"
save_context.write_bits(0x0EEB, 0x08) # "Entered Gerudo's Fortress"
save_context.write_bits(0x0EEB, 0x04) # "Entered Gerudo Valley"
save_context.write_bits(0x0EEB, 0x02) # "Entered Lake Hylia"
save_context.write_bits(0x0EEB, 0x01) # "Entered Dodongo's Cavern"
save_context.write_bits(0x0F08, 0x08) # "Entered Hyrule Castle"
# Set the number of chickens to collect
rom.write_byte(0x00E1E523, world.chicken_count)
# Change Anju to always say how many chickens are needed
# Does not affect text for collecting item or afterwards
rom.write_int16(0x00E1F3C2, 0x5036)
rom.write_int16(0x00E1F3C4, 0x5036)
rom.write_int16(0x00E1F3C6, 0x5036)
rom.write_int16(0x00E1F3C8, 0x5036)
rom.write_int16(0x00E1F3CA, 0x5036)
rom.write_int16(0x00E1F3CC, 0x5036)
if world.no_first_dampe_race:
save_context.write_bits(0x00D4 + 0x48 * 0x1C + 0x08 + 0x3, 0x10) # Beat First Dampe Race (& Chest Spawned)
# Make the Kakariko Gate not open with the MS
if not world.open_kakariko:
rom.write_int32(0xDD3538, 0x34190000) # li t9, 0
if world.zora_fountain == 'open':
save_context.write_bits(0x0EDB, 0x08) # "Moved King Zora"
elif world.zora_fountain == 'adult':
rom.write_byte(rom.sym('MOVED_ADULT_KING_ZORA'), 1)
# Make all chest opening animations fast
rom.write_byte(rom.sym('FAST_CHESTS'), int(world.fast_chests))
# Set up Rainbow Bridge conditions
symbol = rom.sym('RAINBOW_BRIDGE_CONDITION')
if world.bridge == 'open':
rom.write_int32(symbol, 0)
save_context.write_bits(0xEDC, 0x20) # "Rainbow Bridge Built by Sages"
elif world.bridge == 'medallions':
rom.write_int32(symbol, 1)
elif world.bridge == 'dungeons':
rom.write_int32(symbol, 2)
elif world.bridge == 'stones':
rom.write_int32(symbol, 3)
elif world.bridge == 'vanilla':
rom.write_int32(symbol, 4)
elif world.bridge == 'tokens':
rom.write_int32(symbol, 5)
rom.write_int16(rom.sym('RAINBOW_BRIDGE_TOKENS'), world.bridge_tokens)
if world.triforce_hunt:
rom.write_int16(rom.sym('triforce_pieces_requied'), world.triforce_goal)
rom.write_int16(rom.sym('triforce_hunt_enabled'), 1)
# Set up LACS conditions.
symbol = rom.sym('LACS_CONDITION')
if world.lacs_condition == 'medallions':
rom.write_int32(symbol, 1)
elif world.lacs_condition == 'dungeons':
rom.write_int32(symbol, 2)
elif world.lacs_condition == 'stones':
rom.write_int32(symbol, 3)
else:
rom.write_int32(symbol, 0)
if world.open_forest == 'open':
save_context.write_bits(0xED5, 0x10) # "Showed Mido Sword & Shield"
if world.open_door_of_time:
save_context.write_bits(0xEDC, 0x08) # "Opened the Door of Time"
# "fast-ganon" stuff
symbol = rom.sym('NO_ESCAPE_SEQUENCE')
if world.no_escape_sequence:
rom.write_bytes(0xD82A12, [0x05, 0x17]) # Sets exit from Ganondorf fight to entrance to Ganon fight
rom.write_bytes(0xB139A2, [0x05, 0x17]) # Sets Ganon deathwarp back to Ganon
rom.write_byte(symbol, 0x01)
else:
rom.write_byte(symbol, 0x00)
if world.skipped_trials['Forest']:
save_context.write_bits(0x0EEA, 0x08) # "Completed Forest Trial"
if world.skipped_trials['Fire']:
save_context.write_bits(0x0EEA, 0x40) # "Completed Fire Trial"
if world.skipped_trials['Water']:
save_context.write_bits(0x0EEA, 0x10) # "Completed Water Trial"
if world.skipped_trials['Spirit']:
save_context.write_bits(0x0EE8, 0x20) # "Completed Spirit Trial"
if world.skipped_trials['Shadow']:
save_context.write_bits(0x0EEA, 0x20) # "Completed Shadow Trial"
if world.skipped_trials['Light']:
save_context.write_bits(0x0EEA, 0x80) # "Completed Light Trial"
if world.trials == 0:
save_context.write_bits(0x0EED, 0x08) # "Dispelled Ganon's Tower Barrier"
# open gerudo fortress
if world.gerudo_fortress == 'open':
if not world.shuffle_gerudo_card:
save_context.write_bits(0x00A5, 0x40) # Give Gerudo Card
save_context.write_bits(0x0EE7, 0x0F) # Free all 4 carpenters
save_context.write_bits(0x00D4 + 0x0C * 0x1C + 0x04 + 0x1, 0x0F) # Thieves' Hideout switch flags (started all fights)
save_context.write_bits(0x00D4 + 0x0C * 0x1C + 0x04 + 0x2, 0x01) # Thieves' Hideout switch flags (heard yells/unlocked doors)
save_context.write_bits(0x00D4 + 0x0C * 0x1C + 0x04 + 0x3, 0xFE) # Thieves' Hideout switch flags (heard yells/unlocked doors)
save_context.write_bits(0x00D4 + 0x0C * 0x1C + 0x0C + 0x2, 0xD4) # Thieves' Hideout collection flags (picked up keys, marks fights finished as well)
elif world.gerudo_fortress == 'fast':
save_context.write_bits(0x0EE7, 0x0E) # Free 3 carpenters
save_context.write_bits(0x00D4 + 0x0C * 0x1C + 0x04 + 0x1, 0x0D) # Thieves' Hideout switch flags (started all fights)
save_context.write_bits(0x00D4 + 0x0C * 0x1C + 0x04 + 0x2, 0x01) # Thieves' Hideout switch flags (heard yells/unlocked doors)
save_context.write_bits(0x00D4 + 0x0C * 0x1C + 0x04 + 0x3, 0xDC) # Thieves' Hideout switch flags (heard yells/unlocked doors)
save_context.write_bits(0x00D4 + 0x0C * 0x1C + 0x0C + 0x2, 0xC4) # Thieves' Hideout collection flags (picked up keys, marks fights finished as well)
# Add a gate-opening guard on the Wasteland side of the Gerudo gate when the card is shuffled or certain levels of ER.
# Overrides the generic guard at the bottom of the ladder in Gerudo Fortress
if world.shuffle_gerudo_card or world.shuffle_overworld_entrances or world.shuffle_special_interior_entrances:
# Add a gate opening guard on the Wasteland side of the Gerudo Fortress' gate
new_gate_opening_guard = [0x0138, 0xFAC8, 0x005D, 0xF448, 0x0000, 0x95B0, 0x0000, 0x0301]
rom.write_int16s(0x21BD3EC, new_gate_opening_guard) # Adult Day
rom.write_int16s(0x21BD62C, new_gate_opening_guard) # Adult Night
# start with maps/compasses
if world.shuffle_mapcompass == 'startwith':
for dungeon in ['deku', 'dodongo', 'jabu', 'forest', 'fire', 'water', 'spirit', 'shadow', 'botw', 'ice']:
save_context.addresses['dungeon_items'][dungeon]['compass'].value = True
save_context.addresses['dungeon_items'][dungeon]['map'].value = True
if world.shuffle_smallkeys == 'vanilla':
if world.dungeon_mq['Spirit Temple']:
save_context.addresses['keys']['spirit'].value = 3
if world.start_with_rupees:
rom.write_byte(rom.sym('MAX_RUPEES'), 0x01)
# Set starting time of day
if world.starting_tod != 'default':
tod = {
'sunrise': 0x4555,
'morning': 0x6000,
'noon': 0x8001,
'afternoon': 0xA000,
'sunset': 0xC001,
'evening': 0xE000,
'midnight': 0x0000,
'witching-hour': 0x2000,
}
save_context.addresses['time_of_day'].value = tod[world.starting_tod]
if world.starting_age == 'adult':
save_context.addresses['link_age'].value = False # Set link's age to adult
save_context.addresses['scene_index'].value = 0x43 # Set the scene index to Temple of Time
save_context.addresses['equip_items']['master_sword'].value = True # Equip Master Sword by default
save_context.addresses['equip_items']['kokiri_tunic'].value = True # Equip Kokiri Tunic & Kokiri Boots by default
save_context.addresses['equip_items']['kokiri_boots'].value = True # (to avoid issues when going back child for the first time)
save_context.write_byte(0x0F33, 0x00) # Unset Swordless Flag (to avoid issues with sword getting unequipped)
# Revert change that Skips the Epona Race
if not world.no_epona_race:
rom.write_int32(0xA9E838, 0x03E00008)
else:
save_context.write_bits(0xF0E, 0x01) # Set talked to Malon flag
# skip castle guard stealth sequence
if world.no_guard_stealth:
# change the exit at child/day crawlspace to the end of zelda's goddess cutscene
rom.write_bytes(0x21F60DE, [0x05, 0xF0])
# patch mq scenes
mq_scenes = []
if world.dungeon_mq['Deku Tree']:
mq_scenes.append(0)
if world.dungeon_mq['Dodongos Cavern']:
mq_scenes.append(1)
if world.dungeon_mq['Jabu Jabus Belly']:
mq_scenes.append(2)
if world.dungeon_mq['Forest Temple']:
mq_scenes.append(3)
if world.dungeon_mq['Fire Temple']:
mq_scenes.append(4)
if world.dungeon_mq['Water Temple']:
mq_scenes.append(5)
if world.dungeon_mq['Spirit Temple']:
mq_scenes.append(6)
if world.dungeon_mq['Shadow Temple']:
mq_scenes.append(7)
if world.dungeon_mq['Bottom of the Well']:
mq_scenes.append(8)
if world.dungeon_mq['Ice Cavern']:
mq_scenes.append(9)
# Scene 10 has no layout changes, so it doesn't need to be patched
if world.dungeon_mq['Gerudo Training Grounds']:
mq_scenes.append(11)
if world.dungeon_mq['Ganons Castle']:
mq_scenes.append(13)
patch_files(rom, mq_scenes)
### Load Shop File
# Move shop actor file to free space
shop_item_file = File({
'Name':'En_GirlA',
'Start':'00C004E0',
'End':'00C02E00',
})
shop_item_file.relocate(rom)
# Increase the shop item table size
shop_item_vram_start = rom.read_int32(0x00B5E490 + (0x20 * 4) + 0x08)
insert_space(rom, shop_item_file, shop_item_vram_start, 1, 0x3C + (0x20 * 50), 0x20 * 50)
# Add relocation entries for shop item table
new_relocations = []
for i in range(50, 100):
new_relocations.append(shop_item_file.start + 0x1DEC + (i * 0x20) + 0x04)
new_relocations.append(shop_item_file.start + 0x1DEC + (i * 0x20) + 0x14)
new_relocations.append(shop_item_file.start + 0x1DEC + (i * 0x20) + 0x1C)
add_relocations(rom, shop_item_file, new_relocations)
# update actor table
rom.write_int32s(0x00B5E490 + (0x20 * 4),
[shop_item_file.start,
shop_item_file.end,
shop_item_vram_start,
shop_item_vram_start + (shop_item_file.end - shop_item_file.start)])
# Update DMA Table
update_dmadata(rom, shop_item_file)
# Create 2nd Bazaar Room
bazaar_room_file = File({
'Name':'shop1_room_1',
'Start':'028E4000',
'End':'0290D7B0',
})
bazaar_room_file.copy(rom)
# Add new Bazaar Room to Bazaar Scene
rom.write_int32s(0x28E3030, [0x00010000, 0x02000058]) #reduce position list size
rom.write_int32s(0x28E3008, [0x04020000, 0x02000070]) #expand room list size
rom.write_int32s(0x28E3070, [0x028E4000, 0x0290D7B0,
bazaar_room_file.start, bazaar_room_file.end]) #room list
rom.write_int16s(0x28E3080, [0x0000, 0x0001]) # entrance list
rom.write_int16(0x28E4076, 0x0005) # Change shop to Kakariko Bazaar
#rom.write_int16(0x3489076, 0x0005) # Change shop to Kakariko Bazaar
# Load Message and Shop Data
messages = read_messages(rom)
remove_unused_messages(messages)
shop_items = read_shop_items(rom, shop_item_file.start + 0x1DEC)
# Set Big Poe count to get reward from buyer
poe_points = world.big_poe_count * 100
rom.write_int16(0xEE69CE, poe_points)
# update dialogue
new_message = "\x08Hey, young man. What's happening \x01today? If you have a \x05\x41Poe\x05\x40, I will \x01buy it.\x04\x1AIf you earn \x05\x41%d points\x05\x40, you'll\x01be a happy man! Heh heh.\x04\x08Your card now has \x05\x45\x1E\x01 \x05\x40points.\x01Come back again!\x01Heh heh heh!\x02" % poe_points
update_message_by_id(messages, 0x70F5, new_message)
if world.big_poe_count != 10:
new_message = "\x1AOh, you brought a Poe today!\x04\x1AHmmmm!\x04\x1AVery interesting!\x01This is a \x05\x41Big Poe\x05\x40!\x04\x1AI'll buy it for \x05\x4150 Rupees\x05\x40.\x04On top of that, I'll put \x05\x41100\x01points \x05\x40on your card.\x04\x1AIf you earn \x05\x41%d points\x05\x40, you'll\x01be a happy man! Heh heh." % poe_points
update_message_by_id(messages, 0x70f7, new_message)
new_message = "\x1AWait a minute! WOW!\x04\x1AYou have earned \x05\x41%d points\x05\x40!\x04\x1AYoung man, you are a genuine\x01\x05\x41Ghost Hunter\x05\x40!\x04\x1AIs that what you expected me to\x01say? Heh heh heh!\x04\x1ABecause of you, I have extra\x01inventory of \x05\x41Big Poes\x05\x40, so this will\x01be the last time I can buy a \x01ghost.\x04\x1AYou're thinking about what I \x01promised would happen when you\x01earned %d points. Heh heh.\x04\x1ADon't worry, I didn't forget.\x01Just take this." % (poe_points, poe_points)
update_message_by_id(messages, 0x70f8, new_message)
# Update Child Anju's dialogue
new_message = "\x08What should I do!?\x01My \x05\x41Cuccos\x05\x40 have all flown away!\x04You, little boy, please!\x01Please gather at least \x05\x41%d Cuccos\x05\x40\x01for me.\x02" % world.chicken_count
update_message_by_id(messages, 0x5036, new_message)
# use faster jabu elevator
if not world.dungeon_mq['Jabu Jabus Belly'] and world.shuffle_scrubs == 'off':
symbol = rom.sym('JABU_ELEVATOR_ENABLE')
rom.write_byte(symbol, 0x01)
# Sets hooks for gossip stone changes
symbol = rom.sym("GOSSIP_HINT_CONDITION");
if world.hints == 'none':
rom.write_int32(symbol, 0)
else:
writeGossipStoneHints(spoiler, world, messages)
if world.hints == 'mask':
rom.write_int32(symbol, 0)
elif world.hints == 'always':
rom.write_int32(symbol, 2)
else:
rom.write_int32(symbol, 1)
# build silly ganon lines
buildGanonText(world, messages)
# Write item overrides
override_table = get_override_table(world)
rom.write_bytes(rom.sym('cfg_item_overrides'), get_override_table_bytes(override_table))
rom.write_byte(rom.sym('PLAYER_ID'), world.id + 1) # Write player ID
# Revert Song Get Override Injection
if not songs_as_items:
# general get song
rom.write_int32(0xAE5DF8, 0x240200FF)
rom.write_int32(0xAE5E04, 0xAD0F00A4)
# requiem of spirit
rom.write_int32s(0xAC9ABC, [0x3C010001, 0x00300821])
# sun song
rom.write_int32(0xE09F68, 0x8C6F00A4)
rom.write_int32(0xE09F74, 0x01CFC024)
rom.write_int32(0xE09FB0, 0x240F0001)
# epona's song
rom.write_int32s(0xD7E140, [0x8DCE8C24, 0x8C6F00A4])
rom.write_int32( 0xD7E77C, 0x8C4900A4)
rom.write_int32( 0xD7E784, 0x8D088C24)
rom.write_int32s(0xD7E8D4, [0x8DCE8C24, 0x8C4F00A4])
rom.write_int32( 0xD7EBBC, 0x14410008)
rom.write_int32( 0xD7EC1C, 0x17010010)
# song of time
rom.write_int32(0xDB532C, 0x24050003)
# Set damage multiplier
if world.damage_multiplier == 'half':
rom.write_byte(rom.sym('CFG_DAMAGE_MULTIPLYER'), 0xFF)
if world.damage_multiplier == 'normal':
rom.write_byte(rom.sym('CFG_DAMAGE_MULTIPLYER'), 0)
if world.damage_multiplier == 'double':
rom.write_byte(rom.sym('CFG_DAMAGE_MULTIPLYER'), 1)
if world.damage_multiplier == 'quadruple':
rom.write_byte(rom.sym('CFG_DAMAGE_MULTIPLYER'), 2)
if world.damage_multiplier == 'ohko':
rom.write_byte(rom.sym('CFG_DAMAGE_MULTIPLYER'), 3)
# Patch songs and boss rewards
for location in world.get_filled_locations():
item = location.item
special = item.special
locationaddress = location.address
secondaryaddress = location.address2
if location.type == 'Song' and not songs_as_items:
bit_mask_pointer = 0x8C34 + ((special['item_id'] - 0x65) * 4)
rom.write_byte(locationaddress, special['song_id'])
next_song_id = special['song_id'] + 0x0D
rom.write_byte(secondaryaddress, next_song_id)
if location.name == 'Impa at Castle':
rom.write_byte(0x0D12ECB, special['item_id'])
rom.write_byte(0x2E8E931, special['text_id']) #Fix text box
elif location.name == 'Song from Malon':
rom.write_int16(0xD7E142, bit_mask_pointer)
rom.write_int16(0xD7E8D6, bit_mask_pointer)
rom.write_int16(0xD7E786, bit_mask_pointer)
rom.write_byte(0x29BECB9, special['text_id']) #Fix text box
elif location.name == 'Song from Composer Grave':
rom.write_int16(0xE09F66, bit_mask_pointer)
rom.write_byte(0x332A87D, special['text_id']) #Fix text box
elif location.name == 'Song from Saria':
rom.write_byte(0x0E2A02B, special['item_id'])
rom.write_byte(0x20B1DBD, special['text_id']) #Fix text box
elif location.name == 'Song from Ocarina of Time':
rom.write_byte(0x252FC95, special['text_id']) #Fix text box
elif location.name == 'Song at Windmill':
rom.write_byte(0x0E42ABF, special['item_id'])
rom.write_byte(0x3041091, special['text_id']) #Fix text box
elif location.name == 'Sheik Forest Song':
rom.write_byte(0x0C7BAA3, special['item_id'])
rom.write_byte(0x20B0815, special['text_id']) #Fix text box
elif location.name == 'Sheik at Temple':
rom.write_byte(0x0C805EF, special['item_id'])
rom.write_byte(0x2531335, special['text_id']) #Fix text box
elif location.name == 'Sheik in Crater':
rom.write_byte(0x0C7BC57, special['item_id'])
rom.write_byte(0x224D7FD, special['text_id']) #Fix text box
elif location.name == 'Sheik in Ice Cavern':
rom.write_byte(0x0C7BD77, special['item_id'])
rom.write_byte(0x2BEC895, special['text_id']) #Fix text box
elif location.name == 'Sheik in Kakariko':
rom.write_byte(0x0AC9A5B, special['item_id'])
rom.write_byte(0x2000FED, special['text_id']) #Fix text box
elif location.name == 'Sheik at Colossus':
rom.write_byte(0x218C589, special['text_id']) #Fix text box
elif location.type == 'Boss':
if location.name == 'Links Pocket':
save_context.give_item(item.name)
else:
rom.write_byte(locationaddress, special['item_id'])
rom.write_byte(secondaryaddress, special['addr2_data'])
bit_mask_hi = special['bit_mask'] >> 16
bit_mask_lo = special['bit_mask'] & 0xFFFF
if location.name == 'Bongo Bongo':
rom.write_int16(0xCA3F32, bit_mask_hi)
rom.write_int16(0xCA3F36, bit_mask_lo)
elif location.name == 'Twinrova':
rom.write_int16(0xCA3EA2, bit_mask_hi)
rom.write_int16(0xCA3EA6, bit_mask_lo)
# add a cheaper bombchu pack to the bombchu shop
# describe
update_message_by_id(messages, 0x80FE, '\x08\x05\x41Bombchu (5 pieces) 60 Rupees\x01\x05\x40This looks like a toy mouse, but\x01it\'s actually a self-propelled time\x01bomb!\x09\x0A', 0x03)
# purchase
update_message_by_id(messages, 0x80FF, '\x08Bombchu 5 Pieces 60 Rupees\x01\x01\x1B\x05\x42Buy\x01Don\'t buy\x05\x40\x09', 0x03)
rbl_bombchu = shop_items[0x0018]
rbl_bombchu.price = 60
rbl_bombchu.pieces = 5
rbl_bombchu.get_item_id = 0x006A
rbl_bombchu.description_message = 0x80FE
rbl_bombchu.purchase_message = 0x80FF
# Reduce 10 Pack Bombchus from 100 to 99 Rupees
shop_items[0x0015].price = 99
shop_items[0x0019].price = 99
shop_items[0x001C].price = 99
update_message_by_id(messages, shop_items[0x001C].description_message, "\x08\x05\x41Bombchu (10 pieces) 99 Rupees\x01\x05\x40This looks like a toy mouse, but\x01it's actually a self-propelled time\x01bomb!\x09\x0A")
update_message_by_id(messages, shop_items[0x001C].purchase_message, "\x08Bombchu 10 pieces 99 Rupees\x09\x01\x01\x1B\x05\x42Buy\x01Don't buy\x05\x40")
shuffle_messages.shop_item_messages = []
# kokiri shop
shop_objs = place_shop_items(rom, world, shop_items, messages,
world.get_region('Kokiri Shop').locations, True)
shop_objs |= {0x00FC, 0x00B2, 0x0101, 0x0102, 0x00FD, 0x00C5} # Shop objects
rom.write_byte(0x2587029, len(shop_objs))
rom.write_int32(0x258702C, 0x0300F600)
rom.write_int16s(0x2596600, list(shop_objs))
# kakariko bazaar
shop_objs = place_shop_items(rom, world, shop_items, messages,
world.get_region('Kakariko Bazaar').locations)
shop_objs |= {0x005B, 0x00B2, 0x00C5, 0x0107, 0x00C9, 0x016B} # Shop objects
rom.write_byte(0x28E4029, len(shop_objs))
rom.write_int32(0x28E402C, 0x03007A40)
rom.write_int16s(0x28EBA40, list(shop_objs))
# castle town bazaar
shop_objs = place_shop_items(rom, world, shop_items, messages,
world.get_region('Castle Town Bazaar').locations)
shop_objs |= {0x005B, 0x00B2, 0x00C5, 0x0107, 0x00C9, 0x016B} # Shop objects
rom.write_byte(bazaar_room_file.start + 0x29, len(shop_objs))
rom.write_int32(bazaar_room_file.start + 0x2C, 0x03007A40)
rom.write_int16s(bazaar_room_file.start + 0x7A40, list(shop_objs))
# goron shop
shop_objs = place_shop_items(rom, world, shop_items, messages,
world.get_region('Goron Shop').locations)
shop_objs |= {0x00C9, 0x00B2, 0x0103, 0x00AF} # Shop objects
rom.write_byte(0x2D33029, len(shop_objs))
rom.write_int32(0x2D3302C, 0x03004340)
rom.write_int16s(0x2D37340, list(shop_objs))
# zora shop
shop_objs = place_shop_items(rom, world, shop_items, messages,
world.get_region('Zora Shop').locations)
shop_objs |= {0x005B, 0x00B2, 0x0104, 0x00FE} # Shop objects
rom.write_byte(0x2D5B029, len(shop_objs))
rom.write_int32(0x2D5B02C, 0x03004B40)
rom.write_int16s(0x2D5FB40, list(shop_objs))
# kakariko potion shop
shop_objs = place_shop_items(rom, world, shop_items, messages,
world.get_region('Kakariko Potion Shop Front').locations)
shop_objs |= {0x0159, 0x00B2, 0x0175, 0x0122} # Shop objects
rom.write_byte(0x2D83029, len(shop_objs))
rom.write_int32(0x2D8302C, 0x0300A500)
rom.write_int16s(0x2D8D500, list(shop_objs))
# market potion shop
shop_objs = place_shop_items(rom, world, shop_items, messages,
world.get_region('Castle Town Potion Shop').locations)
shop_objs |= {0x0159, 0x00B2, 0x0175, 0x00C5, 0x010C, 0x016B} # Shop objects
rom.write_byte(0x2DB0029, len(shop_objs))
rom.write_int32(0x2DB002C, 0x03004E40)
rom.write_int16s(0x2DB4E40, list(shop_objs))
# bombchu shop
shop_objs = place_shop_items(rom, world, shop_items, messages,
world.get_region('Castle Town Bombchu Shop').locations)
shop_objs |= {0x0165, 0x00B2} # Shop objects
rom.write_byte(0x2DD8029, len(shop_objs))
rom.write_int32(0x2DD802C, 0x03006A40)
rom.write_int16s(0x2DDEA40, list(shop_objs))
# Scrub text stuff.
def update_scrub_text(message, text_replacement, default_price, price, item_name=None):
scrub_strip_text = ["some ", "1 piece ", "5 pieces ", "30 pieces "]
for text in scrub_strip_text:
message = message.replace(text.encode(), b'')
message = message.replace(text_replacement[0].encode(), text_replacement[1].encode())
message = message.replace(b'they are', b'it is')
if default_price != price:
message = message.replace(('%d Rupees' % default_price).encode(), ('%d Rupees' % price).encode())
if item_name is not None:
message = message.replace(b'mysterious item', item_name.encode())
return message
single_item_scrubs = {
0x3E: world.get_location("HF Grotto Deku Scrub Piece of Heart"),
0x77: world.get_location("LW Deku Scrub Deku Stick Upgrade"),
0x79: world.get_location("LW Grotto Deku Scrub Deku Nut Upgrade"),
}
scrub_message_dict = {}
if world.shuffle_scrubs == 'off':
# Revert Deku Scrubs changes
rom.write_int32s(0xEBB85C, [
0x24010002, # addiu at, zero, 2
0x3C038012, # lui v1, 0x8012
0x14410004, # bne v0, at, 0xd8
0x2463A5D0, # addiu v1, v1, -0x5a30
0x94790EF0])# lhu t9, 0xef0(v1)
rom.write_int32(0xDF7CB0,
0xA44F0EF0) # sh t7, 0xef0(v0)
# Replace scrub text for 3 default shuffled scrubs.
for (scrub_item, default_price, text_id, text_replacement) in business_scrubs:
if scrub_item not in single_item_scrubs.keys():
continue
scrub_message_dict[text_id] = update_scrub_text(get_message_by_id(messages, text_id).raw_text, text_replacement, default_price, default_price)
else:
# Rebuild Business Scrub Item Table
rom.seek_address(0xDF8684)
for (scrub_item, default_price, text_id, text_replacement) in business_scrubs:
price = world.scrub_prices[scrub_item]
rom.write_int16(None, price) # Price
rom.write_int16(None, 1) # Count
rom.write_int32(None, scrub_item) # Item
rom.write_int32(None, 0x80A74FF8) # Can_Buy_Func
rom.write_int32(None, 0x80A75354) # Buy_Func
scrub_message_dict[text_id] = update_scrub_text(get_message_by_id(messages, text_id).raw_text, text_replacement, default_price, price)
# update actor IDs
set_deku_salesman_data(rom)
# Update scrub messages.
for text_id, message in scrub_message_dict.items():
update_message_by_id(messages, text_id, message)
if world.shuffle_grotto_entrances:
# Build the Grotto Load Table based on grotto entrance data
for entrance in world.get_shuffled_entrances(type='Grotto'):
if entrance.primary:
load_table_pointer = rom.sym('GROTTO_LOAD_TABLE') + 4 * entrance.data['grotto_id']
rom.write_int16(load_table_pointer, entrance.data['entrance'])
rom.write_byte(load_table_pointer + 2, entrance.data['content'])
else:
return_table_pointer = rom.sym('GROTTO_RETURN_TABLE') + 32 * entrance.data['grotto_id']
rom.write_int16(return_table_pointer, entrance.data['entrance'])
rom.write_byte(return_table_pointer + 2, entrance.data['room'])
rom.write_int16(return_table_pointer + 4, entrance.data['angle'])
rom.write_int32s(return_table_pointer + 8, entrance.data['pos'])
# Update grotto actors based on their new entrance
set_grotto_shuffle_data(rom, world)
if world.shuffle_cows:
rom.write_byte(rom.sym('SHUFFLE_COWS'), 0x01)
# Move some cows because they are too close from each other in vanilla
rom.write_bytes(0x33650CA, [0xFE, 0xD3, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x4A, 0x34]) # LLR Tower right cow
rom.write_bytes(0x2C550AE, [0x00, 0x82]) # LLR Stable right cow
set_cow_id_data(rom, world)
if world.shuffle_beans:
rom.write_byte(rom.sym('SHUFFLE_BEANS'), 0x01)
# Update bean salesman messages to better fit the fact that he sells a randomized item
update_message_by_id(messages, 0x405E, "\x1AChomp chomp chomp...\x01We have... \x05\x41a mysterious item\x05\x40! \x01Do you want it...huh? Huh?\x04\x05\x41\x0860 Rupees\x05\x40 and it's yours!\x01Keyahahah!\x01\x1B\x05\x42Yes\x01No\x05\x40\x02")
update_message_by_id(messages, 0x4069, "You don't have enough money.\x01I can't sell it to you.\x01Chomp chomp...\x02")
update_message_by_id(messages, 0x406C, "We hope you like it!\x01Chomp chomp chomp.\x02")
# Change first magic bean to cost 60 (is used as the price for the one time item when beans are shuffled)
rom.write_byte(0xE209FD, 0x3C)
if world.shuffle_smallkeys == 'remove' or world.shuffle_bosskeys == 'remove' or world.shuffle_ganon_bosskey == 'remove':
locked_doors = get_locked_doors(rom, world)
for _,[door_byte, door_bits] in locked_doors.items():
save_context.write_bits(door_byte, door_bits)
# Fix chest animations
if world.bombchus_in_logic:
bombchu_ids = [0x6A, 0x03, 0x6B]
for i in bombchu_ids:
item = read_rom_item(rom, i)
item['chest_type'] = 0
write_rom_item(rom, i, item)
if world.bridge == 'tokens':
item = read_rom_item(rom, 0x5B)
item['chest_type'] = 0
write_rom_item(rom, 0x5B, item)
# Update chest type sizes
if world.correct_chest_sizes:
symbol = rom.sym('CHEST_SIZE_MATCH_CONTENTS')
rom.write_int32(symbol, 0x00000001)
# Move Ganon's Castle's Zelda's Lullaby Chest back so is reachable if large
if not world.dungeon_mq['Ganons Castle']:
rom.write_int16(0x321B176, 0xFC40) # original 0xFC48
# Move Spirit Temple Compass Chest if it is a small chest so it is reachable with hookshot
if not world.dungeon_mq['Spirit Temple']:
chest_name = 'Spirit Temple Compass Chest'
chest_address = 0x2B6B07C
location = world.get_location(chest_name)
item = read_rom_item(rom, location.item.index)
if item['chest_type'] in (1, 3):
rom.write_int16(chest_address + 2, 0x0190) # X pos
rom.write_int16(chest_address + 6, 0xFABC) # Z pos
# Move Silver Gauntlets chest if it is small so it is reachable from Spirit Hover Seam
if world.logic_rules != 'glitchless':
chest_name = 'Silver Gauntlets Chest'
chest_address_0 = 0x21A02D0 # Address in setup 0
chest_address_2 = 0x21A06E4 # Address in setup 2
location = world.get_location(chest_name)
item = read_rom_item(rom, location.item.index)
if item['chest_type'] in (1, 3):
rom.write_int16(chest_address_0 + 6, 0x0172) # Z pos
rom.write_int16(chest_address_2 + 6, 0x0172) # Z pos
# give dungeon items the correct messages
add_item_messages(messages, shop_items, world)
if world.enhance_map_compass:
reward_list = {'Kokiri Emerald': "\x05\x42Kokiri Emerald\x05\x40",
'Goron Ruby': "\x05\x41Goron Ruby\x05\x40",
'Zora Sapphire': "\x05\x43Zora Sapphire\x05\x40",
'Forest Medallion': "\x05\x42Forest Medallion\x05\x40",
'Fire Medallion': "\x05\x41Fire Medallion\x05\x40",
'Water Medallion': "\x05\x43Water Medallion\x05\x40",
'Spirit Medallion': "\x05\x46Spirit Medallion\x05\x40",
'Shadow Medallion': "\x05\x45Shadow Medallion\x05\x40",
'Light Medallion': "\x05\x44Light Medallion\x05\x40"
}
dungeon_list = {'Deku Tree': ("the \x05\x42Deku Tree", 'Queen Gohma', 0x62, 0x88),
'Dodongos Cavern': ("\x05\x41Dodongo\'s Cavern", 'King Dodongo', 0x63, 0x89),
'Jabu Jabus Belly': ("\x05\x43Jabu Jabu\'s Belly", 'Barinade', 0x64, 0x8a),
'Forest Temple': ("the \x05\x42Forest Temple", 'Phantom Ganon', 0x65, 0x8b),
'Fire Temple': ("the \x05\x41Fire Temple", 'Volvagia', 0x7c, 0x8c),
'Water Temple': ("the \x05\x43Water Temple", 'Morpha', 0x7d, 0x8e),
'Spirit Temple': ("the \x05\x46Spirit Temple", 'Twinrova', 0x7e, 0x8f),
'Ice Cavern': ("the \x05\x44Ice Cavern", None, 0x87, 0x92),
'Bottom of the Well': ("the \x05\x45Bottom of the Well", None, 0xa2, 0xa5),
'Shadow Temple': ("the \x05\x45Shadow Temple", 'Bongo Bongo', 0x7f, 0xa3),
}
for dungeon in world.dungeon_mq:
if dungeon in ['Gerudo Training Grounds', 'Ganons Castle']:
pass
elif dungeon in ['Bottom of the Well', 'Ice Cavern']:
dungeon_name, boss_name, compass_id, map_id = dungeon_list[dungeon]
if world.world_count > 1:
map_message = "\x13\x76\x08\x05\x42\x0F\x05\x40 found the \x05\x41Dungeon Map\x05\x40\x01for %s\x05\x40!\x09" % (dungeon_name)
else:
map_message = "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for %s\x05\x40!\x01It\'s %s!\x09" % (dungeon_name, "masterful" if world.dungeon_mq[dungeon] else "ordinary")
if world.mq_dungeons_random or world.mq_dungeons != 0 and world.mq_dungeons != 12:
update_message_by_id(messages, map_id, map_message)
else:
dungeon_name, boss_name, compass_id, map_id = dungeon_list[dungeon]
dungeon_reward = reward_list[world.get_location(boss_name).item.name]
if world.world_count > 1:
compass_message = "\x13\x75\x08\x05\x42\x0F\x05\x40 found the \x05\x41Compass\x05\x40\x01for %s\x05\x40!\x09" % (dungeon_name)
else:
compass_message = "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for %s\x05\x40!\x01It holds the %s!\x09" % (dungeon_name, dungeon_reward)
update_message_by_id(messages, compass_id, compass_message)
if world.mq_dungeons_random or world.mq_dungeons != 0 and world.mq_dungeons != 12:
if world.world_count > 1:
map_message = "\x13\x76\x08\x05\x42\x0F\x05\x40 found the \x05\x41Dungeon Map\x05\x40\x01for %s\x05\x40!\x09" % (dungeon_name)
else:
map_message = "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for %s\x05\x40!\x01It\'s %s!\x09" % (dungeon_name, "masterful" if world.dungeon_mq[dungeon] else "ordinary")
update_message_by_id(messages, map_id, map_message)
else:
# Set hints for boss reward shuffle
rom.write_bytes(0xE2ADB2, [0x70, 0x7A])
rom.write_bytes(0xE2ADB6, [0x70, 0x57])
buildBossRewardHints(world, messages)
if world.tokensanity == 'off':
# Change the GS token pickup message to fade out after 2 seconds (40 frames)
update_message_by_id(messages, 0x00B4, bytearray(get_message_by_id(messages, 0x00B4).raw_text)[:-1] + b'\x0E\x28')
# Prevent the GS token actor from freezing the player and waiting for the textbox to be closed
rom.write_int32s(0xEC68C0, [0x00000000, 0x00000000])
rom.write_int32s(0xEC69B0, [0x00000000, 0x00000000])
rom.write_int32(0xEC6A10, 0x34020002) # li v0, 2
# update happy mask shop to use new SOLD OUT text id
rom.write_int16(shop_item_file.start + 0x1726, shop_items[0x26].description_message)
# Add 3rd Wallet Upgrade
rom.write_int16(0xB6D57E, 0x0003)
rom.write_int16(0xB6EC52, 999)
tycoon_message = "\x08\x13\x57You got a \x05\x43Tycoon's Wallet\x05\x40!\x01Now you can hold\x01up to \x05\x46999\x05\x40 \x05\x46Rupees\x05\x40."
if world.world_count > 1:
tycoon_message = make_player_message(tycoon_message)
update_message_by_id(messages, 0x00F8, tycoon_message, 0x23)
write_shop_items(rom, shop_item_file.start + 0x1DEC, shop_items)
permutation = None
# text shuffle
if world.text_shuffle == 'except_hints':
permutation = shuffle_messages(messages, except_hints=True)
elif world.text_shuffle == 'complete':
permutation = shuffle_messages(messages, except_hints=False)
repack_messages(rom, messages, permutation)
# output a text dump, for testing...
#with open('keysanity_' + str(world.seed) + '_dump.txt', 'w', encoding='utf-16') as f:
# messages = read_messages(rom)
# f.write('item_message_strings = {\n')
# for m in messages:
# f.write("\t0x%04X: \"%s\",\n" % (m.id, m.get_python_string()))
# f.write('}\n')
if world.free_scarecrow:
# Played song as adult
save_context.write_bits(0x0EE6, 0x10)
# Direct scarecrow behavior
symbol = rom.sym('FREE_SCARECROW_ENABLED')
rom.write_byte(symbol, 0x01)
if world.ocarina_songs:
replace_songs(rom)
# actually write the save table to rom
world.distribution.give_items(save_context)
if world.starting_age == 'adult':
# When starting as adult, the pedestal doesn't handle child default equips when going back child the first time, so we have to equip them ourselves
save_context.equip_default_items('child')
save_context.equip_current_items(world.starting_age)
save_context.write_save_table(rom)
return rom
NUM_VANILLA_OBJECTS = 0x192
def add_to_extended_object_table(rom, object_id, object_file):
extended_id = object_id - NUM_VANILLA_OBJECTS - 1
extended_object_table = rom.sym('EXTENDED_OBJECT_TABLE')
rom.write_int32s(extended_object_table + extended_id * 8, [object_file.start, object_file.end])
item_row_struct = struct.Struct('>BBHHBBIIhh') # Match item_row_t in item_table.h
item_row_fields = [
'base_item_id', 'action_id', 'text_id', 'object_id', 'graphic_id', 'chest_type',
'upgrade_fn', 'effect_fn', 'effect_arg1', 'effect_arg2',
]
def read_rom_item(rom, item_id):
addr = rom.sym('item_table') + (item_id * item_row_struct.size)
row_bytes = rom.read_bytes(addr, item_row_struct.size)
row = item_row_struct.unpack(row_bytes)
return { item_row_fields[i]: row[i] for i in range(len(item_row_fields)) }
def write_rom_item(rom, item_id, item):
addr = rom.sym('item_table') + (item_id * item_row_struct.size)
row = [item[f] for f in item_row_fields]
row_bytes = item_row_struct.pack(*row)
rom.write_bytes(addr, row_bytes)
def get_override_table(world):
return list(filter(lambda val: val != None, map(get_override_entry, world.get_filled_locations())))
override_struct = struct.Struct('>xBBBHBB') # match override_t in get_items.c
def get_override_table_bytes(override_table):
return b''.join(sorted(itertools.starmap(override_struct.pack, override_table)))
def get_override_entry(location):
scene = location.scene
default = location.default
item_id = location.item.index
if None in [scene, default, item_id]:
return None
player_id = location.item.world.id + 1
if location.item.looks_like_item is not None:
looks_like_item_id = location.item.looks_like_item.index
else:
looks_like_item_id = 0
if location.type in ['NPC', 'BossHeart']:
type = 0
elif location.type == 'Chest':
type = 1
default &= 0x1F
elif location.type == 'Collectable':
type = 2
elif location.type == 'GS Token':
type = 3
elif location.type == 'Shop' and location.item.type != 'Shop':
type = 0
elif location.type == 'GrottoNPC' and location.item.type != 'Shop':
type = 4
elif location.type in ['Song', 'Cutscene']:
type = 5
else:
return None
return (scene, type, default, item_id, player_id, looks_like_item_id)
chestTypeMap = {
# small big boss
0x0000: [0x5000, 0x0000, 0x2000], #Large
0x1000: [0x7000, 0x1000, 0x1000], #Large, Appears, Clear Flag
0x2000: [0x5000, 0x0000, 0x2000], #Boss Key’s Chest
0x3000: [0x8000, 0x3000, 0x3000], #Large, Falling, Switch Flag
0x4000: [0x6000, 0x4000, 0x4000], #Large, Invisible
0x5000: [0x5000, 0x0000, 0x2000], #Small
0x6000: [0x6000, 0x4000, 0x4000], #Small, Invisible
0x7000: [0x7000, 0x1000, 0x1000], #Small, Appears, Clear Flag
0x8000: [0x8000, 0x3000, 0x3000], #Small, Falling, Switch Flag
0x9000: [0x9000, 0x9000, 0x9000], #Large, Appears, Zelda's Lullaby
0xA000: [0xA000, 0xA000, 0xA000], #Large, Appears, Sun's Song Triggered
0xB000: [0xB000, 0xB000, 0xB000], #Large, Appears, Switch Flag
0xC000: [0x5000, 0x0000, 0x2000], #Large
0xD000: [0x5000, 0x0000, 0x2000], #Large
0xE000: [0x5000, 0x0000, 0x2000], #Large
0xF000: [0x5000, 0x0000, 0x2000], #Large
}
def room_get_actors(rom, actor_func, room_data, scene, alternate=None):
actors = {}
room_start = alternate if alternate else room_data
command = 0
while command != 0x14: # 0x14 = end header
command = rom.read_byte(room_data)
if command == 0x01: # actor list
actor_count = rom.read_byte(room_data + 1)
actor_list = room_start + (rom.read_int32(room_data + 4) & 0x00FFFFFF)
for _ in range(0, actor_count):
actor_id = rom.read_int16(actor_list)
entry = actor_func(rom, actor_id, actor_list, scene)
if entry:
actors[actor_list] = entry
actor_list = actor_list + 16
if command == 0x18: # Alternate header list
header_list = room_start + (rom.read_int32(room_data + 4) & 0x00FFFFFF)
for alt_id in range(0,3):
header_data = room_start + (rom.read_int32(header_list) & 0x00FFFFFF)
if header_data != 0 and not alternate:
actors.update(room_get_actors(rom, actor_func, header_data, scene, room_start))
header_list = header_list + 4
room_data = room_data + 8
return actors
def scene_get_actors(rom, actor_func, scene_data, scene, alternate=None, processed_rooms=None):
if processed_rooms == None:
processed_rooms = []
actors = {}
scene_start = alternate if alternate else scene_data
command = 0
while command != 0x14: # 0x14 = end header
command = rom.read_byte(scene_data)
if command == 0x04: #room list
room_count = rom.read_byte(scene_data + 1)
room_list = scene_start + (rom.read_int32(scene_data + 4) & 0x00FFFFFF)
for _ in range(0, room_count):
room_data = rom.read_int32(room_list);
if not room_data in processed_rooms:
actors.update(room_get_actors(rom, actor_func, room_data, scene))
processed_rooms.append(room_data)
room_list = room_list + 8
if command == 0x0E: #transition actor list
actor_count = rom.read_byte(scene_data + 1)
actor_list = scene_start + (rom.read_int32(scene_data + 4) & 0x00FFFFFF)
for _ in range(0, actor_count):
actor_id = rom.read_int16(actor_list + 4)
entry = actor_func(rom, actor_id, actor_list, scene)
if entry:
actors[actor_list] = entry
actor_list = actor_list + 16
if command == 0x18: # Alternate header list
header_list = scene_start + (rom.read_int32(scene_data + 4) & 0x00FFFFFF)
for alt_id in range(0,3):
header_data = scene_start + (rom.read_int32(header_list) & 0x00FFFFFF)
if header_data != 0 and not alternate:
actors.update(scene_get_actors(rom, actor_func, header_data, scene, scene_start, processed_rooms))
header_list = header_list + 4
scene_data = scene_data + 8
return actors
def get_actor_list(rom, actor_func):
actors = {}
scene_table = 0x00B71440
for scene in range(0x00, 0x65):
scene_data = rom.read_int32(scene_table + (scene * 0x14));
actors.update(scene_get_actors(rom, actor_func, scene_data, scene))
return actors
def get_override_itemid(override_table, scene, type, flags):
for entry in override_table:
if entry[0] == scene and (entry[1] & 0x07) == type and entry[2] == flags:
return entry[4]
return None
def remove_entrance_blockers(rom):
def remove_entrance_blockers_do(rom, actor_id, actor, scene):
if actor_id == 0x014E and scene == 97:
actor_var = rom.read_int16(actor + 14);
if actor_var == 0xFF01:
rom.write_int16(actor + 14, 0x0700)
get_actor_list(rom, remove_entrance_blockers_do)
def set_cow_id_data(rom, world):
def set_cow_id(rom, actor_id, actor, scene):
nonlocal last_scene
nonlocal cow_count
nonlocal last_actor
if actor_id == 0x01C6: #Cow
if scene == last_scene and last_actor != actor:
cow_count += 1
else:
cow_count = 1
last_scene = scene
last_actor = actor
if world.dungeon_mq['Jabu Jabus Belly'] and scene == 2: #If its an MQ jabu cow
rom.write_int16(actor + 0x8, 1 if cow_count == 17 else 0) #Give all wall cows ID 0, and set cow 11's ID to 1
else:
rom.write_int16(actor + 0x8, cow_count)
last_actor = -1
last_scene = -1
cow_count = 1
get_actor_list(rom, set_cow_id)
def set_grotto_shuffle_data(rom, world):
def override_grotto_data(rom, actor_id, actor, scene):
if actor_id == 0x009B: #Grotto
actor_zrot = rom.read_int16(actor + 12)
actor_var = rom.read_int16(actor + 14)
grotto_type = (actor_var >> 8) & 0x0F
grotto_id = (scene << 8) + (actor_var & 0x00FF)
rom.write_int16(actor + 12, grotto_entrances_override[grotto_id])
rom.write_byte(actor + 14, grotto_type + 0x20)
# Build the override table based on shuffled grotto entrances
grotto_entrances_override = {}
for entrance in world.get_shuffled_entrances(type='Grotto'):
if entrance.primary:
grotto_id = (entrance.data['scene'] << 8) + entrance.data['content']
grotto_entrances_override[grotto_id] = entrance.replaces.data['index']
else:
rom.write_int16(rom.sym('GROTTO_EXIT_LIST') + 2 * entrance.data['grotto_id'], entrance.replaces.data['index'])
# Override grotto actors data with the new data
get_actor_list(rom, override_grotto_data)
def set_deku_salesman_data(rom):
def set_deku_salesman(rom, actor_id, actor, scene):
if actor_id == 0x0195: #Salesman
actor_var = rom.read_int16(actor + 14)
if actor_var == 6:
rom.write_int16(actor + 14, 0x0003)
get_actor_list(rom, set_deku_salesman)
def get_locked_doors(rom, world):
def locked_door(rom, actor_id, actor, scene):
actor_var = rom.read_int16(actor + 14)
actor_type = actor_var >> 6
actor_flag = actor_var & 0x003F
flag_id = (1 << actor_flag)
flag_byte = 3 - (actor_flag >> 3)
flag_bits = 1 << (actor_flag & 0x07)
# If locked door, set the door's unlock flag
if world.shuffle_smallkeys == 'remove':
if actor_id == 0x0009 and actor_type == 0x02:
return [0x00D4 + scene * 0x1C + 0x04 + flag_byte, flag_bits]
if actor_id == 0x002E and actor_type == 0x0B:
return [0x00D4 + scene * 0x1C + 0x04 + flag_byte, flag_bits]
# If boss door, set the door's unlock flag
if (world.shuffle_bosskeys == 'remove' and scene != 0x0A) or (world.shuffle_ganon_bosskey == 'remove' and scene == 0x0A):
if actor_id == 0x002E and actor_type == 0x05:
return [0x00D4 + scene * 0x1C + 0x04 + flag_byte, flag_bits]
return get_actor_list(rom, locked_door)
def create_fake_name(name):
vowels = 'aeiou'
list_name = list(name)
vowel_indexes = [i for i,c in enumerate(list_name) if c in vowels]
for i in random.sample(vowel_indexes, min(2, len(vowel_indexes))):
c = list_name[i]
list_name[i] = random.choice([v for v in vowels if v != c])
# keeping the game E...
new_name = ''.join(list_name)
censor = ['cum', 'cunt', 'dike', 'penis', 'puss', 'shit']
new_name_az = re.sub(r'[^a-zA-Z]', '', new_name.lower(), re.UNICODE)
for cuss in censor:
if cuss in new_name_az:
return create_fake_name(name)
return new_name
def place_shop_items(rom, world, shop_items, messages, locations, init_shop_id=False):
if init_shop_id:
place_shop_items.shop_id = 0x32
shop_objs = { 0x0148 } # "Sold Out" object
for location in locations:
if location.item.type == 'Shop':
shop_objs.add(location.item.special['object'])
rom.write_int16(location.address, location.item.index)
else:
if location.item.looks_like_item is not None:
item_display = location.item.looks_like_item
else:
item_display = location.item
# bottles in shops should look like empty bottles
# so that that are different than normal shop refils
if 'shop_object' in item_display.special:
rom_item = read_rom_item(rom, item_display.special['shop_object'])
else:
rom_item = read_rom_item(rom, item_display.index)
shop_objs.add(rom_item['object_id'])
shop_id = place_shop_items.shop_id
rom.write_int16(location.address, shop_id)
shop_item = shop_items[shop_id]
shop_item.object = rom_item['object_id']
shop_item.model = rom_item['graphic_id'] - 1
shop_item.price = location.price
shop_item.pieces = 1
shop_item.get_item_id = location.default
shop_item.func1 = 0x808648CC
shop_item.func2 = 0x808636B8
shop_item.func3 = 0x00000000
shop_item.func4 = 0x80863FB4
message_id = (shop_id - 0x32) * 2
shop_item.description_message = 0x8100 + message_id
shop_item.purchase_message = 0x8100 + message_id + 1
shuffle_messages.shop_item_messages.extend(
[shop_item.description_message, shop_item.purchase_message])
if item_display.dungeonitem:
split_item_name = item_display.name.split('(')
split_item_name[1] = '(' + split_item_name[1]
if location.item.name == 'Ice Trap':
split_item_name[0] = create_fake_name(split_item_name[0])
if world.world_count > 1:
description_text = '\x08\x05\x41%s %d Rupees\x01%s\x01\x05\x42Player %d\x05\x40\x01Special deal! ONE LEFT!\x09\x0A\x02' % (split_item_name[0], location.price, split_item_name[1], location.item.world.id + 1)
else:
description_text = '\x08\x05\x41%s %d Rupees\x01%s\x01\x05\x40Special deal! ONE LEFT!\x01Get it while it lasts!\x09\x0A\x02' % (split_item_name[0], location.price, split_item_name[1])
purchase_text = '\x08%s %d Rupees\x09\x01%s\x01\x1B\x05\x42Buy\x01Don\'t buy\x05\x40\x02' % (split_item_name[0], location.price, split_item_name[1])
else:
shop_item_name = getSimpleHintNoPrefix(item_display)
if location.item.name == 'Ice Trap':
shop_item_name = create_fake_name(shop_item_name)
if world.world_count > 1:
description_text = '\x08\x05\x41%s %d Rupees\x01\x05\x42Player %d\x05\x40\x01Special deal! ONE LEFT!\x09\x0A\x02' % (shop_item_name, location.price, location.item.world.id + 1)
else:
description_text = '\x08\x05\x41%s %d Rupees\x01\x05\x40Special deal! ONE LEFT!\x01Get it while it lasts!\x09\x0A\x02' % (shop_item_name, location.price)
purchase_text = '\x08%s %d Rupees\x09\x01\x01\x1B\x05\x42Buy\x01Don\'t buy\x05\x40\x02' % (shop_item_name, location.price)
update_message_by_id(messages, shop_item.description_message, description_text, 0x03)
update_message_by_id(messages, shop_item.purchase_message, purchase_text, 0x03)
place_shop_items.shop_id += 1
return shop_objs
def boss_reward_index(world, boss_name):
code = world.get_location(boss_name).item.special['item_id']
if code >= 0x6C:
return code - 0x6C
else:
return 3 + code - 0x66
def configure_dungeon_info(rom, world):
mq_enable = (world.mq_dungeons_random or world.mq_dungeons != 0 and world.mq_dungeons != 12)
mapcompass_keysanity = world.settings.enhance_map_compass
bosses = ['Queen Gohma', 'King Dodongo', 'Barinade', 'Phantom Ganon',
'Volvagia', 'Morpha', 'Twinrova', 'Bongo Bongo']
dungeon_rewards = [boss_reward_index(world, boss) for boss in bosses]
codes = ['Deku Tree', 'Dodongos Cavern', 'Jabu Jabus Belly', 'Forest Temple',
'Fire Temple', 'Water Temple', 'Spirit Temple', 'Shadow Temple',
'Bottom of the Well', 'Ice Cavern', 'Tower (N/A)',
'Gerudo Training Grounds', 'Hideout (N/A)', 'Ganons Castle']
dungeon_is_mq = [1 if world.dungeon_mq.get(c) else 0 for c in codes]
rom.write_int32(rom.sym('cfg_dungeon_info_enable'), 1)
rom.write_int32(rom.sym('cfg_dungeon_info_mq_enable'), int(mq_enable))
rom.write_int32(rom.sym('cfg_dungeon_info_mq_need_map'), int(mapcompass_keysanity))
rom.write_int32(rom.sym('cfg_dungeon_info_reward_need_compass'), int(mapcompass_keysanity))
rom.write_int32(rom.sym('cfg_dungeon_info_reward_need_altar'), int(not mapcompass_keysanity))
rom.write_bytes(rom.sym('cfg_dungeon_rewards'), dungeon_rewards)
rom.write_bytes(rom.sym('cfg_dungeon_is_mq'), dungeon_is_mq)
| 48.465418
| 544
| 0.666756
|
d2606e108015be4ebeb7f8c83258bec2e38e6e02
| 901
|
py
|
Python
|
Lib/site-packages/ipykernel/tests/__init__.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/ipykernel/tests/__init__.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/ipykernel/tests/__init__.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import sys
import tempfile
from unittest.mock import patch
from ipykernel.kernelspec import install
pjoin = os.path.join
tmp = None
patchers = []
def setup():
"""setup temporary env for tests"""
global tmp
tmp = tempfile.mkdtemp()
patchers[:] = [
patch.dict(
os.environ,
{
"HOME": tmp,
# Let tests work with --user install when HOME is changed:
"PYTHONPATH": os.pathsep.join(sys.path),
},
),
]
for p in patchers:
p.start()
# install IPython in the temp home:
install(user=True)
def teardown():
for p in patchers:
p.stop()
try:
shutil.rmtree(tmp)
except OSError:
# no such file
pass
| 18.770833
| 74
| 0.576027
|
88f9c43b7a33972289d724c99290ce090b247622
| 365
|
py
|
Python
|
tests/test_for.py
|
presztak/lilang
|
0560b407316defe710d611d33f0b2db6c2667bf0
|
[
"MIT"
] | null | null | null |
tests/test_for.py
|
presztak/lilang
|
0560b407316defe710d611d33f0b2db6c2667bf0
|
[
"MIT"
] | null | null | null |
tests/test_for.py
|
presztak/lilang
|
0560b407316defe710d611d33f0b2db6c2667bf0
|
[
"MIT"
] | null | null | null |
from llvm_test_case import LLVMTestCase
class ForTestCase(LLVMTestCase):
def test_for(self):
code = '''
int counter = 0;
for (int i = 0; i < 10; i += 1;) {
counter = counter + 1;
}
printi(counter);
'''
result = self.run_code(code)
self.assertEqual(result, '10')
| 22.8125
| 46
| 0.493151
|
0c83089ea13f302c25fb9d280bb521e1a6a5194d
| 1,142
|
py
|
Python
|
tests/models/onnx-model-zoo/vision/classification/shufflenet/test_shufflenet-6.py
|
ChanSiYuan/nncase
|
6f95be62d1686f5ea2a9806ac97e5817bf1eda3c
|
[
"Apache-2.0"
] | 510
|
2018-12-29T06:49:36.000Z
|
2022-03-30T08:36:29.000Z
|
tests/models/onnx-model-zoo/vision/classification/shufflenet/test_shufflenet-6.py
|
ChanSiYuan/nncase
|
6f95be62d1686f5ea2a9806ac97e5817bf1eda3c
|
[
"Apache-2.0"
] | 459
|
2019-02-17T13:31:29.000Z
|
2022-03-31T05:55:38.000Z
|
tests/models/onnx-model-zoo/vision/classification/shufflenet/test_shufflenet-6.py
|
ChanSiYuan/nncase
|
6f95be62d1686f5ea2a9806ac97e5817bf1eda3c
|
[
"Apache-2.0"
] | 155
|
2019-04-16T08:43:24.000Z
|
2022-03-21T07:27:26.000Z
|
# Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
from onnx_test_runner import OnnxTestRunner
def test_shufflenet_6(request):
overwrite_cfg = open('tests/models/onnx-model-zoo/vision/classification/test_config.yml', 'r', encoding="utf8").read()
runner = OnnxTestRunner(request.node.name, overwrite_configs=overwrite_cfg)
model_file = 'onnx-models/vision/classification/shufflenet/model/shufflenet-6.onnx'
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_shufflenet-6.py'])
| 39.37931
| 122
| 0.764448
|
603d9818acef49dbce5d6fe671ce9e3dae851ab3
| 6,485
|
py
|
Python
|
cryptopublisher/cryptopublisher.py
|
stuianna/cryptoPublsiher
|
f3c799def0ce3896eaedf4de1f28cf780ec1d14f
|
[
"MIT"
] | null | null | null |
cryptopublisher/cryptopublisher.py
|
stuianna/cryptoPublsiher
|
f3c799def0ce3896eaedf4de1f28cf780ec1d14f
|
[
"MIT"
] | null | null | null |
cryptopublisher/cryptopublisher.py
|
stuianna/cryptoPublsiher
|
f3c799def0ce3896eaedf4de1f28cf780ec1d14f
|
[
"MIT"
] | null | null | null |
import logging
import appdirs
import os
import time
import argparse
import socket
import subprocess
from cryptopublisher._version import __version__
import dbops.timeconverter as timeconverter
from dbops.influxhelper import InfluxHelper
log = logging.getLogger(__name__)
PUBLISHER_NAME = 'cryptoPublisher'
MAXIMUM_UPDATE_SIZE = 5000
class CryptoPublisher():
def setup_logging(logLevel, directory, output_file=None):
full_path = os.path.join(directory, output_file)
logging.basicConfig(format='%(asctime)s %(levelname)s %(module)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S%p',
level=logLevel,
filename=full_path)
def get_working_directory(publisher_name):
dir_path = os.path.join(appdirs.user_config_dir(PUBLISHER_NAME), publisher_name)
return CryptoPublisher.create_dir_if_not_exit(dir_path)
def create_dir_if_not_exit(dir_path):
if not os.path.exists(dir_path):
try:
os.makedirs(dir_path)
except OSError as e:
log.error("Cannot create required directory. Excpetion {}".format(e))
return None
return dir_path
def setup_influx_database(db_name):
influx = InfluxHelper(db_name)
if influx.exists():
return influx
else:
return None
def reset_influx_database(influx, db_name):
influx.remove_database(db_name)
influx = CryptoPublisher.setup_influx_database(db_name)
def remove_influx_db_measurements(influx, symbols):
log.warning("Removing influx database measurements")
for symbol in symbols:
if not influx.remove_measurement(symbol):
log.error("Failed to remove measurement {}".format(symbol))
def get_fiends_and_drop_na(df, fields):
extra_entries = False
if len(df) > MAXIMUM_UPDATE_SIZE:
extra_entries = True
if len(df) == 0:
return df, False, fields
df = df.loc[0:MAXIMUM_UPDATE_SIZE - 1, fields + ['timestamp']]
na_entries = df.isnull().sum().sum()
if na_entries > 0:
na_bool = df.isnull().any(axis=1)
min_index = na_bool[na_bool == True].idxmax()
if min_index == 0:
field_to_drop = df.isnull()[na_bool].idxmax(axis=1).iloc[0]
df = df.drop([field_to_drop], axis=1).head(1)
fields.remove(field_to_drop)
log.warning("Dropping field {} due to na entry".format(field_to_drop))
else:
df = df.head(min_index)
extra_entries = True
return df, extra_entries, fields
def measurement_exists(influx, measurement):
all_measurements = influx.get_measurement_names()
return measurement in all_measurements
def get_last_influx_timestamp(influx, measurement, field, tags=None, tag_filter=None):
if CryptoPublisher.measurement_exists(influx, measurement):
last_influx_timestamp = influx.get_last_time_entry(measurement, field, tags, tag_filter, as_unix=True)
if last_influx_timestamp is None:
last_influx_timestamp = 0
else:
# Add one to timestamp, otherwise the last value in the DB is always returned
last_influx_timestamp = last_influx_timestamp['time'] + 1
else:
last_influx_timestamp = 0
log.debug("Last timestamp for {}: {}".format(measurement, timeconverter.unix_to_rfc3339(last_influx_timestamp)))
return last_influx_timestamp
def get_new_sqlite_entries(sqlite, table, column_filter, timestamp):
new_entries = sqlite.get_row_range(table, 'timestamp', timestamp, int(time.time()))
if new_entries is None:
return None, True, column_filter
new_entries, extra_entries, used_fields = CryptoPublisher.get_fiends_and_drop_na(new_entries, column_filter)
return new_entries, extra_entries, used_fields
def create_common_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-w',
'--working_directory',
nargs=1,
help="Specify the directory for configuration and logs."
"If not supplied, set to user configuration directory.",
default=None)
parser.add_argument('-k', '--kill', action='store_true', help="Kills any running instance", default=False)
parser.add_argument('-g',
'--generate_config',
action='store_true',
help="Generates the neccessary configuration files and directories.")
parser.add_argument('-l',
'--log',
nargs=1,
help="Log level. Must be one of either DEBUG,"
"INFO, WARNING, ERROR or CRITICAL. Default = INFO",
default="INFO")
parser.add_argument('-c', '--clean', action='store_true', help="Remove (clean) the target influx database.")
parser.add_argument('-v',
'--version',
action='version',
version="Crypto publisher collection. Version {}".format(__version__))
return parser
def process_log_level(level):
if level == 'DEBUG':
return logging.DEBUG
elif level == 'INFO':
return logging.INFO
elif level == 'WARNING':
return logging.WARNING
elif level == 'ERROR':
return logging.ERROR
elif level == 'CRITICAL':
return logging.CRITICAL
else:
return logging.INFO
def already_running(process_name):
processName = process_name
CryptoPublisher.already_running._lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
CryptoPublisher.already_running._lock_socket.bind('\0' + processName)
log.info("New publisher instance started")
return False
except Exception as e:
_ = e
log.warning("Attempting to start daemon which is already running")
return True
def kill(process_name):
subprocess.Popen(['killall', process_name])
| 39.066265
| 120
| 0.603855
|
1de9b8701fee5ed0ca251fe8e4d792e48e681f32
| 1,214
|
py
|
Python
|
applications/station/urls.py
|
awwong1/apollo
|
5571b5f222265bec3eed45b21e862636ccdc9a97
|
[
"MIT"
] | null | null | null |
applications/station/urls.py
|
awwong1/apollo
|
5571b5f222265bec3eed45b21e862636ccdc9a97
|
[
"MIT"
] | null | null | null |
applications/station/urls.py
|
awwong1/apollo
|
5571b5f222265bec3eed45b21e862636ccdc9a97
|
[
"MIT"
] | null | null | null |
from applications.station import views
from django.conf.urls import url, patterns
urlpatterns = patterns(
'',
# Station Generic Views
url(r'^create/(?P<business_pk>\d*)/$', views.StationViewCreate.as_view(), name='station_create'),
url(r'^(?P<pk>\d*)/$', views.StationViewDetail.as_view(), name='station_detail'),
url(r'^$', views.StationViewList.as_view(), name='station_list'),
url(r'^(?P<pk>\d*)/update/$', views.StationViewUpdate.as_view(), name='station_update'),
url(r'^(?P<pk>\d*)/delete/$', views.StationViewDelete.as_view(), name='station_delete'),
# Station Business Association Generic Views
url(r'^station_business/(?P<station_pk>\d*)/create/$', views.StationBusinessViewCreate.as_view(),
name='stationbusiness_create'),
url(r'station_business/(?P<pk>\d*)/delete/$', views.StationBusinessViewDelete.as_view(),
name='stationbusiness_delete'),
# Station Rentals associated generic views
url(r'^station_rental/(?P<pk>\d*)/update/$', views.StationRentalViewUpdate.as_view(),
name='stationrental_update'),
url(r'station_rental/(?P<pk>\d*)/delete/$', views.StationRentalViewDelete.as_view(),
name='stationrental_delete'),
)
| 55.181818
| 101
| 0.694399
|
e18fa1a1f71efd517b72a17d71ffe5b325794f6a
| 5,250
|
py
|
Python
|
neocore/Cryptography/MerkleTree.py
|
simplitech/neo-python-core
|
b2ce968cd9c46b45f6ff97eda7018d13dc7b60c1
|
[
"MIT"
] | 22
|
2018-01-09T15:14:35.000Z
|
2021-11-08T12:14:32.000Z
|
neocore/Cryptography/MerkleTree.py
|
simplitech/neo-python-core
|
b2ce968cd9c46b45f6ff97eda7018d13dc7b60c1
|
[
"MIT"
] | 192
|
2017-12-31T14:48:47.000Z
|
2019-09-10T08:42:11.000Z
|
neocore/Cryptography/MerkleTree.py
|
simplitech/neo-python-core
|
b2ce968cd9c46b45f6ff97eda7018d13dc7b60c1
|
[
"MIT"
] | 28
|
2018-01-02T21:53:45.000Z
|
2020-11-07T03:13:47.000Z
|
import sys
from .Crypto import *
from neocore.UInt256 import UInt256
class MerkleTreeNode(object):
Hash = None
Parent = None
LeftChild = None
RightChild = None
def __init__(self, hash=None):
"""
Create an instance.
Args:
hash (bytes):
"""
self.Hash = hash
def IsLeaf(self):
"""
If the node is a leaf.
Returns:
bool: True if node is a leaf. False, otherwise.
"""
if not self.LeftChild and not self.RightChild:
return True
return False
def IsRoot(self):
"""
If the node is the root.
Returns:
bool: True if the root. False otherwise.
"""
return self.Parent is None
def Size(self):
"""
Get the size of self in bytes.
Returns:
int: number of bytes.
"""
return sys.getsizeof(self)
class MerkleTree(object):
Root = None
Depth = 0
def __init__(self, hashes):
"""
Crease an instance.
Args:
hashes (list): each hash is of bytearray type.
"""
self.Root = MerkleTree.__Build([MerkleTreeNode(hash) for hash in hashes])
depth = 1
i = self.Root
while i.LeftChild is not None:
depth = depth + 1
i = i.LeftChild
self.Depth = depth
@staticmethod
def __Build(leaves):
"""
Build the merkle tree.
Args:
leaves (list): items are of type MerkleTreeNode.
Returns:
MerkleTreeNode: the root node.
"""
if len(leaves) < 1:
raise Exception('Leaves must have length')
if len(leaves) == 1:
return leaves[0]
num_parents = int((len(leaves) + 1) / 2)
parents = [MerkleTreeNode() for i in range(0, num_parents)]
for i in range(0, num_parents):
node = parents[i]
node.LeftChild = leaves[i * 2]
leaves[i * 2].Parent = node
if (i * 2 + 1 == len(leaves)):
node.RightChild = node.LeftChild
else:
node.RightChild = leaves[i * 2 + 1]
leaves[i * 2 + 1].Parent = node
hasharray = bytearray(node.LeftChild.Hash.ToArray() + node.RightChild.Hash.ToArray())
node.Hash = UInt256(data=Crypto.Hash256(hasharray))
return MerkleTree.__Build(parents)
# < summary >
# 计算根节点的值
# < / summary >
# < param name = "hashes" > 子节点列表 < / param >
# < returns > 返回计算的结果 < / returns >
@staticmethod
def ComputeRoot(hashes):
"""
Compute the root hash.
Args:
hashes (list): the list of hashes to build the root from.
Returns:
bytes: the root hash.
"""
if not len(hashes):
raise Exception('Hashes must have length')
if len(hashes) == 1:
return hashes[0]
tree = MerkleTree(hashes)
return tree.Root.Hash
@staticmethod
def __DepthFirstSearch(node, hashes):
"""
Internal helper method.
Args:
node (MerkleTreeNode):
hashes (list): each item is a bytearray.
"""
if node.LeftChild is None:
hashes.add(node.Hash)
else:
MerkleTree.__DepthFirstSearch(node.LeftChild, hashes)
MerkleTree.__DepthFirstSearch(node.RightChild, hashes)
def ToHashArray(self):
"""
Turn the tree into a list of hashes.
Returns:
list:
"""
hashes = set()
MerkleTree.__DepthFirstSearch(self.Root, hashes)
return list(hashes)
def Trim(self, flags):
"""
Trim the nodes from the tree keeping only the root hash.
Args:
flags: "0000" for trimming, any other value for keeping the nodes.
"""
logger.info("Trimming!")
flags = bytearray(flags)
length = 1 << self.Depth - 1
while len(flags) < length:
flags.append(0)
MerkleTree._TrimNode(self.Root, 0, self.Depth, flags)
@staticmethod
def _TrimNode(node, index, depth, flags):
"""
Internal helper method to trim a node.
Args:
node (MerkleTreeNode):
index (int): flag index.
depth (int): node tree depth to start trim from.
flags (bytearray): of left/right pairs. 1 byte for the left node, 1 byte for the right node.
00 to erase, 11 to keep. Will keep the node if either left or right is not-0
"""
if depth == 1 or node.LeftChild is None:
return
if depth == 2:
if not flags[index * 2] and not flags[index * 2 + 1]:
node.LeftChild = None
node.RightChild = None
else:
MerkleTree._TrimNode(node.LeftChild, index * 2, depth - 1, flags)
MerkleTree._TrimNode(node.RightChild, index * 2, depth - 1, flags)
if node.LeftChild.LeftChild is None and node.RightChild.RightChild is None:
node.LeftChild = None
node.RightChild = None
| 26.785714
| 108
| 0.532952
|
c133770bf67edce19896ad2f6c2f87f7012a6c67
| 1,239
|
py
|
Python
|
core/environments.py
|
eastack/getman
|
99308272ea091aee54f2f392cd757bba340c019e
|
[
"MIT"
] | null | null | null |
core/environments.py
|
eastack/getman
|
99308272ea091aee54f2f392cd757bba340c019e
|
[
"MIT"
] | null | null | null |
core/environments.py
|
eastack/getman
|
99308272ea091aee54f2f392cd757bba340c019e
|
[
"MIT"
] | null | null | null |
from enum import Enum, unique, auto
import requests
@unique
class Environments(Enum):
DEVELOPMENT = auto()
TESTING = auto()
STAGING = auto()
PRODUCTION = auto()
current = Environments.PRODUCTION
def session():
if current == Environments.DEVELOPMENT:
development = requests.Session()
development.headers.update({'hello': 'world'})
return development
elif current == Environments.TESTING:
development = requests.Session()
development.headers.update({'hello': 'world'})
return development
elif current == Environments.STAGING:
development = requests.Session()
development.headers.update({'hello': 'world'})
return development
elif current == Environments.PRODUCTION:
development = requests.Session()
development.headers.update({'hello': 'world'})
return development
def server():
if current == Environments.DEVELOPMENT:
return "https://httpbin.org"
elif current == Environments.TESTING:
return "https://httpbin.org"
elif current == Environments.STAGING:
return "https://httpbin.org"
elif current == Environments.PRODUCTION:
return "https://httpbin.org"
| 25.285714
| 54
| 0.653753
|
6c25fb5eb91b22aff052b3fe3e7c181e2f69b742
| 7,410
|
py
|
Python
|
tests/suite/test_v_s_route_redirects.py
|
jontambi/kubernetes-ingress
|
d5a1cf1db8685ae482fb58522d081a14962c89d7
|
[
"Apache-2.0"
] | 1
|
2020-09-05T14:36:36.000Z
|
2020-09-05T14:36:36.000Z
|
tests/suite/test_v_s_route_redirects.py
|
u2prakash/kubernetes-ingress
|
739e35a78649bca06576f75a5419b47ca34aa08f
|
[
"Apache-2.0"
] | 2
|
2021-06-02T03:17:31.000Z
|
2021-06-02T03:17:38.000Z
|
tests/suite/test_v_s_route_redirects.py
|
u2prakash/kubernetes-ingress
|
739e35a78649bca06576f75a5419b47ca34aa08f
|
[
"Apache-2.0"
] | 1
|
2021-07-10T05:46:21.000Z
|
2021-07-10T05:46:21.000Z
|
import pytest
import requests
from kubernetes.client.rest import ApiException
from settings import TEST_DATA
from suite.custom_assertions import assert_event_and_get_count, wait_and_assert_status_code, \
assert_event_count_increased, assert_event_starts_with_text_and_contains_errors
from suite.custom_resources_utils import get_vs_nginx_template_conf, patch_v_s_route_from_yaml
from suite.resources_utils import get_first_pod_name, get_events, wait_before_test
@pytest.mark.vsr
@pytest.mark.parametrize('crd_ingress_controller, v_s_route_setup',
[({"type": "complete", "extra_args": [f"-enable-custom-resources"]},
{"example": "virtual-server-route-redirects"})],
indirect=True)
class TestVSRRedirects:
def test_config(self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller, v_s_route_setup):
wait_before_test(1)
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
config = get_vs_nginx_template_conf(kube_apis.v1,
v_s_route_setup.namespace,
v_s_route_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
assert 'error_page 418 =307' in config and 'error_page 418 =301' in config
def test_custom_redirect(self, kube_apis, crd_ingress_controller, v_s_route_setup):
req_host = f"{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
req_url = f"http://{req_host}{v_s_route_setup.route_m.paths[0]}?arg1=arg"
wait_and_assert_status_code(307, req_url, v_s_route_setup.vs_host, allow_redirects=False)
resp = requests.get(req_url, headers={"host": v_s_route_setup.vs_host}, allow_redirects=False)
assert resp.headers['location'] == "http://example.com"
def test_default_redirect(self, kube_apis, crd_ingress_controller, v_s_route_setup):
req_host = f"{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
req_url = f"http://{req_host}{v_s_route_setup.route_m.paths[1]}"
wait_and_assert_status_code(301, req_url, v_s_route_setup.vs_host, allow_redirects=False)
resp = requests.get(req_url, headers={"host": v_s_route_setup.vs_host}, allow_redirects=False)
assert resp.headers['location'] == f"http://{v_s_route_setup.vs_host}/backends/default-redirect?arg="
def test_update(self, kube_apis, crd_ingress_controller, v_s_route_setup):
req_host = f"{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
req_url_1 = f"http://{req_host}{v_s_route_setup.route_m.paths[0]}"
req_url_2 = f"http://{req_host}{v_s_route_setup.route_m.paths[1]}"
vs_name = f"{v_s_route_setup.namespace}/{v_s_route_setup.vs_name}"
vsr_name = f"{v_s_route_setup.namespace}/{v_s_route_setup.route_m.name}"
vs_event_text = f"Configuration for {vs_name} was added or updated"
vsr_event_text = f"Configuration for {vsr_name} was added or updated"
wait_before_test(1)
events_ns = get_events(kube_apis.v1, v_s_route_setup.namespace)
initial_count_vs = assert_event_and_get_count(vs_event_text, events_ns)
initial_count_vsr = assert_event_and_get_count(vsr_event_text, events_ns)
vsr_src = f"{TEST_DATA}/virtual-server-route-redirects/route-multiple-updated.yaml"
patch_v_s_route_from_yaml(kube_apis.custom_objects,
v_s_route_setup.route_m.name, vsr_src, v_s_route_setup.namespace)
wait_and_assert_status_code(301, req_url_1, v_s_route_setup.vs_host, allow_redirects=False)
resp = requests.get(req_url_1, headers={"host": v_s_route_setup.vs_host}, allow_redirects=False)
assert resp.headers['location'] == "http://demo.nginx.com"
wait_and_assert_status_code(302, req_url_2, v_s_route_setup.vs_host, allow_redirects=False)
resp = requests.get(req_url_2, headers={"host": v_s_route_setup.vs_host}, allow_redirects=False)
assert resp.headers['location'] == "http://demo.nginx.com"
new_events_ns = get_events(kube_apis.v1, v_s_route_setup.namespace)
assert_event_count_increased(vs_event_text, initial_count_vs, new_events_ns)
assert_event_count_increased(vsr_event_text, initial_count_vsr, new_events_ns)
def test_validation_flow(self, kube_apis, crd_ingress_controller, v_s_route_setup):
req_host = f"{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
req_url = f"http://{req_host}{v_s_route_setup.route_s.paths[0]}"
text = f"{v_s_route_setup.namespace}/{v_s_route_setup.route_m.name}"
event_text = f"VirtualServerRoute {text} is invalid and was rejected: "
invalid_fields = [
"spec.subroutes[0].action.redirect.code", "spec.subroutes[1].action.redirect.url"
]
vsr_src = f"{TEST_DATA}/virtual-server-route-redirects/route-multiple-invalid.yaml"
patch_v_s_route_from_yaml(kube_apis.custom_objects,
v_s_route_setup.route_m.name, vsr_src, v_s_route_setup.namespace)
wait_before_test(2)
wait_and_assert_status_code(404, req_url, v_s_route_setup.vs_host, allow_redirects=False)
events = get_events(kube_apis.v1, v_s_route_setup.route_m.namespace)
assert_event_starts_with_text_and_contains_errors(event_text, events, invalid_fields)
def test_openapi_validation_flow(self, kube_apis, ingress_controller_prerequisites,
crd_ingress_controller, v_s_route_setup):
ic_pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
config_old = get_vs_nginx_template_conf(kube_apis.v1,
v_s_route_setup.namespace,
v_s_route_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
vsr_src = f"{TEST_DATA}/virtual-server-route-redirects/route-multiple-invalid-openapi.yaml"
try:
patch_v_s_route_from_yaml(kube_apis.custom_objects,
v_s_route_setup.route_m.name, vsr_src, v_s_route_setup.namespace)
except ApiException as ex:
assert ex.status == 422 \
and "spec.subroutes.action.redirect.url" in ex.body \
and "spec.subroutes.action.redirect.code" in ex.body
except Exception as ex:
pytest.fail(f"An unexpected exception is raised: {ex}")
else:
pytest.fail("Expected an exception but there was none")
wait_before_test(1)
config_new = get_vs_nginx_template_conf(kube_apis.v1,
v_s_route_setup.namespace,
v_s_route_setup.vs_name,
ic_pod_name,
ingress_controller_prerequisites.namespace)
assert config_old == config_new, "Expected: config doesn't change"
| 65
| 112
| 0.679757
|
d5f50d2ec0d12befc6aa98ae540881334c1e2155
| 9,695
|
py
|
Python
|
fabfile.py
|
simplepractice/card.io-iOS-source
|
41cde7aa1fbe10961ad35f0a4ec4b3329673a4af
|
[
"MIT"
] | null | null | null |
fabfile.py
|
simplepractice/card.io-iOS-source
|
41cde7aa1fbe10961ad35f0a4ec4b3329673a4af
|
[
"MIT"
] | null | null | null |
fabfile.py
|
simplepractice/card.io-iOS-source
|
41cde7aa1fbe10961ad35f0a4ec4b3329673a4af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import atexit
import glob
import os
import re
import shutil
import sys
import tempfile
import textwrap
from fabric.api import env, local, hide
from fabric.context_managers import lcd, settings, shell_env
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
from fabric.decorators import runs_once
from fabric.utils import abort
from fabric import colors
sys.path.append('scripts')
from string_scripts.confirm_ready_for_release import confirm_ready_for_release as _confirm_ready_for_release
# --- Configuration ---------------------------------------------------------
env.verbose = False
env.libname = "libCardIO.a"
env.developer_dir = local("xcode-select -p", capture=True)
# --- Tasks -----------------------------------------------------------------
def verbose(be_verbose=True):
"""
Makes all following tasks more verbose.
"""
env.verbose = be_verbose
def developer_dir(dir):
"""
Sets DEVELOPER_DIR environment variable to correct Xcode
For example, `fab developer_dir:"/Applications/Xcode6.2.app"
"""
if os.path.exists(dir):
env.developer_dir = dir
else:
print(colors.red("{dir} is not a valid path".format(dir=dir), bold=True))
sys.exit(1)
def _locate(fileset, root=os.curdir):
# based on http://code.activestate.com/recipes/499305-locating-files-throughout-a-directory-tree/
"""
Locate supplied files in supplied root directory.
"""
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in files:
if filename in fileset:
yield os.path.join(path, filename)
def _add_version_to_header_file(version_str, file):
lines = []
for line in file.readlines():
lines.append(line)
m = re.match("^(//\s+)CardIO.*\.h$", line)
if m:
lines.append("{0}Version {1}\n".format(m.groups()[0], version_str))
lines.append("//\n")
file.seek(0)
file.truncate()
for line in lines:
file.write(line)
def _version_str(show_dirty=False):
git_describe_cmd = "git describe --match='iOS_[0-9]*.[0-9]*' --tags --always --dirty"
version_str = local(git_describe_cmd, capture=True).strip()[4:]
if not show_dirty:
version_str = version_str.replace('-dirty', '')
return version_str
def _copy(source_files, dest_dir):
for public_header_file in source_files:
with open(public_header_file, "rb") as in_file:
contents = in_file.read()
unused, header_filename = os.path.split(public_header_file)
header_filename = os.path.join(dest_dir, header_filename)
with open(header_filename, "wb") as out_file:
out_file.write(contents)
with open(header_filename, "r+") as out_file:
_add_version_to_header_file(_version_str(), out_file)
def build(outdir=None, device_sdk=None, simulator_sdk=None, **kwargs):
"""
Build card.io SDK.
"""
print(colors.white("Setup", bold=True))
to_hide = [] if env.verbose else ["stdout", "stderr", "running"]
xcode_preprocessor_flags = {}
if not outdir:
message = """
You must provide outdir=<sdk output parent dir>
Example usage:
`fab build:outdir=~` - normal build
`fab build:outdir=~,SCAN_EXPIRY=0` - to disable the experimental expiry-scan feature
"""
abort(textwrap.dedent(message).format(**locals()))
if _confirm_ready_for_release("assets/strings"):
sys.exit(1)
outdir = os.path.abspath(os.path.expanduser(outdir))
print colors.yellow("Will save release sdk to {outdir}".format(outdir=outdir))
out_subdir = "card.io_ios_sdk_{0}".format(_version_str(show_dirty=True))
xcode_preprocessor_flags.update(kwargs)
formatted_xcode_preprocessor_flags = " ".join("{k}={v}".format(k=k, v=v) for k, v in xcode_preprocessor_flags.iteritems())
extra_xcodebuild_settings = "GCC_PREPROCESSOR_DEFINITIONS='$(value) {formatted_xcode_preprocessor_flags}'".format(**locals())
device_sdk = device_sdk or "iphoneos"
simulator_sdk = simulator_sdk or "iphonesimulator"
arch_to_sdk = (
("arm64", device_sdk),
("arm64e", device_sdk),
("x86_64", simulator_sdk)
)
with settings(hide(*to_hide)):
icc_root = local("git rev-parse --show-toplevel", capture=True)
temp_dir = tempfile.mkdtemp() + os.sep
atexit.register(shutil.rmtree, temp_dir, True)
print(colors.white("Preparing dmz", bold=True))
with settings(hide(*to_hide)):
with lcd(os.path.join(icc_root, "dmz")):
dmz_all_filename = os.path.join("dmz", "dmz_all.cpp")
with open(dmz_all_filename) as f:
old_dmz_all = f.read()
local("fab concat")
with open(dmz_all_filename) as f:
new_dmz_all = f.read()
if old_dmz_all != new_dmz_all:
print(colors.red("WARNING: dmz_all.h was not up to date!", bold=True))
print(colors.white("Building", bold=True))
print(colors.white("Using temp dir {temp_dir}".format(**locals())))
print(colors.white("Using extra Xcode flags: {formatted_xcode_preprocessor_flags}".format(**locals())))
print(colors.white("Using developer directory: {}".format(env.developer_dir)))
with lcd(icc_root):
with shell_env(DEVELOPER_DIR=env.developer_dir):
with settings(hide(*to_hide)):
lipo_build_dirs = {}
build_config = "Release"
arch_build_dirs = {}
for arch, sdk in arch_to_sdk:
print(colors.blue("({build_config}) Building {arch}".format(**locals())))
base_xcodebuild_command = "xcrun xcodebuild OTHER_CFLAGS='-fembed-bitcode' -target CardIO-static -arch {arch} -sdk {sdk} -configuration {build_config}".format(**locals())
clean_cmd = "{base_xcodebuild_command} clean".format(**locals())
local(clean_cmd)
build_dir = os.path.join(temp_dir, build_config, arch)
arch_build_dirs[arch] = build_dir
os.makedirs(build_dir)
parallelize = "" if env.verbose else "-parallelizeTargets" # don't parallelize verbose builds, it's hard to read the output
build_cmd = "{base_xcodebuild_command} {parallelize} CONFIGURATION_BUILD_DIR={build_dir} {extra_xcodebuild_settings}".format(**locals())
local(build_cmd)
print(colors.blue("({build_config}) Lipoing".format(**locals())))
lipo_dir = os.path.join(temp_dir, build_config, "universal")
lipo_build_dirs[build_config] = lipo_dir
os.makedirs(lipo_dir)
arch_build_dirs["universal"] = lipo_dir
lipo_cmd = "xcrun lipo " \
" -create" \
" -output {universal}/{libname}" \
" -arch arm64 {arm64}/{libname}" \
" -arch arm64e {arm64e}/{libname}" \
" -arch x86_64 {x86_64}/{libname}".format(libname=env.libname, **arch_build_dirs)
local(lipo_cmd)
print(colors.blue("({build_config}) Stripping debug symbols".format(**locals())))
strip_cmd = "xcrun strip -S {universal}/{libname}".format(libname=env.libname, **arch_build_dirs)
local(strip_cmd)
out_subdir_suffix = "_".join("{k}-{v}".format(k=k, v=v) for k, v in kwargs.iteritems())
if out_subdir_suffix:
out_subdir_suffix = "_" + out_subdir_suffix
out_subdir += out_subdir_suffix
sdk_dir = os.path.join(outdir, out_subdir)
print(colors.white("Assembling release SDK in {sdk_dir}".format(sdk_dir=sdk_dir), bold=True))
if os.path.isdir(sdk_dir):
shutil.rmtree(sdk_dir)
cardio_dir = os.path.join(sdk_dir, "CardIO")
os.makedirs(cardio_dir)
header_files = glob.glob(os.path.join("CardIO_Public_API", "*.h"))
_copy(header_files, cardio_dir)
opencv_libraries = glob.glob(os.path.join("opencv_device/lib/", "*.a"))
_copy(opencv_libraries, cardio_dir)
libfile = os.path.join(lipo_build_dirs["Release"], env.libname)
shutil.copy2(libfile, cardio_dir)
release_dir = os.path.join(icc_root, "Release")
shutil.copy2(os.path.join(release_dir, "release_notes.txt"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "CardIO.podspec"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "acknowledgments.md"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "LICENSE.md"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "README.md"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "CardIO/CardIO.m"), os.path.join(sdk_dir, "CardIO"))
shutil.copytree(os.path.join(release_dir, "SampleApp"), os.path.join(sdk_dir, "SampleApp"), ignore=shutil.ignore_patterns(".DS_Store"))
shutil.copytree(os.path.join(release_dir, "SampleApp-Swift"), os.path.join(sdk_dir, "SampleApp-Swift"), ignore=shutil.ignore_patterns(".DS_Store"))
shutil.make_archive("cardio-release", "zip", sdk_dir)
| 42.152174
| 190
| 0.604642
|
d57d1f280a9fd9f56e2be0815805bfec46613760
| 162
|
py
|
Python
|
savu/plugins/corrections/base_correction_tools.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 39
|
2015-03-30T14:03:42.000Z
|
2022-03-16T16:50:33.000Z
|
savu/plugins/corrections/base_correction_tools.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 670
|
2015-02-11T11:08:09.000Z
|
2022-03-21T09:27:57.000Z
|
savu/plugins/corrections/base_correction_tools.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 54
|
2015-02-13T14:09:52.000Z
|
2022-01-24T13:57:09.000Z
|
from savu.plugins.plugin_tools import PluginTools
class BaseCorrectionTools(PluginTools):
"""A base class for dark and flat field correction plugins.
"""
| 32.4
| 63
| 0.771605
|
25b3252aef3940800ecad37418fc87ee6e321138
| 76,214
|
bzl
|
Python
|
tensorflow/workspace.bzl
|
ml-resources/tensorflow
|
4ecd72b68cd70c3930551aebbf0c80badc301d28
|
[
"Apache-2.0"
] | 1
|
2019-06-19T08:43:26.000Z
|
2019-06-19T08:43:26.000Z
|
tensorflow/workspace.bzl
|
liudgit/tensorflow
|
4ecd72b68cd70c3930551aebbf0c80badc301d28
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/workspace.bzl
|
liudgit/tensorflow
|
4ecd72b68cd70c3930551aebbf0c80badc301d28
|
[
"Apache-2.0"
] | 1
|
2019-06-19T08:43:23.000Z
|
2019-06-19T08:43:23.000Z
|
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "webfiles_external")
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
# Parse the bazel version string from `native.bazel_version`.
def _parse_bazel_version(bazel_version):
# Remove commit from version.
version = bazel_version.split(" ", 1)[0]
# Split into (release, date) parts and only return the release
# as a tuple of integers.
parts = version.split('-', 1)
# Turn "release" into a tuple of strings
version_tuple = ()
for number in parts[0].split('.'):
version_tuple += (str(number),)
return version_tuple
# Check that a specific bazel version is being used.
def check_version(bazel_version):
if "bazel_version" not in dir(native):
fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" % bazel_version)
elif not native.bazel_version:
print("\nCurrent Bazel is not a release version, cannot check for compatibility.")
print("Make sure that you are running at least Bazel %s.\n" % bazel_version)
else:
current_bazel_version = _parse_bazel_version(native.bazel_version)
minimum_bazel_version = _parse_bazel_version(bazel_version)
if minimum_bazel_version > current_bazel_version:
fail("\nCurrent Bazel version is {}, expected at least {}\n".format(
native.bazel_version, bazel_version))
pass
# Temporary workaround to support including TensorFlow as a submodule until this
# use-case is supported in the next Bazel release.
def _temp_workaround_http_archive_impl(repo_ctx):
repo_ctx.template("BUILD", repo_ctx.attr.build_file,
{"%ws%": repo_ctx.attr.repository}, False)
repo_ctx.download_and_extract(repo_ctx.attr.urls, "", repo_ctx.attr.sha256,
"", repo_ctx.attr.strip_prefix)
temp_workaround_http_archive = repository_rule(
implementation=_temp_workaround_http_archive_impl,
attrs = {
"build_file": attr.label(),
"repository": attr.string(),
"urls": attr.string_list(default = []),
"sha256": attr.string(default = ""),
"strip_prefix": attr.string(default = ""),
})
# If TensorFlow is linked as a submodule.
# path_prefix and tf_repo_name are no longer used.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
cuda_configure(name = "local_config_cuda")
sycl_configure(name = "local_config_sycl")
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used and will be removed in the future.")
if tf_repo_name:
print("tf_repo_name was specified to tf_workspace but is no longer used and will be removed in the future.")
native.new_http_archive(
name = "eigen_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/bitbucket.org/eigen/eigen/get/60578b474802.tar.gz",
"https://bitbucket.org/eigen/eigen/get/60578b474802.tar.gz",
],
sha256 = "7527cda827aff351981ebd910012e16be4d899c28a9ae7f143ae60e7f3f7b83d",
strip_prefix = "eigen-eigen-60578b474802",
build_file = str(Label("//third_party:eigen.BUILD")),
)
native.new_http_archive(
name = "libxsmm_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/hfp/libxsmm/archive/1.7.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.7.tar.gz",
],
sha256 = "2eea65624a697e74b939511cd2a686b4c957e90c99be168fe134d96771e811ad",
strip_prefix = "libxsmm-1.7",
build_file = str(Label("//third_party:libxsmm.BUILD")),
)
native.bind(
name = "xsmm_avx",
actual = "@libxsmm_archive//third_party:xsmm_avx",
)
native.http_archive(
name = "com_googlesource_code_re2",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/re2/archive/b94b7cd42e9f02673cd748c1ac1d16db4052514c.tar.gz",
"https://github.com/google/re2/archive/b94b7cd42e9f02673cd748c1ac1d16db4052514c.tar.gz",
],
sha256 = "bd63550101e056427c9e7ff12a408c1c8b74e9803f393ca916b2926fc2c4906f",
strip_prefix = "re2-b94b7cd42e9f02673cd748c1ac1d16db4052514c",
)
native.http_archive(
name = "gemmlowp",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/gemmlowp/archive/a6f29d8ac48d63293f845f2253eccbf86bc28321.tar.gz",
"https://github.com/google/gemmlowp/archive/a6f29d8ac48d63293f845f2253eccbf86bc28321.tar.gz",
],
sha256 = "75d40ea8e68b0d1644f052fffe8f14a410b2a73d40ccb859a95c0578d194ec26",
strip_prefix = "gemmlowp-a6f29d8ac48d63293f845f2253eccbf86bc28321",
)
native.new_http_archive(
name = "farmhash_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/farmhash/archive/92e897b282426729f4724d91a637596c7e2fe28f.zip",
"https://github.com/google/farmhash/archive/92e897b282426729f4724d91a637596c7e2fe28f.zip",
],
sha256 = "4c626d1f306bda2c6804ab955892f803f5245f4dcaecb4979dc08b091256da54",
strip_prefix = "farmhash-92e897b282426729f4724d91a637596c7e2fe28f",
build_file = str(Label("//third_party:farmhash.BUILD")),
)
native.bind(
name = "farmhash",
actual = "@farmhash//:farmhash",
)
native.new_http_archive(
name = "highwayhash",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/highwayhash/archive/dfcb97ca4fe9277bf9dc1802dd979b071896453b.tar.gz",
"https://github.com/google/highwayhash/archive/dfcb97ca4fe9277bf9dc1802dd979b071896453b.tar.gz",
],
sha256 = "0f30a15b1566d93f146c8d149878a06e91d9bb7ec2cfd76906df62a82be4aac9",
strip_prefix = "highwayhash-dfcb97ca4fe9277bf9dc1802dd979b071896453b",
build_file = str(Label("//third_party:highwayhash.BUILD")),
)
native.new_http_archive(
name = "nasm",
urls = [
"http://bazel-mirror.storage.googleapis.com/www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
"http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.12.02.tar.bz2/d15843c3fb7db39af80571ee27ec6fad/nasm-2.12.02.tar.bz2",
],
sha256 = "00b0891c678c065446ca59bcee64719d0096d54d6886e6e472aeee2e170ae324",
strip_prefix = "nasm-2.12.02",
build_file = str(Label("//third_party:nasm.BUILD")),
)
temp_workaround_http_archive(
name = "jpeg",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.1.tar.gz",
"https://github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.1.tar.gz",
],
sha256 = "c15a9607892113946379ccea3ca8b85018301b200754f209453ab21674268e77",
strip_prefix = "libjpeg-turbo-1.5.1",
build_file = str(Label("//third_party/jpeg:jpeg.BUILD")),
repository = tf_repo_name,
)
native.new_http_archive(
name = "png_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/glennrp/libpng/archive/v1.2.53.zip",
"https://github.com/glennrp/libpng/archive/v1.2.53.zip",
],
sha256 = "c35bcc6387495ee6e757507a68ba036d38ad05b415c2553b3debe2a57647a692",
strip_prefix = "libpng-1.2.53",
build_file = str(Label("//third_party:png.BUILD")),
)
native.new_http_archive(
name = "gif_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/ufpr.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
"http://ufpr.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
],
sha256 = "34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1",
strip_prefix = "giflib-5.1.4",
build_file = str(Label("//third_party:gif.BUILD")),
)
native.new_http_archive(
name = "six_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
"http://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
],
sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
strip_prefix = "six-1.10.0",
build_file = str(Label("//third_party:six.BUILD")),
)
native.new_http_archive(
name = "org_pocoo_werkzeug",
urls = [
"http://bazel-mirror.storage.googleapis.com/pypi.python.org/packages/b7/7f/44d3cfe5a12ba002b253f6985a4477edfa66da53787a2a838a40f6415263/Werkzeug-0.11.10.tar.gz",
"https://pypi.python.org/packages/b7/7f/44d3cfe5a12ba002b253f6985a4477edfa66da53787a2a838a40f6415263/Werkzeug-0.11.10.tar.gz",
],
strip_prefix = "Werkzeug-0.11.10",
sha256 = "cc64dafbacc716cdd42503cf6c44cb5a35576443d82f29f6829e5c49264aeeee",
build_file = str(Label("//third_party:werkzeug.BUILD")),
)
native.bind(
name = "six",
actual = "@six_archive//:six",
)
native.http_archive(
name = "protobuf",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/protobuf/archive/9d3288e651700f3d52e6b4ead2a9f9ab02da53f4.tar.gz",
"https://github.com/google/protobuf/archive/9d3288e651700f3d52e6b4ead2a9f9ab02da53f4.tar.gz",
],
sha256 = "4663e886f9bbea0121ce424e1620997a37d38c6299dc82183223a0401bbf70ed",
strip_prefix = "protobuf-9d3288e651700f3d52e6b4ead2a9f9ab02da53f4",
)
native.new_http_archive(
name = "gmock_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/pkgs.fedoraproject.org/repo/pkgs/gmock/gmock-1.7.0.zip/073b984d8798ea1594f5e44d85b20d66/gmock-1.7.0.zip",
"http://pkgs.fedoraproject.org/repo/pkgs/gmock/gmock-1.7.0.zip/073b984d8798ea1594f5e44d85b20d66/gmock-1.7.0.zip",
],
sha256 = "26fcbb5925b74ad5fc8c26b0495dfc96353f4d553492eb97e85a8a6d2f43095b",
strip_prefix = "gmock-1.7.0",
build_file = str(Label("//third_party:gmock.BUILD")),
)
native.bind(
name = "gtest",
actual = "@gmock_archive//:gtest",
)
native.bind(
name = "gtest_main",
actual = "@gmock_archive//:gtest_main",
)
native.bind(
name = "python_headers",
actual = str(Label("//util/python:python_headers")),
)
native.new_http_archive(
name = "pcre",
sha256 = "ccdf7e788769838f8285b3ee672ed573358202305ee361cfec7a4a4fb005bbc7",
urls = [
"http://bazel-mirror.storage.googleapis.com/ftp.exim.org/pub/pcre/pcre-8.39.tar.gz",
"http://ftp.exim.org/pub/pcre/pcre-8.39.tar.gz",
],
strip_prefix = "pcre-8.39",
build_file = str(Label("//third_party:pcre.BUILD")),
)
native.new_http_archive(
name = "swig",
sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
urls = [
"http://bazel-mirror.storage.googleapis.com/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
],
strip_prefix = "swig-3.0.8",
build_file = str(Label("//third_party:swig.BUILD")),
)
native.new_http_archive(
name = "curl",
sha256 = "ff3e80c1ca6a068428726cd7dd19037a47cc538ce58ef61c59587191039b2ca6",
urls = [
"http://bazel-mirror.storage.googleapis.com/curl.haxx.se/download/curl-7.49.1.tar.gz",
"https://curl.haxx.se/download/curl-7.49.1.tar.gz",
],
strip_prefix = "curl-7.49.1",
build_file = str(Label("//third_party:curl.BUILD")),
)
# grpc expects //external:protobuf_clib and //external:protobuf_compiler
# to point to the protobuf's compiler library.
native.bind(
name = "protobuf_clib",
actual = "@protobuf//:protoc_lib",
)
native.bind(
name = "protobuf_compiler",
actual = "@protobuf//:protoc_lib",
)
native.new_http_archive(
name = "grpc",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/grpc/grpc/archive/d7ff4ff40071d2b486a052183e3e9f9382afb745.tar.gz",
"https://github.com/grpc/grpc/archive/d7ff4ff40071d2b486a052183e3e9f9382afb745.tar.gz",
],
sha256 = "a15f352436ab92c521b1ac11e729e155ace38d0856380cf25048c5d1d9ba8e31",
strip_prefix = "grpc-d7ff4ff40071d2b486a052183e3e9f9382afb745",
build_file = str(Label("//third_party:grpc.BUILD")),
)
# protobuf expects //external:grpc_cpp_plugin to point to grpc's
# C++ plugin code generator.
native.bind(
name = "grpc_cpp_plugin",
actual = "@grpc//:grpc_cpp_plugin",
)
native.bind(
name = "grpc_lib",
actual = "@grpc//:grpc++_unsecure",
)
native.new_http_archive(
name = "linenoise",
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
build_file = str(Label("//third_party:linenoise.BUILD")),
)
# TODO(phawkins): currently, this rule uses an unofficial LLVM mirror.
# Switch to an official source of snapshots if/when possible.
temp_workaround_http_archive(
name = "llvm",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/llvm-mirror/llvm/archive/2276fd31f36aa58f39397c435a8be6632d8c8505.tar.gz",
"https://github.com/llvm-mirror/llvm/archive/2276fd31f36aa58f39397c435a8be6632d8c8505.tar.gz",
],
sha256 = "0e08c91752732227280466d12f330a5854569deddf28ff4a6c3898334dbb0d16",
strip_prefix = "llvm-2276fd31f36aa58f39397c435a8be6632d8c8505",
build_file = str(Label("//third_party/llvm:llvm.BUILD")),
repository = tf_repo_name,
)
native.new_http_archive(
name = "jsoncpp_git",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/open-source-parsers/jsoncpp/archive/11086dd6a7eba04289944367ca82cea71299ed70.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/11086dd6a7eba04289944367ca82cea71299ed70.tar.gz",
],
sha256 = "07d34db40593d257324ec5fb9debc4dc33f29f8fb44e33a2eeb35503e61d0fe2",
strip_prefix = "jsoncpp-11086dd6a7eba04289944367ca82cea71299ed70",
build_file = str(Label("//third_party:jsoncpp.BUILD")),
)
native.bind(
name = "jsoncpp",
actual = "@jsoncpp_git//:jsoncpp",
)
native.http_archive(
name = "boringssl",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/boringssl/archive/bbcaa15b0647816b9a1a9b9e0d209cd6712f0105.tar.gz",
"https://github.com/google/boringssl/archive/bbcaa15b0647816b9a1a9b9e0d209cd6712f0105.tar.gz", # 2016-07-11
],
sha256 = "025264d6e9a7ad371f2f66d17a28b6627de0c9592dc2eb54afd062f68f1f9aa3",
strip_prefix = "boringssl-bbcaa15b0647816b9a1a9b9e0d209cd6712f0105",
)
native.new_http_archive(
name = "nanopb_git",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/nanopb/nanopb/archive/1251fa1065afc0d62f635e0f63fec8276e14e13c.tar.gz",
"https://github.com/nanopb/nanopb/archive/1251fa1065afc0d62f635e0f63fec8276e14e13c.tar.gz",
],
sha256 = "ab1455c8edff855f4f55b68480991559e51c11e7dab060bbab7cffb12dd3af33",
strip_prefix = "nanopb-1251fa1065afc0d62f635e0f63fec8276e14e13c",
build_file = str(Label("//third_party:nanopb.BUILD")),
)
native.bind(
name = "nanopb",
actual = "@nanopb_git//:nanopb",
)
native.new_http_archive(
name = "zlib_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/zlib.net/zlib-1.2.8.tar.gz",
"http://zlib.net/fossils/zlib-1.2.8.tar.gz",
],
sha256 = "36658cb768a54c1d4dec43c3116c27ed893e88b02ecfcb44f2166f9c0b7f2a0d",
strip_prefix = "zlib-1.2.8",
build_file = str(Label("//third_party:zlib.BUILD")),
)
native.bind(
name = "zlib",
actual = "@zlib_archive//:zlib",
)
native.new_http_archive(
name = "nccl_archive",
url = "https://github.com/nvidia/nccl/archive/024d1e267845f2ed06f3e2e42476d50f04a00ee6.tar.gz",
sha256 = "6787f0eed88d52ee8e32956fa4947d92c139da469f1d8e311c307f27d641118e",
strip_prefix = "nccl-024d1e267845f2ed06f3e2e42476d50f04a00ee6",
build_file = str(Label("//third_party:nccl.BUILD")),
)
# Make junit-4.12 available as //external:junit
native.http_jar(
name = "junit_jar",
url = "https://github.com/junit-team/junit4/releases/download/r4.12/junit-4.12.jar",
sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
)
native.bind(
name = "junit",
actual = "@junit_jar//jar",
)
temp_workaround_http_archive(
name = "jemalloc",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz",
"https://github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz",
],
sha256 = "3c8f25c02e806c3ce0ab5fb7da1817f89fc9732709024e2a81b6b82f7cc792a8",
strip_prefix = "jemalloc-4.4.0",
build_file = str(Label("//third_party:jemalloc.BUILD")),
repository = tf_repo_name,
)
##############################################################################
# TensorBoard Build Tools
filegroup_external(
name = "org_nodejs",
# MIT with portions licensed:
# - MIT
# - Old MIT
# - 2-Clause-BSD
# - 3-Clause-BSD
# - ISC
# - Unicode
# - zlib
# - Artistic 2.0
licenses = ["notice"],
sha256_urls_extract_macos = {
"47109a00cac344d80296c195451bb5eee7c21727fcef1594384ddfe1f852957a": [
"http://bazel-mirror.storage.googleapis.com/nodejs.org/dist/v4.3.2/node-v4.3.2-darwin-x64.tar.xz",
"http://nodejs.org/dist/v4.3.2/node-v4.3.2-darwin-x64.tar.xz",
],
},
sha256_urls_windows = {
"606c44c42d17866c017c50c0afadad411d9492ac4281d2431b937f881911614e": [
"http://bazel-mirror.storage.googleapis.com/nodejs.org/dist/v4.3.2/win-x64/node.exe",
"http://nodejs.org/dist/v4.3.2/win-x64/node.exe",
],
"451a40570099a95488d6438f175813629e0430f87f23c8659bc18dc42494820a": [
"http://bazel-mirror.storage.googleapis.com/nodejs.org/dist/v4.3.2/win-x64/node.lib",
"http://nodejs.org/dist/v4.3.2/win-x64/node.lib",
],
},
sha256_urls_extract = {
"4350d0431b49697517c6cca5d66adf5f74eb9101c52f52ae959fa94225822d44": [
"http://bazel-mirror.storage.googleapis.com/nodejs.org/dist/v4.3.2/node-v4.3.2-linux-x64.tar.xz",
"http://nodejs.org/dist/v4.3.2/node-v4.3.2-linux-x64.tar.xz",
],
},
strip_prefix = {
"node-v4.3.2-darwin-x64.tar.xz": "node-v4.3.2-darwin-x64",
"node-v4.3.2-linux-x64.tar.xz": "node-v4.3.2-linux-x64",
},
executable = [
"node",
"node.exe",
],
# POSTED: Email jart@google.com before changing this whitelist.
visibility = ["@com_microsoft_typescript//:__pkg__"],
)
filegroup_external(
name = "com_microsoft_typescript",
licenses = ["notice"], # Apache 2.0
sha256_urls = {
"92ae664a574c87a60ed0dc3aa08a28e366477ae40bc7ab23b512710d5c5b51cc": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/Microsoft/TypeScript/v2.0.6/lib/tsc.js",
"https://raw.githubusercontent.com/Microsoft/TypeScript/v2.0.6/lib/tsc.js",
],
"f4de46e04293569a666f2045f850d90e16dc8ba059af02b5a062942245007a71": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/Microsoft/TypeScript/v2.0.6/lib/lib.es6.d.ts",
"https://raw.githubusercontent.com/Microsoft/TypeScript/v2.0.6/lib/lib.es6.d.ts",
],
},
extra_build_file_content = "\n".join([
"sh_binary(",
" name = \"tsc\",",
" srcs = [\"tsc.sh\"],",
" data = [",
" \"tsc.js\",",
" \"@org_nodejs\",",
" ],",
")",
"",
"genrule(",
" name = \"tsc_sh\",",
" outs = [\"tsc.sh\"],",
" cmd = \"cat >$@ <<'EOF'\\n\" +",
" \"#!/bin/bash\\n\" +",
" \"NODE=external/org_nodejs/bin/node\\n\" +",
" \"if [[ -e external/org_nodejs/node.exe ]]; then\\n\" +",
" \" NODE=external/org_nodejs/node.exe\\n\" +",
" \"fi\\n\" +",
" \"exec $${NODE} external/com_microsoft_typescript/tsc.js \\\"$$@\\\"\\n\" +",
" \"EOF\",",
" executable = True,",
")",
]),
)
##############################################################################
# TensorBoard JavaScript Production Dependencies
filegroup_external(
name = "com_lodash",
licenses = ["notice"], # MIT
sha256_urls = {
"7c7b391810bc08cf815683431857c51b5ee190062ae4f557e1e4689d6dd910ea": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/lodash/lodash/3.8.0/lodash.js",
"https://raw.githubusercontent.com/lodash/lodash/3.8.0/lodash.js",
],
},
)
filegroup_external(
name = "com_numericjs",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"dfaca3b8485bee735788cc6eebca82ea25719adc1fb8911c7799c6bd5a95df3b": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/sloisel/numeric/v1.2.6/src/numeric.js",
"https://raw.githubusercontent.com/sloisel/numeric/v1.2.6/src/numeric.js",
],
},
)
filegroup_external(
name = "com_palantir_plottable",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"77510d7538dbd3b59f1c8a06f68131b38562e3be546364747618d5112723e818": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/palantir/plottable/v1.16.1/plottable.css",
"https://raw.githubusercontent.com/palantir/plottable/v1.16.1/plottable.css",
],
"cd46dc709b01cd361e8399f797760871a6a207bc832e08fcff385ced02ef2b43": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/palantir/plottable/v1.16.1/plottable.d.ts",
"https://raw.githubusercontent.com/palantir/plottable/v1.16.1/plottable.d.ts",
],
"32647b0fb4175fa875a71e6d56c761b88d975186ed6a8820e2c7854165a8988d": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/palantir/plottable/v1.16.1/plottable.js",
"https://raw.githubusercontent.com/palantir/plottable/v1.16.1/plottable.js",
],
},
)
filegroup_external(
name = "io_github_cpettitt_dagre",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"7323829ddd77924a69e2b1235ded3eac30acd990da0f037e0fbd3c8e9035b50d": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/cpettitt/dagre/v0.7.4/dist/dagre.core.js",
"https://raw.githubusercontent.com/cpettitt/dagre/v0.7.4/dist/dagre.core.js",
],
},
)
filegroup_external(
name = "io_github_cpettitt_graphlib",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"772045d412b1513b549be991c2e1846c38019429d43974efcae943fbe83489bf": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/cpettitt/graphlib/v1.0.7/dist/graphlib.core.js",
"https://raw.githubusercontent.com/cpettitt/graphlib/v1.0.7/dist/graphlib.core.js",
],
},
)
filegroup_external(
name = "io_github_waylonflinn_weblas",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"f138fce57f673ca8a633f4aee5ae5b6fcb6ad0de59069a42a74e996fd04d8fcc": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/dist/weblas.js",
"https://raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/dist/weblas.js",
],
},
)
filegroup_external(
name = "org_d3js",
# no @license header
licenses = ["notice"], # BSD-3-Clause
sha256_urls = {
"bc1e38838f5c5c8e040132d41efee6bfddbef728210bd566479dc1694af1d3f5": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/d3/d3/v3.5.15/d3.js",
"https://raw.githubusercontent.com/d3/d3/v3.5.15/d3.js",
],
},
)
filegroup_external(
name = "org_definitelytyped",
licenses = ["notice"], # MIT
sha256_urls = {
"b7da645f6e5555feb7aeede73775da0023ce2257df9c8e76c9159266035a9c0d": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/ebc69904eb78f94030d5d517b42db20867f679c0/chai/chai.d.ts",
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/ebc69904eb78f94030d5d517b42db20867f679c0/chai/chai.d.ts",
],
"177293828c7a206bf2a7f725753d51396d38668311aa37c96445f91bbf8128a7": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/6e2f2280ef16ef277049d0ce8583af167d586c59/d3/d3.d.ts",
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/6e2f2280ef16ef277049d0ce8583af167d586c59/d3/d3.d.ts",
],
"e4cd3d5de0eb3bc7b1063b50d336764a0ac82a658b39b5cf90511f489ffdee60": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/efd40e67ff323f7147651bdbef03c03ead7b1675/lodash/lodash.d.ts",
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/efd40e67ff323f7147651bdbef03c03ead7b1675/lodash/lodash.d.ts",
],
"695a03dd2ccb238161d97160b239ab841562710e5c4e42886aefd4ace2ce152e": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/ebc69904eb78f94030d5d517b42db20867f679c0/mocha/mocha.d.ts",
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/ebc69904eb78f94030d5d517b42db20867f679c0/mocha/mocha.d.ts",
],
},
)
filegroup_external(
name = "org_threejs",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"7aff264bd84c90bed3c72a4dc31db8c19151853c6df6980f52b01d3e9872c82d": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/mrdoob/three.js/ad419d40bdaab80abbb34b8f359b4ee840033a02/build/three.js",
"https://raw.githubusercontent.com/mrdoob/three.js/ad419d40bdaab80abbb34b8f359b4ee840033a02/build/three.js",
],
"0e98ded15bb7fe398a655667e76b39909d36c0973a8950d01c62f65f93161c27": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/mrdoob/three.js/ad419d40bdaab80abbb34b8f359b4ee840033a02/examples/js/controls/OrbitControls.js",
"https://raw.githubusercontent.com/mrdoob/three.js/ad419d40bdaab80abbb34b8f359b4ee840033a02/examples/js/controls/OrbitControls.js",
],
},
)
##############################################################################
# TensorBoard JavaScript Testing Dependencies
filegroup_external(
name = "com_chaijs",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"b926b325ad9843bf0b7a6d580ef78bb560e47c484b98680098d4fd9b31b77cd9": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/chaijs/chai/2.3.0/chai.js",
"https://raw.githubusercontent.com/chaijs/chai/2.3.0/chai.js",
],
},
)
filegroup_external(
name = "org_mochajs",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"e36d865a17ffdf5868e55e736526ae30f3d4bc667c85a2a28cd5c850a82361e2": [
"http://bazel-mirror.storage.googleapis.com/raw.githubusercontent.com/mochajs/mocha/2.3.4/mocha.js",
"https://raw.githubusercontent.com/mochajs/mocha/2.3.4/mocha.js",
],
},
)
##############################################################################
# TensorBoard Polymer Dependencies
webfiles_external(
name = "org_polymer_font_roboto",
licenses = ["notice"], # BSD-3-Clause
sha256 = "fae51429b56a4a4c15f1f0c23b733c7095940cc9c04c275fa7adb3bf055b23b3",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/font-roboto/archive/v1.0.1.tar.gz",
"https://github.com/PolymerElements/font-roboto/archive/v1.0.1.tar.gz",
],
strip_prefix = "font-roboto-1.0.1",
path = "/font-roboto",
srcs = ["roboto.html"],
)
webfiles_external(
name = "org_polymer_iron_a11y_announcer",
licenses = ["notice"], # BSD-3-Clause
sha256 = "6bce143db7a374a68535ec8b861a5f30e81f2f1e4ee36a55bda2a891f6fd2818",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-a11y-announcer/archive/v1.0.5.tar.gz",
"https://github.com/PolymerElements/iron-a11y-announcer/archive/v1.0.5.tar.gz",
],
strip_prefix = "iron-a11y-announcer-1.0.5",
path = "/iron-a11y-announcer",
srcs = ["iron-a11y-announcer.html"],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_a11y_keys_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "6823efc47a83208fd51d39c5a1d3eb0c0bebc705df1ce01310509da22a13ebd2",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-a11y-keys-behavior/archive/v1.1.8.tar.gz",
"https://github.com/PolymerElements/iron-a11y-keys-behavior/archive/v1.1.8.tar.gz",
],
strip_prefix = "iron-a11y-keys-behavior-1.1.8",
path = "/iron-a11y-keys-behavior",
srcs = ["iron-a11y-keys-behavior.html"],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_ajax",
licenses = ["notice"], # BSD-3-Clause
sha256 = "9162d8af4611e911ac3ebbfc08bb7038ac04f6e79a9287b1476fe36ad6770bc5",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-ajax/archive/v1.2.0.tar.gz",
"https://github.com/PolymerElements/iron-ajax/archive/v1.2.0.tar.gz",
],
strip_prefix = "iron-ajax-1.2.0",
path = "/iron-ajax",
srcs = [
"iron-ajax.html",
"iron-request.html",
],
deps = [
"@org_polymer",
"@org_polymer_promise_polyfill",
],
)
webfiles_external(
name = "org_polymer_iron_autogrow_textarea",
licenses = ["notice"], # BSD-3-Clause
sha256 = "50bbb901d2c8f87462e3552e3d671a552faa12c37c485e548d7a234ebffbc427",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-autogrow-textarea/archive/v1.0.12.tar.gz",
"https://github.com/PolymerElements/iron-autogrow-textarea/archive/v1.0.12.tar.gz",
],
strip_prefix = "iron-autogrow-textarea-1.0.12",
path = "/iron-autogrow-textarea",
srcs = ["iron-autogrow-textarea.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_form_element_behavior",
"@org_polymer_iron_validatable_behavior",
],
)
webfiles_external(
name = "org_polymer_iron_behaviors",
licenses = ["notice"], # BSD-3-Clause
sha256 = "a1e8d4b7a13f3d36beba9c2a6b186ed33a53e6af2e79f98c1fcc7e85e7b53f89",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-behaviors/archive/v1.0.17.tar.gz",
"https://github.com/PolymerElements/iron-behaviors/archive/v1.0.17.tar.gz",
],
strip_prefix = "iron-behaviors-1.0.17",
path = "/iron-behaviors",
srcs = [
"iron-button-state.html",
"iron-control-state.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
],
)
webfiles_external(
name = "org_polymer_iron_checked_element_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "539a0e1c4df0bc702d3bd342388e4e56c77ec4c2066cce69e41426a69f92e8bd",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-checked-element-behavior/archive/v1.0.4.tar.gz",
"https://github.com/PolymerElements/iron-checked-element-behavior/archive/v1.0.4.tar.gz",
],
strip_prefix = "iron-checked-element-behavior-1.0.4",
path = "/iron-checked-element-behavior",
srcs = ["iron-checked-element-behavior.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_form_element_behavior",
"@org_polymer_iron_validatable_behavior",
],
)
webfiles_external(
name = "org_polymer_iron_collapse",
licenses = ["notice"], # BSD-3-Clause
sha256 = "275808994a609a2f9923e2dd2db1957945ab141ba840eadc33f19e1f406d600e",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-collapse/archive/v1.0.8.tar.gz",
"https://github.com/PolymerElements/iron-collapse/archive/v1.0.8.tar.gz",
],
strip_prefix = "iron-collapse-1.0.8",
path = "/iron-collapse",
srcs = ["iron-collapse.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_resizable_behavior",
],
)
webfiles_external(
name = "org_polymer_iron_demo_helpers",
licenses = ["notice"], # BSD-3-Clause
sha256 = "aa7458492a6ac3d1f6344640a4c2ab07bce64e7ad0422b83b5d665707598cce6",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-demo-helpers/archive/v1.1.0.tar.gz",
"https://github.com/PolymerElements/iron-demo-helpers/archive/v1.1.0.tar.gz",
],
strip_prefix = "iron-demo-helpers-1.1.0",
path = "/iron-demo-helpers",
srcs = [
"demo-pages-shared-styles.html",
"demo-snippet.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_icons",
"@org_polymer_marked_element",
"@org_polymer_paper_icon_button",
"@org_polymer_paper_styles",
"@org_polymer_prism_element",
],
)
webfiles_external(
name = "org_polymer_iron_dropdown",
licenses = ["notice"], # BSD-3-Clause
sha256 = "f7e4a31d096d10d8af1920397695cb17f3eb1cbe5e5ff91a861dabfcc085f376",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-dropdown/archive/v1.4.0.tar.gz",
"https://github.com/PolymerElements/iron-dropdown/archive/v1.4.0.tar.gz",
],
strip_prefix = "iron-dropdown-1.4.0",
path = "/iron-dropdown",
srcs = [
"iron-dropdown.html",
"iron-dropdown-scroll-manager.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_overlay_behavior",
"@org_polymer_iron_resizable_behavior",
"@org_polymer_neon_animation",
],
)
webfiles_external(
name = "org_polymer_iron_fit_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "10132a2ea309a37c4c07b8fead71f64abc588ee6107931e34680f5f36dd8291e",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-fit-behavior/archive/v1.2.5.tar.gz",
"https://github.com/PolymerElements/iron-fit-behavior/archive/v1.2.5.tar.gz",
],
strip_prefix = "iron-fit-behavior-1.2.5",
path = "/iron-fit-behavior",
srcs = ["iron-fit-behavior.html"],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_flex_layout",
licenses = ["notice"], # BSD-3-Clause
sha256 = "79287f6ca1c2d4e003f68b88fe19d03a1b6a0011e2b4cae579fe4d1474163a2e",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-flex-layout/archive/v1.3.0.tar.gz",
"https://github.com/PolymerElements/iron-flex-layout/archive/v1.3.0.tar.gz",
],
strip_prefix = "iron-flex-layout-1.3.0",
path = "/iron-flex-layout",
srcs = [
"classes/iron-flex-layout.html",
"classes/iron-shadow-flex-layout.html",
"iron-flex-layout.html",
"iron-flex-layout-classes.html",
],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_form_element_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "1dd9371c638e5bc2ecba8a64074aa680dfb8712198e9612f9ed24d387efc8f26",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-form-element-behavior/archive/v1.0.6.tar.gz",
"https://github.com/PolymerElements/iron-form-element-behavior/archive/v1.0.6.tar.gz",
],
strip_prefix = "iron-form-element-behavior-1.0.6",
path = "/iron-form-element-behavior",
srcs = ["iron-form-element-behavior.html"],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_icon",
licenses = ["notice"], # BSD-3-Clause
sha256 = "9ed58a69159a02c07a6050d242e6d4e585a29f3245b8c8c390cfd52ddb786dc4",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-icon/archive/v1.0.11.tar.gz",
"https://github.com/PolymerElements/iron-icon/archive/v1.0.11.tar.gz",
],
strip_prefix = "iron-icon-1.0.11",
path = "/iron-icon",
srcs = ["iron-icon.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_meta",
],
)
webfiles_external(
name = "org_polymer_iron_icons",
licenses = ["notice"], # BSD-3-Clause
sha256 = "3b18542c147c7923dc3a36b1a51984a73255d610f297d43c9aaccc52859bd0d0",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-icons/archive/v1.1.3.tar.gz",
"https://github.com/PolymerElements/iron-icons/archive/v1.1.3.tar.gz",
],
strip_prefix = "iron-icons-1.1.3",
path = "/iron-icons",
srcs = [
"av-icons.html",
"communication-icons.html",
"device-icons.html",
"editor-icons.html",
"hardware-icons.html",
"image-icons.html",
"iron-icons.html",
"maps-icons.html",
"notification-icons.html",
"places-icons.html",
"social-icons.html",
],
deps = [
"@org_polymer_iron_icon",
"@org_polymer_iron_iconset_svg",
],
)
webfiles_external(
name = "org_polymer_iron_iconset_svg",
licenses = ["notice"], # BSD-3-Clause
sha256 = "7e3925b7e63a7d22524c4b43ce16ab80d06a576649644783643c11a003284368",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-iconset-svg/archive/v1.1.0.tar.gz",
"https://github.com/PolymerElements/iron-iconset-svg/archive/v1.1.0.tar.gz",
],
strip_prefix = "iron-iconset-svg-1.1.0",
path = "/iron-iconset-svg",
srcs = ["iron-iconset-svg.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_meta",
],
)
webfiles_external(
name = "org_polymer_iron_input",
licenses = ["notice"], # BSD-3-Clause
sha256 = "c505101ead08ab25526b1f49baecc8c28b4221b92a65e7334c783bdc81553c36",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-input/archive/1.0.10.tar.gz",
"https://github.com/PolymerElements/iron-input/archive/1.0.10.tar.gz",
],
strip_prefix = "iron-input-1.0.10",
path = "/iron-input",
srcs = ["iron-input.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_announcer",
"@org_polymer_iron_validatable_behavior",
],
)
webfiles_external(
name = "org_polymer_iron_list",
licenses = ["notice"], # BSD-3-Clause
sha256 = "72a6530b9f0ad5557f5d287845792a0ada74d8b159198e27f940e226313dc116",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-list/archive/v1.3.9.tar.gz",
"https://github.com/PolymerElements/iron-list/archive/v1.3.9.tar.gz",
],
strip_prefix = "iron-list-1.3.9",
path = "/iron-list",
srcs = ["iron-list.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_resizable_behavior",
"@org_polymer_iron_scroll_target_behavior",
],
)
webfiles_external(
name = "org_polymer_iron_menu_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "ad27889343bc9a709258b073f69abc028bb1ffd3fdb975cd2d3939f7f5d7bb6c",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-menu-behavior/archive/v1.1.10.tar.gz",
"https://github.com/PolymerElements/iron-menu-behavior/archive/v1.1.10.tar.gz",
],
strip_prefix = "iron-menu-behavior-1.1.10",
path = "/iron-menu-behavior",
srcs = [
"iron-menu-behavior.html",
"iron-menubar-behavior.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_selector",
],
)
webfiles_external(
name = "org_polymer_iron_meta",
licenses = ["notice"], # BSD-3-Clause
sha256 = "fb05e6031bae6b4effe5f15d44b3f548d5807f9e3b3aa2442ba17cf4b8b84361",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-meta/archive/v1.1.1.tar.gz",
"https://github.com/PolymerElements/iron-meta/archive/v1.1.1.tar.gz",
],
strip_prefix = "iron-meta-1.1.1",
path = "/iron-meta",
srcs = ["iron-meta.html"],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_overlay_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "3df5b54ff2e0510c87a2aff8c9d730d3fe83d3d11277cc1a49fa29b549acb46c",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-overlay-behavior/archive/v1.10.1.tar.gz",
"https://github.com/PolymerElements/iron-overlay-behavior/archive/v1.10.1.tar.gz",
],
strip_prefix = "iron-overlay-behavior-1.10.1",
path = "/iron-overlay-behavior",
srcs = [
"iron-focusables-helper.html",
"iron-overlay-backdrop.html",
"iron-overlay-behavior.html",
"iron-overlay-manager.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_fit_behavior",
"@org_polymer_iron_resizable_behavior",
],
)
webfiles_external(
name = "org_polymer_iron_range_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "b2f2b6d52284542330bd30b586e217926eb0adec5e13934a3cef557717c22dc2",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-range-behavior/archive/v1.0.4.tar.gz",
"https://github.com/PolymerElements/iron-range-behavior/archive/v1.0.4.tar.gz",
],
strip_prefix = "iron-range-behavior-1.0.4",
path = "/iron-range-behavior",
srcs = ["iron-range-behavior.html"],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_resizable_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "a87a78ee9223c2f6afae7fc94a3ff91cbce6f7e2a7ed3f2979af7945c9281616",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-resizable-behavior/archive/v1.0.3.tar.gz",
"https://github.com/PolymerElements/iron-resizable-behavior/archive/v1.0.3.tar.gz",
],
strip_prefix = "iron-resizable-behavior-1.0.3",
path = "/iron-resizable-behavior",
srcs = ["iron-resizable-behavior.html"],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_scroll_target_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "d0de0c804b1ec91d814754144afd9da1cdb082690de88bd5e47fd5f41990746f",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-scroll-target-behavior/archive/v1.0.3.tar.gz",
"https://github.com/PolymerElements/iron-scroll-target-behavior/archive/v1.0.3.tar.gz",
],
strip_prefix = "iron-scroll-target-behavior-1.0.3",
path = "/iron-scroll-target-behavior",
srcs = ["iron-scroll-target-behavior.html"],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_selector",
licenses = ["notice"], # BSD-3-Clause
sha256 = "ba28a47443bad3b744611c9d7a79fb21dbdf2e35edc5ef8f812e2dcd72b16747",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-selector/archive/v1.5.2.tar.gz",
"https://github.com/PolymerElements/iron-selector/archive/v1.5.2.tar.gz",
],
strip_prefix = "iron-selector-1.5.2",
path = "/iron-selector",
srcs = [
"iron-multi-selectable.html",
"iron-selectable.html",
"iron-selection.html",
"iron-selector.html",
],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_iron_validatable_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "aef4901e68043824f36104799269573dd345ffaac494186e466fdc79c06fdb63",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/iron-validatable-behavior/archive/v1.1.1.tar.gz",
"https://github.com/PolymerElements/iron-validatable-behavior/archive/v1.1.1.tar.gz",
],
strip_prefix = "iron-validatable-behavior-1.1.1",
path = "/iron-validatable-behavior",
srcs = ["iron-validatable-behavior.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_meta",
],
)
webfiles_external(
name = "org_polymer_marked",
licenses = ["notice"], # MIT
sha256 = "93d30bd593736ca440938d77808b7ef5972da0f3fcfe4ae63ae7b4ce117da2cb",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/chjj/marked/archive/v0.3.2.zip",
"https://github.com/chjj/marked/archive/v0.3.2.zip",
],
strip_prefix = "marked-0.3.2",
path = "/marked",
srcs = ["lib/marked.js"],
)
webfiles_external(
name = "org_polymer_marked_element",
licenses = ["notice"], # BSD-3-Clause
sha256 = "7547616df95f8b903757e6afbabfcdba5322c2bcec3f17c726b8bba5adf4bc5f",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/marked-element/archive/v1.1.3.tar.gz",
"https://github.com/PolymerElements/marked-element/archive/v1.1.3.tar.gz",
],
strip_prefix = "marked-element-1.1.3",
path = "/marked-element",
srcs = [
"marked-element.html",
"marked-import.html",
],
deps = [
"@org_polymer",
"@org_polymer_marked",
],
)
webfiles_external(
name = "org_polymer_neon_animation",
licenses = ["notice"], # BSD-3-Clause
sha256 = "8800c314a76b2da190a2b203259c1091f6d38e0057ed37c2a3d0b734980fa9a5",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/neon-animation/archive/v1.2.2.tar.gz",
"https://github.com/PolymerElements/neon-animation/archive/v1.2.2.tar.gz",
],
strip_prefix = "neon-animation-1.2.2",
path = "/neon-animation",
srcs = [
"animations/cascaded-animation.html",
"animations/fade-in-animation.html",
"animations/fade-out-animation.html",
"animations/hero-animation.html",
"animations/opaque-animation.html",
"animations/reverse-ripple-animation.html",
"animations/ripple-animation.html",
"animations/scale-down-animation.html",
"animations/scale-up-animation.html",
"animations/slide-down-animation.html",
"animations/slide-from-bottom-animation.html",
"animations/slide-from-left-animation.html",
"animations/slide-from-right-animation.html",
"animations/slide-from-top-animation.html",
"animations/slide-left-animation.html",
"animations/slide-right-animation.html",
"animations/slide-up-animation.html",
"animations/transform-animation.html",
"neon-animatable.html",
"neon-animatable-behavior.html",
"neon-animated-pages.html",
"neon-animation.html",
"neon-animation-behavior.html",
"neon-animation-runner-behavior.html",
"neon-animations.html",
"neon-shared-element-animatable-behavior.html",
"neon-shared-element-animation-behavior.html",
"web-animations.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_meta",
"@org_polymer_iron_resizable_behavior",
"@org_polymer_iron_selector",
"@org_polymer_web_animations_js",
],
)
webfiles_external(
name = "org_polymer_paper_behaviors",
licenses = ["notice"], # BSD-3-Clause
sha256 = "7cfcb9082ef9909da262df6b5c120bc62dbeaff278cb563e8fc60465ddd387e5",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-behaviors/archive/v1.0.12.tar.gz",
"https://github.com/PolymerElements/paper-behaviors/archive/v1.0.12.tar.gz",
],
strip_prefix = "paper-behaviors-1.0.12",
path = "/paper-behaviors",
srcs = [
"paper-button-behavior.html",
"paper-checked-element-behavior.html",
"paper-inky-focus-behavior.html",
"paper-ripple-behavior.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_checked_element_behavior",
"@org_polymer_paper_ripple",
],
)
webfiles_external(
name = "org_polymer_paper_button",
licenses = ["notice"], # BSD-3-Clause
sha256 = "896c0a7e34bfcce63fc23c63e105ed9c4d62fa3a6385b7161e1e5cd4058820a6",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-button/archive/v1.0.11.tar.gz",
"https://github.com/PolymerElements/paper-button/archive/v1.0.11.tar.gz",
],
strip_prefix = "paper-button-1.0.11",
path = "/paper-button",
srcs = ["paper-button.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_paper_behaviors",
"@org_polymer_paper_material",
"@org_polymer_paper_ripple",
],
)
webfiles_external(
name = "org_polymer_paper_checkbox",
licenses = ["notice"], # BSD-3-Clause
sha256 = "6828a6954a048b1230fbd2606faffbae950ba1d042175b96ec50ae355786a166",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-checkbox/archive/v1.4.0.tar.gz",
"https://github.com/PolymerElements/paper-checkbox/archive/v1.4.0.tar.gz",
],
strip_prefix = "paper-checkbox-1.4.0",
path = "/paper-checkbox",
srcs = ["paper-checkbox.html"],
deps = [
"@org_polymer",
"@org_polymer_paper_behaviors",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_dialog",
licenses = ["notice"], # BSD-3-Clause
sha256 = "c6a9709e7f528d03dcd574503c18b72d4751ca30017346d16e6a791d37ed9259",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-dialog/archive/v1.0.4.tar.gz",
"https://github.com/PolymerElements/paper-dialog/archive/v1.0.4.tar.gz",
],
strip_prefix = "paper-dialog-1.0.4",
path = "/paper-dialog",
srcs = ["paper-dialog.html"],
deps = [
"@org_polymer",
"@org_polymer_neon_animation",
"@org_polymer_paper_dialog_behavior",
],
)
webfiles_external(
name = "org_polymer_paper_dialog_behavior",
licenses = ["notice"], # BSD-3-Clause
sha256 = "a7e0e27ce63554bc14f384cf94bcfa24da8dc5f5120dfd565f45e166261aee40",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-dialog-behavior/archive/v1.2.5.tar.gz",
"https://github.com/PolymerElements/paper-dialog-behavior/archive/v1.2.5.tar.gz",
],
strip_prefix = "paper-dialog-behavior-1.2.5",
path = "/paper-dialog-behavior",
srcs = [
"paper-dialog-behavior.html",
"paper-dialog-common.css",
"paper-dialog-shared-styles.html",
],
suppress = ["cssSyntax"],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_overlay_behavior",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_dialog_scrollable",
licenses = ["notice"], # BSD-3-Clause
sha256 = "a2e69283e7674f782c44d811387a0f8da2d01fac0172743d1add65e253e6b5ff",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-dialog-scrollable/archive/1.1.5.tar.gz",
"https://github.com/PolymerElements/paper-dialog-scrollable/archive/1.1.5.tar.gz",
],
strip_prefix = "paper-dialog-scrollable-1.1.5",
path = "/paper-dialog-scrollable",
srcs = ["paper-dialog-scrollable.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_paper_dialog_behavior",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_dropdown_menu",
licenses = ["notice"], # BSD-3-Clause
sha256 = "9d88f654ec03ee9be211df9e69bede9e8a22b51bf1dbcc63b79762e4256d81ad",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-dropdown-menu/archive/v1.4.0.tar.gz",
"https://github.com/PolymerElements/paper-dropdown-menu/archive/v1.4.0.tar.gz",
],
strip_prefix = "paper-dropdown-menu-1.4.0",
path = "/paper-dropdown-menu",
srcs = [
"paper-dropdown-menu.html",
"paper-dropdown-menu-icons.html",
"paper-dropdown-menu-light.html",
"paper-dropdown-menu-shared-styles.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_form_element_behavior",
"@org_polymer_iron_icon",
"@org_polymer_iron_iconset_svg",
"@org_polymer_iron_validatable_behavior",
"@org_polymer_paper_behaviors",
"@org_polymer_paper_input",
"@org_polymer_paper_menu_button",
"@org_polymer_paper_ripple",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_header_panel",
licenses = ["notice"], # BSD-3-Clause
sha256 = "0db4bd8a4bf6f20dcd0dffb4f907b31c93a8647c9c021344239cf30b40b87075",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-header-panel/archive/v1.1.4.tar.gz",
"https://github.com/PolymerElements/paper-header-panel/archive/v1.1.4.tar.gz",
],
strip_prefix = "paper-header-panel-1.1.4",
path = "/paper-header-panel",
srcs = ["paper-header-panel.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
],
)
webfiles_external(
name = "org_polymer_paper_icon_button",
licenses = ["notice"], # BSD-3-Clause
sha256 = "9cba5bcfd6aeb4c41581c1392c678cf2278d360e9d122f4d9db54a9ebb404496",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-icon-button/archive/v1.1.3.tar.gz",
"https://github.com/PolymerElements/paper-icon-button/archive/v1.1.3.tar.gz",
],
strip_prefix = "paper-icon-button-1.1.3",
path = "/paper-icon-button",
srcs = [
"paper-icon-button.html",
"paper-icon-button-light.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_icon",
"@org_polymer_paper_behaviors",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_input",
licenses = ["notice"], # BSD-3-Clause
sha256 = "17c3dea9bb1c2026cc61324696c6c774214a0dc37686b91ca214a6af550994db",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-input/archive/v1.1.18.tar.gz",
"https://github.com/PolymerElements/paper-input/archive/v1.1.18.tar.gz",
],
strip_prefix = "paper-input-1.1.18",
path = "/paper-input",
srcs = [
"paper-input.html",
"paper-input-addon-behavior.html",
"paper-input-behavior.html",
"paper-input-char-counter.html",
"paper-input-container.html",
"paper-input-error.html",
"paper-textarea.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_autogrow_textarea",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_form_element_behavior",
"@org_polymer_iron_input",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_item",
licenses = ["notice"], # BSD-3-Clause
sha256 = "12ee0dcb61b0d5721c5988571f6974d7b2211e97724f4195893fbcc9058cdac8",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-item/archive/v1.1.4.tar.gz",
"https://github.com/PolymerElements/paper-item/archive/v1.1.4.tar.gz",
],
strip_prefix = "paper-item-1.1.4",
path = "/paper-item",
srcs = [
"paper-icon-item.html",
"paper-item.html",
"paper-item-behavior.html",
"paper-item-body.html",
"paper-item-shared-styles.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_flex_layout",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_listbox",
licenses = ["notice"], # BSD-3-Clause
sha256 = "3cb35f4fe9a3f15185a9e91711dba8f27e9291c8cd371ebf1be21b8f1d5f65fb",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-listbox/archive/v1.1.2.tar.gz",
"https://github.com/PolymerElements/paper-listbox/archive/v1.1.2.tar.gz",
],
strip_prefix = "paper-listbox-1.1.2",
path = "/paper-listbox",
srcs = ["paper-listbox.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_menu_behavior",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_material",
licenses = ["notice"], # BSD-3-Clause
sha256 = "09f6c8bd6ddbea2be541dc86306efe41cdfb31bec0b69d35a5dc29772bbc8506",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-material/archive/v1.0.6.tar.gz",
"https://github.com/PolymerElements/paper-material/archive/v1.0.6.tar.gz",
],
strip_prefix = "paper-material-1.0.6",
path = "/paper-material",
srcs = [
"paper-material.html",
"paper-material-shared-styles.html",
],
deps = [
"@org_polymer",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_menu",
licenses = ["notice"], # BSD-3-Clause
sha256 = "a3cee220926e315f7412236b3628288774694447c0da4428345f36d0f127ba3b",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-menu/archive/v1.2.2.tar.gz",
"https://github.com/PolymerElements/paper-menu/archive/v1.2.2.tar.gz",
],
strip_prefix = "paper-menu-1.2.2",
path = "/paper-menu",
srcs = [
"paper-menu.html",
"paper-menu-shared-styles.html",
"paper-submenu.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_collapse",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_menu_behavior",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_menu_button",
licenses = ["notice"], # BSD-3-Clause
sha256 = "be3290c288a2bd4f9887213db22c75add99cc29ff4d088100c0bc4eb0e57997b",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-menu-button/archive/v1.5.1.tar.gz",
"https://github.com/PolymerElements/paper-menu-button/archive/v1.5.1.tar.gz",
],
strip_prefix = "paper-menu-button-1.5.1",
path = "/paper-menu-button",
srcs = [
"paper-menu-button.html",
"paper-menu-button-animations.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_dropdown",
"@org_polymer_neon_animation",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_progress",
licenses = ["notice"], # BSD-3-Clause
sha256 = "2b6776b2f023c1f344feea17ba29b58d879e46f8ed43b7256495054b5183fff6",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-progress/archive/v1.0.9.tar.gz",
"https://github.com/PolymerElements/paper-progress/archive/v1.0.9.tar.gz",
],
strip_prefix = "paper-progress-1.0.9",
path = "/paper-progress",
srcs = ["paper-progress.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_range_behavior",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_radio_button",
licenses = ["notice"], # BSD-3-Clause
sha256 = "6e911d0c308aa388136b3af79d1bdcbe5a1f4159cbc79d71efb4ff3b6c0b4e91",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-radio-button/archive/v1.1.2.tar.gz",
"https://github.com/PolymerElements/paper-radio-button/archive/v1.1.2.tar.gz",
],
strip_prefix = "paper-radio-button-1.1.2",
path = "/paper-radio-button",
srcs = ["paper-radio-button.html"],
deps = [
"@org_polymer",
"@org_polymer_paper_behaviors",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_radio_group",
licenses = ["notice"], # BSD-3-Clause
sha256 = "7885ad1f81e9dcc03dcea4139b54a201ff55c18543770cd44f94530046c9e163",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-radio-group/archive/v1.0.9.tar.gz",
"https://github.com/PolymerElements/paper-radio-group/archive/v1.0.9.tar.gz",
],
strip_prefix = "paper-radio-group-1.0.9",
path = "/paper-radio-group",
srcs = ["paper-radio-group.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_selector",
"@org_polymer_paper_radio_button",
],
)
webfiles_external(
name = "org_polymer_paper_ripple",
licenses = ["notice"], # BSD-3-Clause
sha256 = "ba76bfb1c737260a8a103d3ca97faa1f7c3288c7db9b2519f401b7a782147c09",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-ripple/archive/v1.0.5.tar.gz",
"https://github.com/PolymerElements/paper-ripple/archive/v1.0.5.tar.gz",
],
strip_prefix = "paper-ripple-1.0.5",
path = "/paper-ripple",
srcs = ["paper-ripple.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
],
)
webfiles_external(
name = "org_polymer_paper_slider",
licenses = ["notice"], # BSD-3-Clause
sha256 = "08e7c541dbf5d2e959208810bfc03188e82ced87e4d30d325172967f67962c3c",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-slider/archive/v1.0.10.tar.gz",
"https://github.com/PolymerElements/paper-slider/archive/v1.0.10.tar.gz",
],
strip_prefix = "paper-slider-1.0.10",
path = "/paper-slider",
srcs = ["paper-slider.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_keys_behavior",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_form_element_behavior",
"@org_polymer_iron_range_behavior",
"@org_polymer_paper_behaviors",
"@org_polymer_paper_input",
"@org_polymer_paper_progress",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_spinner",
licenses = ["notice"], # BSD-3-Clause
sha256 = "6a752907fab7899cbeed15b478e7b9299047c15fbf9d1561d6eb4d204bdbd178",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-spinner/archive/v1.1.1.tar.gz",
"https://github.com/PolymerElements/paper-spinner/archive/v1.1.1.tar.gz",
],
strip_prefix = "paper-spinner-1.1.1",
path = "/paper-spinner",
srcs = [
"paper-spinner.html",
"paper-spinner-behavior.html",
"paper-spinner-lite.html",
"paper-spinner-styles.html"
],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_styles",
licenses = ["notice"], # BSD-3-Clause
sha256 = "6d26b0a4c286402098853dc7388f6b22f30dfb7a74e47b34992ac03380144bb2",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-styles/archive/v1.1.4.tar.gz",
"https://github.com/PolymerElements/paper-styles/archive/v1.1.4.tar.gz",
],
strip_prefix = "paper-styles-1.1.4",
path = "/paper-styles",
srcs = [
"classes/global.html",
"classes/shadow.html",
"classes/shadow-layout.html",
"classes/typography.html",
"color.html",
"default-theme.html",
"demo.css",
"demo-pages.html",
"paper-styles.html",
"paper-styles-classes.html",
"shadow.html",
"typography.html",
],
deps = [
"@org_polymer",
"@org_polymer_font_roboto",
"@org_polymer_iron_flex_layout",
],
)
webfiles_external(
name = "org_polymer_paper_tabs",
licenses = ["notice"], # BSD-3-Clause
sha256 = "c23b6a5221db35e5b1ed3eb8e8696b952572563e285adaec96aba1e3134db825",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-tabs/archive/v1.7.0.tar.gz",
"https://github.com/PolymerElements/paper-tabs/archive/v1.7.0.tar.gz",
],
strip_prefix = "paper-tabs-1.7.0",
path = "/paper-tabs",
srcs = [
"paper-tab.html",
"paper-tabs.html",
"paper-tabs-icons.html",
],
deps = [
"@org_polymer",
"@org_polymer_iron_behaviors",
"@org_polymer_iron_flex_layout",
"@org_polymer_iron_icon",
"@org_polymer_iron_iconset_svg",
"@org_polymer_iron_menu_behavior",
"@org_polymer_iron_resizable_behavior",
"@org_polymer_paper_behaviors",
"@org_polymer_paper_icon_button",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_toast",
licenses = ["notice"], # BSD-3-Clause
sha256 = "55f623712ed1f2bae6d6fadc522a2458e083ccd44cc0a907672547e7b10758a9",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-toast/archive/v1.3.0.tar.gz",
"https://github.com/PolymerElements/paper-toast/archive/v1.3.0.tar.gz",
],
strip_prefix = "paper-toast-1.3.0",
path = "/paper-toast",
srcs = ["paper-toast.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_a11y_announcer",
"@org_polymer_iron_overlay_behavior",
],
)
webfiles_external(
name = "org_polymer_paper_toggle_button",
licenses = ["notice"], # BSD-3-Clause
sha256 = "4aa7cf0396fa2994a8bc2ac6e8428f48b07b945bb7c41bd52041ef5827b45de3",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-toggle-button/archive/v1.2.0.tar.gz",
"https://github.com/PolymerElements/paper-toggle-button/archive/v1.2.0.tar.gz",
],
strip_prefix = "paper-toggle-button-1.2.0",
path = "/paper-toggle-button",
srcs = ["paper-toggle-button.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_paper_behaviors",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_toolbar",
licenses = ["notice"], # BSD-3-Clause
sha256 = "dbddffc0654d9fb5fb48843087eebe16bf7a134902495a664c96c11bf8a2c63d",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-toolbar/archive/v1.1.4.tar.gz",
"https://github.com/PolymerElements/paper-toolbar/archive/v1.1.4.tar.gz",
],
strip_prefix = "paper-toolbar-1.1.4",
path = "/paper-toolbar",
srcs = ["paper-toolbar.html"],
deps = [
"@org_polymer",
"@org_polymer_iron_flex_layout",
"@org_polymer_paper_styles",
],
)
webfiles_external(
name = "org_polymer_paper_tooltip",
licenses = ["notice"], # BSD-3-Clause
sha256 = "4c6667acf01f73da14c3cbc0aa574bf14280304567987ee0314534328377d2ad",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/paper-tooltip/archive/v1.1.2.tar.gz",
"https://github.com/PolymerElements/paper-tooltip/archive/v1.1.2.tar.gz",
],
strip_prefix = "paper-tooltip-1.1.2",
path = "/paper-tooltip",
srcs = ["paper-tooltip.html"],
deps = [
"@org_polymer",
"@org_polymer_neon_animation",
],
)
webfiles_external(
name = "org_polymer",
licenses = ["notice"], # BSD-3-Clause
sha256 = "07a9e62ffb52193da3af09adda2fbac5cc690439978520e2d03e783863f65f91",
strip_prefix = "polymer-1.7.0",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/polymer/polymer/archive/v1.7.0.tar.gz",
"https://github.com/polymer/polymer/archive/v1.7.0.tar.gz",
],
path = "/polymer",
srcs = [
"polymer.html",
"polymer-micro.html",
"polymer-mini.html",
],
)
webfiles_external(
name = "org_polymer_prism",
licenses = ["notice"], # MIT
sha256 = "e06eb54f2a80e6b3cd0bd4d59f900423bcaee53fc03998a056df63740c684683",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PrismJS/prism/archive/abee2b7587f1925e57777044270e2a1860810994.tar.gz",
"https://github.com/PrismJS/prism/archive/abee2b7587f1925e57777044270e2a1860810994.tar.gz",
],
strip_prefix = "prism-abee2b7587f1925e57777044270e2a1860810994",
path = "/prism",
srcs = [
"prism.js",
"themes/prism.css",
],
)
webfiles_external(
name = "org_polymer_prism_element",
licenses = ["notice"], # BSD-3-Clause
sha256 = "ad70bf9cd5bbdf525d465e1b0658867ab4022193eb9c74087a839044b46312b4",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerElements/prism-element/archive/1.0.4.tar.gz",
"https://github.com/PolymerElements/prism-element/archive/1.0.4.tar.gz",
],
strip_prefix = "prism-element-1.0.4",
path = "/prism-element",
srcs = [
"prism-highlighter.html",
"prism-import.html",
],
deps = [
"@org_polymer",
"@org_polymer_prism",
],
)
webfiles_external(
name = "org_polymer_promise_polyfill",
licenses = ["notice"], # BSD-3-Clause
sha256 = "4495450e5d884c3e16b537b43afead7f84d17c7dc061bcfcbf440eac083e4ef5",
strip_prefix = "promise-polyfill-1.0.0",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/PolymerLabs/promise-polyfill/archive/v1.0.0.tar.gz",
"https://github.com/PolymerLabs/promise-polyfill/archive/v1.0.0.tar.gz",
],
path = "/promise-polyfill",
srcs = [
"Promise.js",
"Promise-Statics.js",
"promise-polyfill.html",
"promise-polyfill-lite.html"
],
deps = ["@org_polymer"],
)
webfiles_external(
name = "org_polymer_web_animations_js",
licenses = ["notice"], # BSD-3-Clause
sha256 = "f8bd760cbdeba131f6790bd5abe170bcbf7b1755ff58ed16d0b82fa8a7f34a7f",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/web-animations/web-animations-js/archive/2.2.1.tar.gz",
"https://github.com/web-animations/web-animations-js/archive/2.2.1.tar.gz",
],
strip_prefix = "web-animations-js-2.2.1",
path = "/web-animations-js",
srcs = ["web-animations-next-lite.min.js"],
)
webfiles_external(
name = "org_polymer_webcomponentsjs",
licenses = ["notice"], # BSD-3-Clause
sha256 = "138c43306ee0a6d699ddca9b3c6b0f4982974ea8b7bdad291ea7276c72301df9",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/webcomponents/webcomponentsjs/archive/v0.7.22.tar.gz",
"https://github.com/webcomponents/webcomponentsjs/archive/v0.7.22.tar.gz",
],
strip_prefix = "webcomponentsjs-0.7.22",
path = "/webcomponentsjs",
srcs = [
"CustomElements.js",
"CustomElements.min.js",
"HTMLImports.js",
"HTMLImports.min.js",
"MutationObserver.js",
"MutationObserver.min.js",
"ShadowDOM.js",
"ShadowDOM.min.js",
"webcomponents.js",
"webcomponents.min.js",
"webcomponents-lite.js",
"webcomponents-lite.min.js",
],
)
| 39.366736
| 180
| 0.639935
|
296df774b235d2f8f822a0035e51b13269462ac2
| 1,266
|
py
|
Python
|
Praximes/Python/praxa-cli/praxa/__init__.py
|
fish2000/Praxa
|
9758f4e3d05d21c063bb3029fe8399f1d79aa0b1
|
[
"MIT"
] | null | null | null |
Praximes/Python/praxa-cli/praxa/__init__.py
|
fish2000/Praxa
|
9758f4e3d05d21c063bb3029fe8399f1d79aa0b1
|
[
"MIT"
] | null | null | null |
Praximes/Python/praxa-cli/praxa/__init__.py
|
fish2000/Praxa
|
9758f4e3d05d21c063bb3029fe8399f1d79aa0b1
|
[
"MIT"
] | null | null | null |
"""
PRAXA - Control Your Praxis
Copyright 2012 Alexander Bohn.
The PRAXA License Agreement (MIT License)
------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| 42.2
| 79
| 0.766983
|
312e3c2a0a1bebcf4e24dcf19a1531a142740fce
| 87
|
py
|
Python
|
class/fact.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | 1
|
2021-05-29T03:09:24.000Z
|
2021-05-29T03:09:24.000Z
|
class/fact.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | null | null | null |
class/fact.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | null | null | null |
num = int(input("Enter Number to do op \n : "))
print("Factorail is "+str(num*(num-1)))
| 43.5
| 47
| 0.632184
|
f52a9d60240e849267327255d0be84f55bdb17ec
| 1,409
|
py
|
Python
|
Tests/TestExtractAndDetect/ColourPick.py
|
robdobsn/CatDeterV3
|
8b2a63787fa26772f0be7897ced2dc2a7fd7bc5e
|
[
"MIT"
] | null | null | null |
Tests/TestExtractAndDetect/ColourPick.py
|
robdobsn/CatDeterV3
|
8b2a63787fa26772f0be7897ced2dc2a7fd7bc5e
|
[
"MIT"
] | null | null | null |
Tests/TestExtractAndDetect/ColourPick.py
|
robdobsn/CatDeterV3
|
8b2a63787fa26772f0be7897ced2dc2a7fd7bc5e
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python2
import cv2
import numpy as np
colors = []
def on_mouse_click (event, x, y, flags, frame):
if event == cv2.EVENT_LBUTTONUP:
colors.append(frame[y,x].tolist())
def main():
capture = cv2.VideoCapture(0)
while True:
_, frame = capture.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS_FULL)
if colors:
cv2.putText(frame, str(colors[-1]), (10, 50), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 0), 2)
cv2.imshow('frame', frame)
cv2.setMouseCallback('frame', on_mouse_click, frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
# avgb = int(sum(c[0] for c in colors) / len(colors))
# avgg = int(sum(c[0] for c in colors) / len(colors))
# avgr = int(sum(c[0] for c in colors) / len(colors))
# print avgb, avgg, avgr
minb = min(c[0] for c in colors)
ming = min(c[1] for c in colors)
minr = min(c[2] for c in colors)
maxb = max(c[0] for c in colors)
maxg = max(c[1] for c in colors)
maxr = max(c[2] for c in colors)
print (minr, ming, minb, maxr, maxg, maxb)
lb = [minb,ming,minr]
ub = [maxb,maxg,maxr]
print (lb, ub)
colrImg = np.zeros((200,200,3), np.uint8)
colrImg[:, :] = (minb, ming, minr)
cv2.imshow('frame', colrImg)
cv2.waitKey(1000)
if __name__ == "__main__":
main()
| 26.584906
| 98
| 0.584102
|
6bbc8fe5b178895e90e3a175e0bc497c155e1fe7
| 119
|
py
|
Python
|
topCoder/srms/300s/srm337/div2/palindromize2.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | 1
|
2020-09-30T19:53:08.000Z
|
2020-09-30T19:53:08.000Z
|
topCoder/srms/300s/srm337/div2/palindromize2.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | null | null | null |
topCoder/srms/300s/srm337/div2/palindromize2.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | 1
|
2020-10-15T09:10:57.000Z
|
2020-10-15T09:10:57.000Z
|
class Palindromize2:
def minChanges(self, s):
return ''.join([min(s[i], s[-i-1]) for i in xrange(len(s))])
| 29.75
| 68
| 0.588235
|
18634a65ab3888be31563f164f336bca47536b42
| 705
|
py
|
Python
|
cloud/cloud/doctype/cloud_settings/cloud_settings.py
|
srdgame/symlink_cloud
|
0df41d9cd9c9757cf5e96f6bea841c3b86de8ee1
|
[
"MIT"
] | 1
|
2021-07-25T08:53:01.000Z
|
2021-07-25T08:53:01.000Z
|
cloud/cloud/doctype/cloud_settings/cloud_settings.py
|
srdgame/symlink_cloud
|
0df41d9cd9c9757cf5e96f6bea841c3b86de8ee1
|
[
"MIT"
] | null | null | null |
cloud/cloud/doctype/cloud_settings/cloud_settings.py
|
srdgame/symlink_cloud
|
0df41d9cd9c9757cf5e96f6bea841c3b86de8ee1
|
[
"MIT"
] | 6
|
2017-04-02T04:01:08.000Z
|
2021-12-22T10:43:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Dirk Chang and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CloudSettings(Document):
@staticmethod
def get_default_company():
return frappe.db.get_single_value("Cloud Settings", "default_cloud_company")
@staticmethod
def get_default_wechat_app():
return frappe.db.get_single_value("Cloud Settings", "default_wechat_app")
@staticmethod
def get_on_behalf(auth_code):
if frappe.db.get_single_value("Cloud Settings", "cloud_auth_code") == auth_code:
return frappe.db.get_single_value("Cloud Settings", "cloud_auth_user")
| 29.375
| 82
| 0.784397
|
9f50cc90c5f7b729a71e7cc0ea83fad25404e3c9
| 1,345
|
py
|
Python
|
python/test/function/test_arange.py
|
daniel-falk/nnabla
|
3fe132ea52dc10521cc029a5d6ba8f565cf65ccf
|
[
"Apache-2.0"
] | 2,792
|
2017-06-26T13:05:44.000Z
|
2022-03-28T07:55:26.000Z
|
python/test/function/test_arange.py
|
daniel-falk/nnabla
|
3fe132ea52dc10521cc029a5d6ba8f565cf65ccf
|
[
"Apache-2.0"
] | 138
|
2017-06-27T07:04:44.000Z
|
2022-02-28T01:37:15.000Z
|
python/test/function/test_arange.py
|
daniel-falk/nnabla
|
3fe132ea52dc10521cc029a5d6ba8f565cf65ccf
|
[
"Apache-2.0"
] | 380
|
2017-06-26T13:23:52.000Z
|
2022-03-25T16:51:30.000Z
|
# Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context, function_tester
ctxs = list_context('Arange')
def ref_arange(start, stop, step):
return np.arange(start, stop, step).astype(np.float32)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("start, stop, step", [
(0, 10, 1),
(0, 10, -1),
(10, 0, -1),
(0, 10, 11),
(0, 10, 0.5),
(0, 10, 0.3),
(0, -10, -1),
(-9.9, 9.9, 1.1),
(9.9, -9.9, -1.1),
])
def test_arange_forward(start, stop, step, ctx, func_name):
function_tester(None, F.arange, ref_arange, inputs=[], ctx=ctx,
func_args=[start, stop, step],
func_name=func_name, backward=[])
| 31.27907
| 74
| 0.673606
|
216e946cb15e78ec6ccbfc13d0916710e6cc5015
| 15,340
|
py
|
Python
|
plugins/youtube_dl_button.py
|
sahaynitin/Uploader-Bot-V4
|
5835b718adb9bd38d7145702292b873449d34579
|
[
"MIT"
] | null | null | null |
plugins/youtube_dl_button.py
|
sahaynitin/Uploader-Bot-V4
|
5835b718adb9bd38d7145702292b873449d34579
|
[
"MIT"
] | null | null | null |
plugins/youtube_dl_button.py
|
sahaynitin/Uploader-Bot-V4
|
5835b718adb9bd38d7145702292b873449d34579
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Shrimadhav U K
# the logging things
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import asyncio
import json
import math
import os
import shutil
import time
from datetime import datetime
# the secret configuration specific things
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
# the Strings used for this "thing"
from translation import Translation
import pyrogram
logging.getLogger("pyrogram").setLevel(logging.WARNING)
from helper_funcs.display_progress import progress_for_pyrogram, humanbytes
from helper_funcs.help_uploadbot import DownLoadFile
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
# https://stackoverflow.com/a/37631799/4723940
from PIL import Image
from helper_funcs.help_Nekmo_ffmpeg import generate_screen_shots
from helper_funcs.ran_text import random_char
async def youtube_dl_call_back(bot, update):
cb_data = update.data
# youtube_dl extractors
tg_send_type, youtube_dl_format, ranom = cb_data.split("|")
print(cb_data)
random1 = random_char(5)
thumb_image_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + f'{ranom}' + ".jpg"
save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + f'{ranom}' + ".json"
try:
with open(save_ytdl_json_path, "r", encoding="utf8") as f:
response_json = json.load(f)
except (FileNotFoundError) as e:
await bot.delete_messages(
chat_id=update.message.chat.id,
message_ids=update.message.message_id,
revoke=True
)
return False
youtube_dl_url = update.message.reply_to_message.text
custom_file_name = str(response_json.get("title")) + \
"_" + youtube_dl_format + "." + youtube_dl_ext
youtube_dl_username = None
youtube_dl_password = None
if "|" in youtube_dl_url:
url_parts = youtube_dl_url.split("|")
if len(url_parts) == 2:
youtube_dl_url = url_parts[0]
custom_file_name = url_parts[1]
elif len(url_parts) == 4:
youtube_dl_url = url_parts[0]
custom_file_name = url_parts[1]
youtube_dl_username = url_parts[2]
youtube_dl_password = url_parts[3]
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
if youtube_dl_url is not None:
youtube_dl_url = youtube_dl_url.strip()
if custom_file_name is not None:
custom_file_name = custom_file_name.strip()
# https://stackoverflow.com/a/761825/4723940
if youtube_dl_username is not None:
youtube_dl_username = youtube_dl_username.strip()
if youtube_dl_password is not None:
youtube_dl_password = youtube_dl_password.strip()
logger.info(youtube_dl_url)
logger.info(custom_file_name)
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
await bot.edit_message_text(
text=Translation.DOWNLOAD_START,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
description = Translation.CUSTOM_CAPTION_UL_FILE
if "fulltitle" in response_json:
description = response_json["fulltitle"][0:1021]
# escape Markdown and special characters
tmp_directory_for_each_user = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + f'{random1}'
if not os.path.isdir(tmp_directory_for_each_user):
os.makedirs(tmp_directory_for_each_user)
download_directory = tmp_directory_for_each_user + "/" + custom_file_name
command_to_exec = []
if tg_send_type == "audio":
command_to_exec = [
"yt-dlp",
"-c",
"--max-filesize", str(Config.TG_MAX_FILE_SIZE),
"--prefer-ffmpeg",
"--extract-audio",
"--audio-format", youtube_dl_ext,
"--audio-quality", youtube_dl_format,
youtube_dl_url,
"-o", download_directory
]
else:
# command_to_exec = ["youtube-dl", "-f", youtube_dl_format, "--hls-prefer-ffmpeg", "--recode-video", "mp4", "-k", youtube_dl_url, "-o", download_directory]
minus_f_format = youtube_dl_format
if "youtu" in youtube_dl_url:
minus_f_format = youtube_dl_format + "+bestaudio"
command_to_exec = [
"yt-dlp",
"-c",
"--max-filesize", str(Config.TG_MAX_FILE_SIZE),
"--embed-subs",
"-f", minus_f_format,
"--hls-prefer-ffmpeg", youtube_dl_url,
"-o", download_directory
]
if Config.HTTP_PROXY != "":
command_to_exec.append("--proxy")
command_to_exec.append(Config.HTTP_PROXY)
if youtube_dl_username is not None:
command_to_exec.append("--username")
command_to_exec.append(youtube_dl_username)
if youtube_dl_password is not None:
command_to_exec.append("--password")
command_to_exec.append(youtube_dl_password)
command_to_exec.append("--no-warnings")
# command_to_exec.append("--quiet")
logger.info(command_to_exec)
start = datetime.now()
process = await asyncio.create_subprocess_exec(
*command_to_exec,
# stdout must a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
logger.info(e_response)
logger.info(t_response)
ad_string_to_replace = "please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output."
if e_response and ad_string_to_replace in e_response:
error_message = e_response.replace(ad_string_to_replace, "")
await bot.edit_message_text(
chat_id=update.message.chat.id,
message_id=update.message.message_id,
text=error_message
)
return False
if t_response:
logger.info(t_response)
try:
os.remove(save_ytdl_json_path)
except FileNotFoundError as exc:
pass
end_one = datetime.now()
time_taken_for_download = (end_one -start).seconds
file_size = Config.TG_MAX_FILE_SIZE + 1
try:
file_size = os.stat(download_directory).st_size
except FileNotFoundError as exc:
download_directory = os.path.splitext(download_directory)[0] + "." + "mkv"
# https://stackoverflow.com/a/678242/4723940
file_size = os.stat(download_directory).st_size
if file_size > Config.TG_MAX_FILE_SIZE:
await bot.edit_message_text(
chat_id=update.message.chat.id,
text=Translation.RCHD_TG_API_LIMIT.format(time_taken_for_download, humanbytes(file_size)),
message_id=update.message.message_id
)
else:
is_w_f = False
'''images = await generate_screen_shots(
download_directory,
tmp_directory_for_each_user,
is_w_f,
Config.DEF_WATER_MARK_FILE,
300,
9
)
logger.info(images)'''
await bot.edit_message_text(
text=Translation.UPLOAD_START,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
# get the correct width, height, and duration for videos greater than 10MB
# ref: message from @BotSupport
width = 0
height = 0
duration = 0
if tg_send_type != "file":
metadata = extractMetadata(createParser(download_directory))
if metadata is not None:
if metadata.has("duration"):
duration = metadata.get('duration').seconds
# get the correct width, height, and duration for videos greater than 10MB
if os.path.exists(thumb_image_path):
width = 0
height = 0
metadata = extractMetadata(createParser(thumb_image_path))
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
if tg_send_type == "vm":
height = width
# resize image
# ref: https://t.me/PyrogramChat/44663
# https://stackoverflow.com/a/21669827/4723940
Image.open(thumb_image_path).convert(
"RGB").save(thumb_image_path)
img = Image.open(thumb_image_path)
# https://stackoverflow.com/a/37631799/4723940
# img.thumbnail((90, 90))
if tg_send_type == "file":
img.resize((320, height))
else:
img.resize((90, height))
img.save(thumb_image_path, "JPEG")
# https://pillow.readthedocs.io/en/3.1.x/reference/Image.html#create-thumbnails
else:
thumb_image_path = None
start_time = time.time()
# try to upload file
if tg_send_type == "audio":
await bot.send_audio(
chat_id=update.message.chat.id,
audio=download_directory,
caption=description,
parse_mode="HTML",
duration=duration,
# performer=response_json["uploader"],
# title=response_json["title"],
# reply_markup=reply_markup,
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "file":
await bot.send_document(
chat_id=update.message.chat.id,
document=download_directory,
thumb=thumb_image_path,
caption=description,
parse_mode="HTML",
# reply_markup=reply_markup,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "vm":
await bot.send_video_note(
chat_id=update.message.chat.id,
video_note=download_directory,
duration=duration,
length=width,
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "video":
await bot.send_video(
chat_id=update.message.chat.id,
video=download_directory,
caption=description,
parse_mode="HTML",
duration=duration,
width=width,
height=height,
supports_streaming=True,
# reply_markup=reply_markup,
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
else:
logger.info("Did this happen? :\\")
end_two = datetime.now()
time_taken_for_upload = (end_two - end_one).seconds
#
'''media_album_p = []
if images is not None:
i = 0
caption = "JOIN : https://t.me/TGBotsCollection \n For the List of Telegram Bots"
if is_w_f:
caption = "/upgrade to Plan D to remove the watermark\nJOIN : https://t.me/TGBotsCollection \n For the List of Telegram Bots"
for image in images:
if os.path.exists(image):
if i == 0:
media_album_p.append(
pyrogram.types.InputMediaPhoto(
media=image,
caption=caption,
parse_mode="html"
)
)
else:
media_album_p.append(
pyrogram.types.InputMediaPhoto(
media=image
)
)
i = i + 1
await bot.send_media_group(
chat_id=update.message.chat.id,
disable_notification=True,
reply_to_message_id=update.message.message_id,
media=media_album_p
)'''
#
try:
os.remove(thumb_image_path)
shutil.rmtree(tmp_directory_for_each_user)
except:
pass
await bot.edit_message_text(
text=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS.format(time_taken_for_download, time_taken_for_upload),
chat_id=update.message.chat.id,
message_id=update.message.message_id,
disable_web_page_preview=True
)
| 41.016043
| 257
| 0.556519
|
b550f912d73a492bea08dbc6fd0b4de127b948cd
| 949
|
py
|
Python
|
ascii_progress/__main__.py
|
Deric-W/ascii-progress
|
8d26cc2b8dae21cd7cb52f498f24ef18a761c834
|
[
"MIT"
] | 1
|
2020-03-15T01:03:07.000Z
|
2020-03-15T01:03:07.000Z
|
ascii_progress/__main__.py
|
Deric-W/ascii-progress
|
8d26cc2b8dae21cd7cb52f498f24ef18a761c834
|
[
"MIT"
] | null | null | null |
ascii_progress/__main__.py
|
Deric-W/ascii-progress
|
8d26cc2b8dae21cd7cb52f498f24ef18a761c834
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import time
from .spinner import Spinner
from .bar import BarFormat
# run demo
for frames in (
"|/-\\",
("←↖↑↗→↘↓↙"),
("◐◓◑◒"),
("(o )", "( o )", "( o )", "( o )", "( o)", "( o )", "( o )", "( o )"),
(".oO@*"),
("", ".", "..", "..."),
("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"),
(">))'>", " >))'>", " >))'>", " <'((<", " <'((<", "<'((<")
):
sys.stdout.write("Working ")
with Spinner(frames).handle_exceptions("Done", "Exception") as spinner:
for _ in map(spinner.set_progress, range(1, 15)):
time.sleep(0.2)
for bar_format in map(
lambda t: BarFormat(t[0], t[1], 10),
(
(("[", "]"), (".", "#")),
(("|", "|"), (" ", "█")),
(("[", "]"), (" ", "="))
)
):
sys.stdout.write("Working ")
with bar_format.bar(75).handle_exceptions("Done", "Exception") as bar:
for _ in bar:
time.sleep(0.02)
| 27.114286
| 93
| 0.385669
|
de7f787c00f91a514a6fa09b343e0110219f4209
| 504
|
py
|
Python
|
events/urls.py
|
flyinactor91/Rocky-Rollcall
|
ab2bdbbd5f5920e709a09d1b1182a388955211d9
|
[
"MIT"
] | 2
|
2019-09-03T06:08:35.000Z
|
2020-07-19T06:57:17.000Z
|
events/urls.py
|
flyinactor91/Rocky-Rollcall
|
ab2bdbbd5f5920e709a09d1b1182a388955211d9
|
[
"MIT"
] | null | null | null |
events/urls.py
|
flyinactor91/Rocky-Rollcall
|
ab2bdbbd5f5920e709a09d1b1182a388955211d9
|
[
"MIT"
] | null | null | null |
"""
Events URL patterns
"""
from django.urls import path
from . import views
urlpatterns = [
path('', views.EventListView.as_view(), name='event_list'),
path('new/<slug:slug>', views.event_new, name='event_new'),
path('<int:pk>', views.event_detail, name='event_detail'),
path('<int:pk>/edit', views.event_edit, name='event_edit'),
path('<int:pk>/delete', views.event_delete, name='event_delete'),
path('casting/<int:pk>/delete', views.casting_delete, name='casting_delete'),
]
| 31.5
| 81
| 0.680556
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.