| before<br>stringlengths 0-955k | after<br>stringlengths 0-877k | repo<br>stringlengths 1-74 | type<br>stringclasses 1 value |
|---|---|---|---|
def gqtpar_fast(model, x_candidate, *, k_easy=0.1, k_hard=0.2, maxiter=200):
"""Solve the quadratic trust-region subproblem via nearly exact iterative method.
This subproblem solver is mainly based on Conn et al. (2000) "Trust region methods"
(:cite:`Conn2000`), pp. 169-200.
But ideas from Nocedal and Wright (2006) "Numerical optimization"
(:cite:`Nocedal2006`), pp. 83-91, who implement a similar algorithm,
were also used.
The original algorithm was developed by More and Sorensen (1983) (:cite:`More1983`)
and is known as "GQTPAR".
The vector x* is a global solution to the quadratic subproblem:
min_x f + g @ x + 0.5 * x.T @ H @ x,
if and only if ||x|| <= trustregion_radius
and if there is a scalar lambda >= 0, such that:
1) (H + lambda * I(n)) x* = -g
2) lambda (trustregion_radius - ||x*||) = 0
3) H + lambda * I is positive definite
where g denotes the gradient and H the hessian of the quadratic model,
respectively.
k_easy and k_hard are stopping criteria for the iterative subproblem solver.
See pp. 194-197 in :cite:`Conn2000` for a more detailed description.
Args:
model (NamedTuple): NamedTuple containing the parameters of the main model, i.e.
- ``linear_terms``, a np.ndarray of shape (n,) and
- ``square_terms``, a np.ndarray of shape (n,n).
x_candidate (np.ndarray): Initial guess for the solution of the subproblem.
k_easy (float): Stopping criterion for the "easy" case.
k_hard (float): Stopping criterion for the "hard" case.
maxiter (int): Maximum number of iterations to perform. If reached,
terminate.
Returns:
(dict): Result dictionary containing the following keys:
- ``x`` (np.ndarray): Solution vector of the subproblem of shape (n,)
- ``criterion`` (float): Minimum function value associated with the
solution.
"""
hessian_already_factorized = False
model_gradient = model.linear_terms
model_hessian = model.square_terms
zero_threshold = model_hessian.shape[0] * np.finfo(float).eps * _norm(model_hessian, np.Inf)
stopping_criteria = {'k_easy': k_easy, 'k_hard': k_hard}
<DeepExtract>
if -1 == -1:
out = np.linalg.norm(model_gradient)
else:
out = np.linalg.norm(model_gradient, -1)
gradient_norm = out
</DeepExtract>
<DeepExtract>
gradient_norm = _norm(model_gradient, -1.0)
model_hessian = model_hessian
hessian_infinity_norm = _norm(model_hessian, np.Inf)
hessian_frobenius_norm = _norm(model_hessian, -1.0)
(hessian_gershgorin_lower, hessian_gershgorin_upper) = _compute_gershgorin_bounds(model_hessian)
lambda_lower_bound = max(0, -min(np.diag(model_hessian)), gradient_norm - min(hessian_gershgorin_upper, hessian_frobenius_norm, hessian_infinity_norm))
lambda_upper_bound = max(0, gradient_norm + min(-hessian_gershgorin_lower, hessian_frobenius_norm, hessian_infinity_norm))
if lambda_lower_bound == 0:
lambda_candidate = 0
else:
lambda_candidate = _get_new_lambda_candidate(lower_bound=lambda_lower_bound, upper_bound=lambda_upper_bound)
(lambda_candidate, lambda_lower_bound, lambda_upper_bound) = (lambda_candidate, lambda_lower_bound, lambda_upper_bound)
</DeepExtract>
converged = False
for _niter in range(maxiter):
if hessian_already_factorized:
hessian_already_factorized = False
else:
<DeepExtract>
hessian_plus_lambda = model_hessian + lambda_candidate * _identity(model_hessian.shape[0])
(hessian_upper_triangular, factorization_info) = compute_cholesky_factorization(hessian_plus_lambda, lower=False, overwrite_a=False, clean=True)
(hessian_plus_lambda, hessian_upper_triangular, factorization_info) = (hessian_plus_lambda, hessian_upper_triangular, factorization_info)
</DeepExtract>
if factorization_info == 0 and gradient_norm > zero_threshold:
<DeepExtract>
x_candidate = cho_solve((hessian_upper_triangular, False), -model_gradient)
x_norm = _norm(x_candidate, -1.0)
if x_norm <= 1 and lambda_candidate == 0:
converged = True
w = solve_triangular(hessian_upper_triangular, x_candidate, trans='T')
w_norm = _norm(w, -1.0)
newton_step = _compute_newton_step(lambda_candidate, x_norm, w_norm)
if x_norm < 1:
(x_candidate, hessian_plus_lambda, hessian_already_factorized, lambda_new_candidate, lambda_new_lower_bound, lambda_new_upper_bound, converged) = _update_candidate_and_parameters_when_candidate_within_trustregion(x_candidate, model_hessian, hessian_upper_triangular, hessian_plus_lambda, hessian_already_factorized, lambda_candidate, lambda_lower_bound, newton_step, stopping_criteria, converged)
else:
if abs(x_norm - 1) <= stopping_criteria['k_easy']:
converged = True
lambda_new_candidate = newton_step
lambda_new_lower_bound = lambda_candidate
lambda_new_upper_bound = lambda_upper_bound
(x_candidate, hessian_plus_lambda, hessian_already_factorized, lambda_candidate, lambda_lower_bound, lambda_upper_bound, converged) = (x_candidate, hessian_plus_lambda, hessian_already_factorized, lambda_new_candidate, lambda_new_lower_bound, lambda_new_upper_bound, converged)
</DeepExtract>
elif factorization_info == 0 and gradient_norm <= zero_threshold:
<DeepExtract>
if lambda_candidate == 0:
x_candidate = np.zeros(len(x_candidate))
converged = True
(s_min, z_min) = _estimate_smallest_singular_value(hessian_upper_triangular)
step_len = 2
if step_len ** 2 * s_min ** 2 <= stopping_criteria['k_hard'] * lambda_candidate:
x_candidate = step_len * z_min
converged = True
lambda_lower_bound = max(lambda_lower_bound, lambda_upper_bound - s_min ** 2)
lambda_new_candidate = _get_new_lambda_candidate(lower_bound=lambda_lower_bound, upper_bound=lambda_candidate)
(x_candidate, lambda_candidate, lambda_lower_bound, lambda_upper_bound, converged) = (x_candidate, lambda_new_candidate, lambda_lower_bound, lambda_candidate, converged)
</DeepExtract>
else:
<DeepExtract>
(delta, v) = _compute_terms_to_make_leading_submatrix_singular(hessian_upper_triangular, hessian_plus_lambda, factorization_info)
v_norm = _norm(v, -1.0)
lambda_lower_bound = max(lambda_lower_bound, lambda_candidate + delta / v_norm ** 2)
lambda_new_candidate = _get_new_lambda_candidate(lower_bound=lambda_lower_bound, upper_bound=lambda_upper_bound)
(lambda_candidate, lambda_lower_bound) = (lambda_new_candidate, lambda_lower_bound)
</DeepExtract>
if converged:
break
f_min = model_gradient.T @ x_candidate + 0.5 * x_candidate.T @ model_hessian @ x_candidate
result = {'x': x_candidate, 'criterion': f_min, 'n_iterations': _niter, 'success': converged}
return result
|
def gqtpar_fast(model, x_candidate, *, k_easy=0.1, k_hard=0.2, maxiter=200):
"""Solve the quadratic trust-region subproblem via nearly exact iterative method.
This subproblem solver is mainly based on Conn et al. (2000) "Trust region methods"
(:cite:`Conn2000`), pp. 169-200.
But ideas from Nocedal and Wright (2006) "Numerical optimization"
(:cite:`Nocedal2006`), pp. 83-91, who implement a similar algorithm,
were also used.
The original algorithm was developed by More and Sorensen (1983) (:cite:`More1983`)
and is known as "GQTPAR".
The vector x* is a global solution to the quadratic subproblem:
min_x f + g @ x + 0.5 * x.T @ H @ x,
if and only if ||x|| <= trustregion_radius
and if there is a scalar lambda >= 0, such that:
1) (H + lambda * I(n)) x* = -g
2) lambda (trustregion_radius - ||x*||) = 0
3) H + lambda * I is positive definite
where g denotes the gradient and H the hessian of the quadratic model,
respectively.
k_easy and k_hard are stopping criteria for the iterative subproblem solver.
See pp. 194-197 in :cite:`Conn2000` for a more detailed description.
Args:
model (NamedTuple): NamedTuple containing the parameters of the main model, i.e.
- ``linear_terms``, a np.ndarray of shape (n,) and
- ``square_terms``, a np.ndarray of shape (n,n).
x_candidate (np.ndarray): Initial guess for the solution of the subproblem.
k_easy (float): Stopping criterion for the "easy" case.
k_hard (float): Stopping criterion for the "hard" case.
maxiter (int): Maximum number of iterations to perform. If reached,
terminate.
Returns:
(dict): Result dictionary containing the following keys:
- ``x`` (np.ndarray): Solution vector of the subproblem of shape (n,)
- ``criterion`` (float): Minimum function value associated with the
solution.
"""
hessian_already_factorized = False
model_gradient = model.linear_terms
model_hessian = model.square_terms
zero_threshold = model_hessian.shape[0] * np.finfo(float).eps * _norm(model_hessian, np.Inf)
stopping_criteria = {'k_easy': k_easy, 'k_hard': k_hard}
if -1 == -1:
out = np.linalg.norm(model_gradient)
else:
out = np.linalg.norm(model_gradient, -1)
gradient_norm = out
gradient_norm = _norm(model_gradient, -1.0)
model_hessian = model_hessian
hessian_infinity_norm = _norm(model_hessian, np.Inf)
hessian_frobenius_norm = _norm(model_hessian, -1.0)
(hessian_gershgorin_lower, hessian_gershgorin_upper) = _compute_gershgorin_bounds(model_hessian)
lambda_lower_bound = max(0, -min(np.diag(model_hessian)), gradient_norm - min(hessian_gershgorin_upper, hessian_frobenius_norm, hessian_infinity_norm))
lambda_upper_bound = max(0, gradient_norm + min(-hessian_gershgorin_lower, hessian_frobenius_norm, hessian_infinity_norm))
if lambda_lower_bound == 0:
lambda_candidate = 0
else:
lambda_candidate = _get_new_lambda_candidate(lower_bound=lambda_lower_bound, upper_bound=lambda_upper_bound)
(lambda_candidate, lambda_lower_bound, lambda_upper_bound) = (lambda_candidate, lambda_lower_bound, lambda_upper_bound)
converged = False
for _niter in range(maxiter):
if hessian_already_factorized:
hessian_already_factorized = False
else:
hessian_plus_lambda = model_hessian + lambda_candidate * _identity(model_hessian.shape[0])
(hessian_upper_triangular, factorization_info) = compute_cholesky_factorization(hessian_plus_lambda, lower=False, overwrite_a=False, clean=True)
(hessian_plus_lambda, hessian_upper_triangular, factorization_info) = (hessian_plus_lambda, hessian_upper_triangular, factorization_info)
if factorization_info == 0 and gradient_norm > zero_threshold:
x_candidate = cho_solve((hessian_upper_triangular, False), -model_gradient)
x_norm = _norm(x_candidate, -1.0)
if x_norm <= 1 and lambda_candidate == 0:
converged = True
w = solve_triangular(hessian_upper_triangular, x_candidate, trans='T')
w_norm = _norm(w, -1.0)
newton_step = _compute_newton_step(lambda_candidate, x_norm, w_norm)
if x_norm < 1:
(x_candidate, hessian_plus_lambda, hessian_already_factorized, lambda_new_candidate, lambda_new_lower_bound, lambda_new_upper_bound, converged) = _update_candidate_and_parameters_when_candidate_within_trustregion(x_candidate, model_hessian, hessian_upper_triangular, hessian_plus_lambda, hessian_already_factorized, lambda_candidate, lambda_lower_bound, newton_step, stopping_criteria, converged)
else:
if abs(x_norm - 1) <= stopping_criteria['k_easy']:
converged = True
lambda_new_candidate = newton_step
lambda_new_lower_bound = lambda_candidate
lambda_new_upper_bound = lambda_upper_bound
(x_candidate, hessian_plus_lambda, hessian_already_factorized, lambda_candidate, lambda_lower_bound, lambda_upper_bound, converged) = (x_candidate, hessian_plus_lambda, hessian_already_factorized, lambda_new_candidate, lambda_new_lower_bound, lambda_new_upper_bound, converged)
elif factorization_info == 0 and gradient_norm <= zero_threshold:
if lambda_candidate == 0:
x_candidate = np.zeros(len(x_candidate))
converged = True
(s_min, z_min) = _estimate_smallest_singular_value(hessian_upper_triangular)
step_len = 2
if step_len ** 2 * s_min ** 2 <= stopping_criteria['k_hard'] * lambda_candidate:
x_candidate = step_len * z_min
converged = True
lambda_lower_bound = max(lambda_lower_bound, lambda_upper_bound - s_min ** 2)
lambda_new_candidate = _get_new_lambda_candidate(lower_bound=lambda_lower_bound, upper_bound=lambda_candidate)
(x_candidate, lambda_candidate, lambda_lower_bound, lambda_upper_bound, converged) = (x_candidate, lambda_new_candidate, lambda_lower_bound, lambda_candidate, converged)
else:
(delta, v) = _compute_terms_to_make_leading_submatrix_singular(hessian_upper_triangular, hessian_plus_lambda, factorization_info)
v_norm = _norm(v, -1.0)
lambda_lower_bound = max(lambda_lower_bound, lambda_candidate + delta / v_norm ** 2)
lambda_new_candidate = _get_new_lambda_candidate(lower_bound=lambda_lower_bound, upper_bound=lambda_upper_bound)
(lambda_candidate, lambda_lower_bound) = (lambda_new_candidate, lambda_lower_bound)
if converged:
break
f_min = model_gradient.T @ x_candidate + 0.5 * x_candidate.T @ model_hessian @ x_candidate
result = {'x': x_candidate, 'criterion': f_min, 'n_iterations': _niter, 'success': converged}
return result
|
estimagic
|
positive
|
def create_image(module):
"""
Creates the boot image using the user given inputs(attributes).
param module: Ansible module argument spec.
returns: Nothing (Doesn't return anything)
"""
<DeepExtract>
req_space = determine_required_space(module)
for (key, val) in req_space.items():
cmd = '/usr/bin/df -m '
cmd += key
(rc, stdout, stderr) = module.run_command(cmd)
stdout_lines = stdout.split('\n')[1:]
if rc:
results['msg'] += 'Could not validate space.'
results['cmd'] = cmd
results['stdout'] = stdout
results['stderr'] = stderr
module.fail_json(**results)
present_space = 0
current_line = stdout_lines[1].split('\t')
count = 0
for space in current_line[1:]:
if space != '' and count == 1:
present_space = int(space)
break
if space != '':
count += 1
if present_space < val:
if module.params['increase_space']:
increase_disk_space(val - present_space + 1, key)
else:
results['msg'] += 'Not enough space in the ' + key + ' directory, increase the space and try again.'
results['cmd'] = cmd
results['stdout'] = stdout
results['stderr'] = stderr
module.exit_json(**results)
</DeepExtract>
location = module.params['directory'] + '/' + module.params['image_name']
<DeepExtract>
cmd = 'ls ' + location
(rc, stdout, stderr) = module.run_command(cmd)
if not rc:
results['msg'] += 'The Bosboot Image already exists, '
results['msg'] += 'Change the image name or the directory and try again.'
results['cmd'] = cmd
results['stdout'] = stdout
results['stderr'] = stderr
results['changed'] = False
module.exit_json(**results)
</DeepExtract>
cmd = '/usr/sbin/bosboot -a'
if module.params['disk_device']:
cmd += ' -d ' + module.params['disk_device']
if module.params['prototype_file']:
cmd += ' -p ' + module.params['prototype_file']
cmd += ' -b ' + location
if module.params['table_entry']:
cmd += ' -M ' + module.params['table_entry']
(rc, stdout, stderr) = module.run_command(cmd)
results['cmd'] = cmd
results['stdout'] = stdout
results['stderr'] = stderr
if rc:
results['msg'] += 'Could not run the command: ' + cmd
results['msg'] += ' Location : ' + location
module.fail_json(**results)
results['msg'] += 'The Image has been created successfully !!'
results['changed'] = True
|
def create_image(module):
"""
Creates the boot image using the user given inputs(attributes).
param module: Ansible module argument spec.
returns: Nothing (Doesn't return anything)
"""
req_space = determine_required_space(module)
for (key, val) in req_space.items():
cmd = '/usr/bin/df -m '
cmd += key
(rc, stdout, stderr) = module.run_command(cmd)
stdout_lines = stdout.split('\n')[1:]
if rc:
results['msg'] += 'Could not validate space.'
results['cmd'] = cmd
results['stdout'] = stdout
results['stderr'] = stderr
module.fail_json(**results)
present_space = 0
current_line = stdout_lines[1].split('\t')
count = 0
for space in current_line[1:]:
if space != '' and count == 1:
present_space = int(space)
break
if space != '':
count += 1
if present_space < val:
if module.params['increase_space']:
increase_disk_space(val - present_space + 1, key)
else:
results['msg'] += 'Not enough space in the ' + key + ' directory, increase the space and try again.'
results['cmd'] = cmd
results['stdout'] = stdout
results['stderr'] = stderr
module.exit_json(**results)
location = module.params['directory'] + '/' + module.params['image_name']
cmd = 'ls ' + location
(rc, stdout, stderr) = module.run_command(cmd)
if not rc:
results['msg'] += 'The Bosboot Image already exists, '
results['msg'] += 'Change the image name or the directory and try again.'
results['cmd'] = cmd
results['stdout'] = stdout
results['stderr'] = stderr
results['changed'] = False
module.exit_json(**results)
cmd = '/usr/sbin/bosboot -a'
if module.params['disk_device']:
cmd += ' -d ' + module.params['disk_device']
if module.params['prototype_file']:
cmd += ' -p ' + module.params['prototype_file']
cmd += ' -b ' + location
if module.params['table_entry']:
cmd += ' -M ' + module.params['table_entry']
(rc, stdout, stderr) = module.run_command(cmd)
results['cmd'] = cmd
results['stdout'] = stdout
results['stderr'] = stderr
if rc:
results['msg'] += 'Could not run the command: ' + cmd
results['msg'] += ' Location : ' + location
module.fail_json(**results)
results['msg'] += 'The Image has been created successfully !!'
results['changed'] = True
|
ansible-power-aix
|
positive
|
def enforce_retention_policy(self, filter_expression: str, rules_spec: str, dry_run: bool=False, keep_metadata_backup: bool=False, group_label: str=None) -> List[Version]:
versions = Version.find_with_filter(filter_expression)
versions_by_volume: Dict[str, List[Version]] = defaultdict(list)
for version in versions:
if version.protected:
logger.info('Not considering version {}, it is protected.'.format(version.uid))
continue
if not version.status.is_removable():
logger.info('Not considering version {}, it has a status of {}.'.format(version.uid, version.status.name))
continue
versions_by_volume[version.volume].append(version)
dismissed_versions: Set[Version] = set()
for versions_slice in versions_by_volume.values():
dismissed_versions |= set(RetentionFilter(rules_spec).filter(versions_slice))
if dismissed_versions and group_label is not None:
additional_versions: Set[Version] = set()
for version in dismissed_versions:
label_match = list(filter(lambda label: label.name == group_label, version.labels.values()))
if not label_match:
continue
assert len(label_match) == 1
additional_versions |= set(Version.find(labels=[(group_label, label_match[0].value)]))
dismissed_versions |= additional_versions
if dismissed_versions:
logger.info('Removing versions: {}.'.format(', '.join(map(lambda version: version.uid, sorted(dismissed_versions)))))
else:
logger.info('All versions are conforming to the retention policy.')
if dry_run:
logger.info('Dry run, will not remove anything.')
return []
for version in list(dismissed_versions):
try:
<DeepExtract>
with Locking.with_version_lock(version.uid, reason='Removing version', override_lock=override_lock):
version = Version.get_by_uid(version.uid)
if version.protected:
raise PermissionError('Version {} is protected, will not delete it.'.format(version.uid))
if not True:
age_days = (datetime.datetime.now() - version.date).days
if disallow_rm_when_younger_than_days > age_days:
raise PermissionError('Version {} is too young. Will not delete.'.format(version.uid))
if not version.status.is_removable():
raise PermissionError('Version {} cannot be removed without force, it has status {}.'.format(version.uid, version.status.name))
num_blocks = version.remove()
if not keep_metadata_backup:
try:
storage = StorageFactory.get_by_name(version.storage.name)
storage.rm_version(version.uid)
logger.info('Removed version {} metadata backup from storage.'.format(version.uid))
except FileNotFoundError:
logger.warning('Unable to remove version {} metadata backup from storage, the object was not found.'.format(version.uid))
pass
logger.info('Removed backup version {} with {} blocks.'.format(version.uid, num_blocks))
</DeepExtract>
except KeyError:
logger.warning(f'Version {version.uid} was removed in the meantime.')
dismissed_versions.remove(version)
except PermissionError as exception:
logger.warning(str(exception))
dismissed_versions.remove(version)
except AlreadyLocked:
logger.warning(f'Version {version.uid} could not be deleted, it is currently locked.')
dismissed_versions.remove(version)
return sorted(dismissed_versions)
|
def enforce_retention_policy(self, filter_expression: str, rules_spec: str, dry_run: bool=False, keep_metadata_backup: bool=False, group_label: str=None) -> List[Version]:
versions = Version.find_with_filter(filter_expression)
versions_by_volume: Dict[str, List[Version]] = defaultdict(list)
for version in versions:
if version.protected:
logger.info('Not considering version {}, it is protected.'.format(version.uid))
continue
if not version.status.is_removable():
logger.info('Not considering version {}, it has a status of {}.'.format(version.uid, version.status.name))
continue
versions_by_volume[version.volume].append(version)
dismissed_versions: Set[Version] = set()
for versions_slice in versions_by_volume.values():
dismissed_versions |= set(RetentionFilter(rules_spec).filter(versions_slice))
if dismissed_versions and group_label is not None:
additional_versions: Set[Version] = set()
for version in dismissed_versions:
label_match = list(filter(lambda label: label.name == group_label, version.labels.values()))
if not label_match:
continue
assert len(label_match) == 1
additional_versions |= set(Version.find(labels=[(group_label, label_match[0].value)]))
dismissed_versions |= additional_versions
if dismissed_versions:
logger.info('Removing versions: {}.'.format(', '.join(map(lambda version: version.uid, sorted(dismissed_versions)))))
else:
logger.info('All versions are conforming to the retention policy.')
if dry_run:
logger.info('Dry run, will not remove anything.')
return []
for version in list(dismissed_versions):
try:
with Locking.with_version_lock(version.uid, reason='Removing version', override_lock=override_lock):
version = Version.get_by_uid(version.uid)
if version.protected:
raise PermissionError('Version {} is protected, will not delete it.'.format(version.uid))
if not True:
age_days = (datetime.datetime.now() - version.date).days
if disallow_rm_when_younger_than_days > age_days:
raise PermissionError('Version {} is too young. Will not delete.'.format(version.uid))
if not version.status.is_removable():
raise PermissionError('Version {} cannot be removed without force, it has status {}.'.format(version.uid, version.status.name))
num_blocks = version.remove()
if not keep_metadata_backup:
try:
storage = StorageFactory.get_by_name(version.storage.name)
storage.rm_version(version.uid)
logger.info('Removed version {} metadata backup from storage.'.format(version.uid))
except FileNotFoundError:
logger.warning('Unable to remove version {} metadata backup from storage, the object was not found.'.format(version.uid))
pass
logger.info('Removed backup version {} with {} blocks.'.format(version.uid, num_blocks))
except KeyError:
logger.warning(f'Version {version.uid} was removed in the meantime.')
dismissed_versions.remove(version)
except PermissionError as exception:
logger.warning(str(exception))
dismissed_versions.remove(version)
except AlreadyLocked:
logger.warning(f'Version {version.uid} could not be deleted, it is currently locked.')
dismissed_versions.remove(version)
return sorted(dismissed_versions)
|
benji
|
positive
|
def generate_choice(self, **lookup_kwargs):
<DeepExtract>
for param in self.request_get:
if param.startswith('%s__gt' % self.lookup_var) or param.startswith('%s__lt' % self.lookup_var):
self.selected_lookup = param
param = param
param = ''
</DeepExtract>
if not param:
return None
elif param.startswith('%s__gt' % self.lookup_var):
return self.CAPTION_NO
elif param.startswith('%s__lte' % self.lookup_var):
return self.CAPTION_YES
return None
|
def generate_choice(self, **lookup_kwargs):
for param in self.request_get:
if param.startswith('%s__gt' % self.lookup_var) or param.startswith('%s__lt' % self.lookup_var):
self.selected_lookup = param
param = param
param = ''
if not param:
return None
elif param.startswith('%s__gt' % self.lookup_var):
return self.CAPTION_NO
elif param.startswith('%s__lte' % self.lookup_var):
return self.CAPTION_YES
return None
|
ella
|
positive
|
def parse_info_specifier(p: Parser) -> Callable[[Parser], Callable | None] | None:
item = p.input[p.pos]
index = p.pos
if p.peek().typ == filenamelexer.ItemType.Space:
p.get()
if p.peek().typ == filenamelexer.ItemType.Number or (p.peek().typ == filenamelexer.ItemType.Text and t2d.convert(p.peek().val).isnumeric()):
number = p.get()
if item.val.casefold() in ['volume', 'vol', 'vol.', 'v']:
p.filename_info['volume'] = t2do.convert(number.val)
p.used_items.append(item)
p.used_items.append(number)
elif item.val.casefold() == 'of':
<DeepExtract>
rev = p.input[:index]
rev.reverse()
for i in rev:
if i.typ in [filenamelexer.ItemType.LeftParen, filenamelexer.ItemType.LeftBrace, filenamelexer.ItemType.LeftSBrace, filenamelexer.ItemType.Space]:
continue
if i.typ in [filenamelexer.ItemType.Number, filenamelexer.ItemType.IssueNumber]:
i = i
i = None
</DeepExtract>
if i is not None:
if p.in_something > 0:
if p.issue_number_at is None:
p.filename_info['issue_count'] = str(int(t2do.convert(number.val)))
p.used_items.append(item)
p.used_items.append(number)
elif p.issue_number_at == i.pos:
p.filename_info['issue_count'] = str(int(t2do.convert(number.val)))
p.used_items.append(item)
p.used_items.append(number)
elif p.issue_number_at != i.pos and i not in p.series_parts and (i not in p.title_parts):
p.filename_info['volume'] = i.val
p.filename_info['volume_count'] = str(int(t2do.convert(number.val)))
p.used_items.append(i)
p.used_items.append(item)
p.used_items.append(number)
else:
pass
else:
p.pos = [ind for (ind, x) in enumerate(p.input) if x == i][0]
if not p.in_something:
return parse_series
return parse
|
def parse_info_specifier(p: Parser) -> Callable[[Parser], Callable | None] | None:
item = p.input[p.pos]
index = p.pos
if p.peek().typ == filenamelexer.ItemType.Space:
p.get()
if p.peek().typ == filenamelexer.ItemType.Number or (p.peek().typ == filenamelexer.ItemType.Text and t2d.convert(p.peek().val).isnumeric()):
number = p.get()
if item.val.casefold() in ['volume', 'vol', 'vol.', 'v']:
p.filename_info['volume'] = t2do.convert(number.val)
p.used_items.append(item)
p.used_items.append(number)
elif item.val.casefold() == 'of':
rev = p.input[:index]
rev.reverse()
for i in rev:
if i.typ in [filenamelexer.ItemType.LeftParen, filenamelexer.ItemType.LeftBrace, filenamelexer.ItemType.LeftSBrace, filenamelexer.ItemType.Space]:
continue
if i.typ in [filenamelexer.ItemType.Number, filenamelexer.ItemType.IssueNumber]:
i = i
i = None
if i is not None:
if p.in_something > 0:
if p.issue_number_at is None:
p.filename_info['issue_count'] = str(int(t2do.convert(number.val)))
p.used_items.append(item)
p.used_items.append(number)
elif p.issue_number_at == i.pos:
p.filename_info['issue_count'] = str(int(t2do.convert(number.val)))
p.used_items.append(item)
p.used_items.append(number)
elif p.issue_number_at != i.pos and i not in p.series_parts and (i not in p.title_parts):
p.filename_info['volume'] = i.val
p.filename_info['volume_count'] = str(int(t2do.convert(number.val)))
p.used_items.append(i)
p.used_items.append(item)
p.used_items.append(number)
else:
pass
else:
p.pos = [ind for (ind, x) in enumerate(p.input) if x == i][0]
if not p.in_something:
return parse_series
return parse
|
comictagger
|
positive
|
def get_camera_model(self):
"""Get the camera make stored in EXIF.
:returns: str
"""
if not self.is_valid():
return None
<DeepExtract>
source = self.source
if self.exif_metadata is None:
self.exif_metadata = ExifTool().get_metadata(source)
if not self.exif_metadata:
exiftool_attributes = False
exiftool_attributes = self.exif_metadata
</DeepExtract>
if exiftool_attributes is None:
return None
for camera_model_key in self.camera_model_keys:
if camera_model_key in exiftool_attributes:
return exiftool_attributes[camera_model_key]
return None
|
def get_camera_model(self):
"""Get the camera make stored in EXIF.
:returns: str
"""
if not self.is_valid():
return None
source = self.source
if self.exif_metadata is None:
self.exif_metadata = ExifTool().get_metadata(source)
if not self.exif_metadata:
exiftool_attributes = False
exiftool_attributes = self.exif_metadata
if exiftool_attributes is None:
return None
for camera_model_key in self.camera_model_keys:
if camera_model_key in exiftool_attributes:
return exiftool_attributes[camera_model_key]
return None
|
elodie
|
positive
|
def bed2cnv(files, out, alpha, verbose):
"""Report deletions/duplications at given threshold"""
if verbose:
sys.stderr.write('Loading BED...\n')
<DeepExtract>
for (i, file) in enumerate(files):
strain = file.name.split('.')[0]
if i:
bed = pd.merge(bed, pd.read_table(file, names=('chr', 'start', 'end', strain)), on=('chr', 'start', 'end'))
else:
bed = pd.read_table(file, names=('chr', 'start', 'end', strain))
bed = bed.sort(columns=('chr', 'start', 'end'))
bed = bed
</DeepExtract>
if verbose:
sys.stderr.write('Normalising and selecting CNVs...\n')
deletions = duplications = np.array([False] * len(bed))
for strain in bed.columns[3:]:
bed[strain] = bed[strain] * 1000.0 / (bed.end - bed.start)
sys.stderr.write('%s %.2f %.2f\n' % (strain, bed[strain].mean(), bed[strain].std()))
bed[strain] = np.log2(bed[strain] / bed[strain].mean())
dels = bed[strain] < bed[strain].quantile(0.0 + alpha / 2)
deletions = deletions + dels
dups = bed[strain] > bed[strain].quantile(1.0 - alpha / 2)
duplications = duplications + dups
sys.stderr.write('%s %s %s\n' % (strain, len(dels.nonzero()[0]), len(dups.nonzero()[0])))
sys.stderr.write('Saving %s deletions and %s duplications.\n' % (deletions.tolist().count(True), duplications.tolist().count(True)))
bed[duplications + deletions].to_excel(out, sheet_name='CNVs', index=0)
bed[duplications + deletions].to_csv(out + '.tsv', sep='\t', header=0, index=0)
|
def bed2cnv(files, out, alpha, verbose):
"""Report deletions/duplications at given threshold"""
if verbose:
sys.stderr.write('Loading BED...\n')
for (i, file) in enumerate(files):
strain = file.name.split('.')[0]
if i:
bed = pd.merge(bed, pd.read_table(file, names=('chr', 'start', 'end', strain)), on=('chr', 'start', 'end'))
else:
bed = pd.read_table(file, names=('chr', 'start', 'end', strain))
bed = bed.sort(columns=('chr', 'start', 'end'))
bed = bed
if verbose:
sys.stderr.write('Normalising and selecting CNVs...\n')
deletions = duplications = np.array([False] * len(bed))
for strain in bed.columns[3:]:
bed[strain] = bed[strain] * 1000.0 / (bed.end - bed.start)
sys.stderr.write('%s %.2f %.2f\n' % (strain, bed[strain].mean(), bed[strain].std()))
bed[strain] = np.log2(bed[strain] / bed[strain].mean())
dels = bed[strain] < bed[strain].quantile(0.0 + alpha / 2)
deletions = deletions + dels
dups = bed[strain] > bed[strain].quantile(1.0 - alpha / 2)
duplications = duplications + dups
sys.stderr.write('%s %s %s\n' % (strain, len(dels.nonzero()[0]), len(dups.nonzero()[0])))
sys.stderr.write('Saving %s deletions and %s duplications.\n' % (deletions.tolist().count(True), duplications.tolist().count(True)))
bed[duplications + deletions].to_excel(out, sheet_name='CNVs', index=0)
bed[duplications + deletions].to_csv(out + '.tsv', sep='\t', header=0, index=0)
|
bin
|
positive
|
def render_azure(jinja_env, forge_config, forge_dir, return_metadata=False):
target_path = os.path.join(forge_dir, 'azure-pipelines.yml')
template_filename = 'azure-pipelines.yml.tmpl'
fast_finish_text = ''
<DeepExtract>
platforms = []
keep_noarchs = []
archs = []
upload_packages = []
for platform_arch in forge_config['build_platform'].keys():
(platform, arch) = platform_arch.split('_')
build_platform_arch = forge_config['build_platform'][platform_arch]
(build_platform, build_arch) = build_platform_arch.split('_')
if build_arch == '64' and build_platform in forge_config['provider'] and forge_config['provider'][build_platform]:
build_platform_arch = build_platform
if build_platform_arch not in forge_config['provider']:
continue
providers = forge_config['provider'][build_platform_arch]
if 'azure' in providers:
platforms.append(platform)
archs.append(arch)
if platform_arch in forge_config['noarch_platforms']:
keep_noarchs.append(True)
else:
keep_noarchs.append(False)
upload_packages.append(forge_config.get('azure', {}).get('upload_packages', True))
elif 'azure' == 'azure' and forge_config['azure']['force'] and (arch == '64'):
platforms.append(platform)
archs.append(arch)
if platform_arch in forge_config['noarch_platforms']:
keep_noarchs.append(True)
else:
keep_noarchs.append(False)
upload_packages.append(False)
(platforms, archs, keep_noarchs, upload_packages) = (platforms, archs, keep_noarchs, upload_packages)
</DeepExtract>
logger.debug('azure platforms retrieved')
remove_file_or_dir(os.path.join(forge_dir, '.azure-pipelines'))
return _render_ci_provider('azure', jinja_env=jinja_env, forge_config=forge_config, forge_dir=forge_dir, platforms=platforms, archs=archs, fast_finish_text=fast_finish_text, platform_target_path=target_path, platform_template_file=template_filename, platform_specific_setup=_azure_specific_setup, keep_noarchs=keep_noarchs, upload_packages=upload_packages, return_metadata=return_metadata)
|
def render_azure(jinja_env, forge_config, forge_dir, return_metadata=False):
target_path = os.path.join(forge_dir, 'azure-pipelines.yml')
template_filename = 'azure-pipelines.yml.tmpl'
fast_finish_text = ''
platforms = []
keep_noarchs = []
archs = []
upload_packages = []
for platform_arch in forge_config['build_platform'].keys():
(platform, arch) = platform_arch.split('_')
build_platform_arch = forge_config['build_platform'][platform_arch]
(build_platform, build_arch) = build_platform_arch.split('_')
if build_arch == '64' and build_platform in forge_config['provider'] and forge_config['provider'][build_platform]:
build_platform_arch = build_platform
if build_platform_arch not in forge_config['provider']:
continue
providers = forge_config['provider'][build_platform_arch]
if 'azure' in providers:
platforms.append(platform)
archs.append(arch)
if platform_arch in forge_config['noarch_platforms']:
keep_noarchs.append(True)
else:
keep_noarchs.append(False)
upload_packages.append(forge_config.get('azure', {}).get('upload_packages', True))
elif 'azure' == 'azure' and forge_config['azure']['force'] and (arch == '64'):
platforms.append(platform)
archs.append(arch)
if platform_arch in forge_config['noarch_platforms']:
keep_noarchs.append(True)
else:
keep_noarchs.append(False)
upload_packages.append(False)
(platforms, archs, keep_noarchs, upload_packages) = (platforms, archs, keep_noarchs, upload_packages)
logger.debug('azure platforms retrieved')
remove_file_or_dir(os.path.join(forge_dir, '.azure-pipelines'))
return _render_ci_provider('azure', jinja_env=jinja_env, forge_config=forge_config, forge_dir=forge_dir, platforms=platforms, archs=archs, fast_finish_text=fast_finish_text, platform_target_path=target_path, platform_template_file=template_filename, platform_specific_setup=_azure_specific_setup, keep_noarchs=keep_noarchs, upload_packages=upload_packages, return_metadata=return_metadata)
|
conda-smithy
|
positive
|
def get_link_kwargs(self, **kwargs):
"""
:rtype: dict
"""
assert self.endpoint.state, 'link creation must come from a dispatched endpoint'
params = dict(self.link_kwargs)
params.setdefault('description', self.get_link_description())
params.update(kwargs)
if params.pop('use_request_url', False):
params['url'] = self.api_request.get_full_path()
<DeepExtract>
if self.form_kwargs:
kwargs = copy(self.form_kwargs)
else:
kwargs = dict()
kwargs.update(form_kwargs)
params['form_kwargs'] = kwargs
</DeepExtract>
return self.endpoint.get_link_kwargs(**params)
|
def get_link_kwargs(self, **kwargs):
"""
:rtype: dict
"""
assert self.endpoint.state, 'link creation must come from a dispatched endpoint'
params = dict(self.link_kwargs)
params.setdefault('description', self.get_link_description())
params.update(kwargs)
if params.pop('use_request_url', False):
params['url'] = self.api_request.get_full_path()
if self.form_kwargs:
kwargs = copy(self.form_kwargs)
else:
kwargs = dict()
kwargs.update(form_kwargs)
params['form_kwargs'] = kwargs
return self.endpoint.get_link_kwargs(**params)
|
django-hyperadmin
|
positive
|
def init(self, routine):
"""
Numerical initialization of a model.
Initialization sequence:
1. Sequential initialization based on the order of definition
2. Use Newton-Krylov method for iterative initialization
3. Custom init
"""
<DeepExtract>
for (name, instance) in self.services.items():
if name in self.calls.s:
func = self.calls.s[name]
if callable(func):
self.get_inputs(refresh=True)
instance.v = np.array(func(*self.s_args[name]), dtype=instance.vtype).ravel()
else:
instance.v = np.array(func, dtype=instance.vtype).ravel()
if isinstance(instance.v, (int, float)):
instance.v = np.ones(self.n, dtype=instance.vtype) * instance.v
elif isinstance(instance.v, np.ndarray) and len(instance.v) == 1:
instance.v = np.ones(self.n, dtype=instance.vtype) * instance.v
func = instance.v_numeric
if func is not None and callable(func):
kwargs = self.get_inputs(refresh=True)
instance.v = func(**kwargs).copy().astype(instance.vtype)
if self.flags.s_num is True:
kwargs = self.get_inputs(refresh=True)
self.s_numeric(**kwargs)
self.get_inputs(refresh=True)
</DeepExtract>
<DeepExtract>
if len(self.services_var):
kwargs = self.get_inputs()
for (name, instance) in self.services_var_seq.items():
if instance.v_str is not None:
func = self.calls.s[name]
if callable(func):
instance.v[:] = func(*self.s_args[name])
if callable(self.calls.sns):
ret = self.calls.sns(*self.sns_args)
for (idx, instance) in enumerate(self.services_var_nonseq.values()):
instance.v[:] = ret[idx]
for instance in self.services_var.values():
if instance.v_numeric is None:
continue
if callable(instance.v_numeric):
instance.v[:] = instance.v_numeric(**kwargs)
if self.flags.sv_num is True:
kwargs = self.get_inputs()
self.s_numeric_var(**kwargs)
</DeepExtract>
flag_name = routine + '_init'
if not hasattr(self.flags, flag_name) or getattr(self.flags, flag_name) is None:
do_init = getattr(self.flags, routine)
else:
do_init = getattr(self.flags, flag_name)
sys_debug = self.system.options.get('init')
logger.debug('========== %s has <%s> = %s ==========', self.class_name, flag_name, do_init)
if do_init:
<DeepExtract>
if len(self._input) == 0 or True:
self.refresh_inputs()
self.refresh_inputs_arg()
kwargs = self._input
</DeepExtract>
logger.debug('Initialization sequence:')
seq_str = ' -> '.join([str(i) for i in self.calls.init_seq])
logger.debug('\n'.join(wrap(seq_str, 70)))
logger.debug('%s: assignment initialization phase begins', self.class_name)
for (idx, name) in enumerate(self.calls.init_seq):
debug_flag = sys_debug or name in self.debug_equations
if isinstance(name, str):
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
instance = self.__dict__[name]
if instance.discrete is not None:
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
<DeepExtract>
if not isinstance(instance.discrete, (list, tuple, set)):
dlist = (instance.discrete,)
else:
dlist = instance.discrete
for d in dlist:
d.check_var(allow_adjust=self.config.allow_adjust, adjust_lower=self.config.adjust_lower, adjust_upper=self.config.adjust_upper, is_init=True)
d.check_eq(allow_adjust=self.config.allow_adjust, adjust_lower=self.config.adjust_lower, adjust_upper=self.config.adjust_upper, is_init=True)
</DeepExtract>
if instance.v_str is not None:
arg_print = OrderedDict()
if debug_flag:
for (a, b) in zip(self.calls.ia_args[name], self.ia_args[name]):
arg_print[a] = b
if not instance.v_str_add:
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
instance.v[:] = self.calls.ia[name](*self.ia_args[name])
else:
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
instance.v[:] += self.calls.ia[name](*self.ia_args[name])
arg_print[name] = instance.v
if debug_flag:
for (key, val) in arg_print.items():
if isinstance(val, (int, float, np.floating, np.integer)) or (isinstance(val, np.ndarray) and val.ndim == 0):
arg_print[key] = val * np.ones_like(instance.v)
if isinstance(val, np.ndarray) and val.dtype == complex:
arg_print[key] = [str(i) for i in val]
tab = Tab(title="v_str of %s is '%s'" % (name, instance.v_str), header=['idx', *self.calls.ia_args[name], name], data=list(zip(self.idx.v, *arg_print.values())))
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
if name in self.calls.ii:
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
<DeepExtract>
for pos in range(self.n):
logger.debug('%s: iterative init for %s, device pos = %s', self.class_name, name, pos)
self.solve_iter_single(name, kwargs, pos)
</DeepExtract>
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
else:
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
for vv in name:
instance = self.__dict__[vv]
if instance.discrete is not None:
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
<DeepExtract>
if not isinstance(instance.discrete, (list, tuple, set)):
dlist = (instance.discrete,)
else:
dlist = instance.discrete
for d in dlist:
d.check_var(allow_adjust=self.config.allow_adjust, adjust_lower=self.config.adjust_lower, adjust_upper=self.config.adjust_upper, is_init=True)
d.check_eq(allow_adjust=self.config.allow_adjust, adjust_lower=self.config.adjust_lower, adjust_upper=self.config.adjust_upper, is_init=True)
</DeepExtract>
if instance.v_str is not None:
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
arg_print = OrderedDict()
if debug_flag:
for (a, b) in zip(self.calls.ia_args[vv], self.ia_args[vv]):
arg_print[a] = b
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
instance.v[:] = self.calls.ia[vv](*self.ia_args[vv])
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
<DeepExtract>
for pos in range(self.n):
logger.debug('%s: iterative init for %s, device pos = %s', self.class_name, name, pos)
self.solve_iter_single(name, kwargs, pos)
</DeepExtract>
for vv in name:
instance = self.__dict__[vv]
<DeepExtract>
if debug_flag is True:
logger.debug(*args)
</DeepExtract>
<DeepExtract>
if len(self._input) == 0 or True:
self.refresh_inputs()
self.refresh_inputs_arg()
kwargs = self._input
</DeepExtract>
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
self.get_inputs(refresh=True)
if self.system.config.warn_abnormal:
for item in self.services_icheck.values():
item.check()
</DeepExtract>
self.flags.initialized = True
|
def init(self, routine):
"""
Numerical initialization of a model.
Initialization sequence:
1. Sequential initialization based on the order of definition
2. Use Newton-Krylov method for iterative initialization
3. Custom init
"""
for (name, instance) in self.services.items():
if name in self.calls.s:
func = self.calls.s[name]
if callable(func):
self.get_inputs(refresh=True)
instance.v = np.array(func(*self.s_args[name]), dtype=instance.vtype).ravel()
else:
instance.v = np.array(func, dtype=instance.vtype).ravel()
if isinstance(instance.v, (int, float)):
instance.v = np.ones(self.n, dtype=instance.vtype) * instance.v
elif isinstance(instance.v, np.ndarray) and len(instance.v) == 1:
instance.v = np.ones(self.n, dtype=instance.vtype) * instance.v
func = instance.v_numeric
if func is not None and callable(func):
kwargs = self.get_inputs(refresh=True)
instance.v = func(**kwargs).copy().astype(instance.vtype)
if self.flags.s_num is True:
kwargs = self.get_inputs(refresh=True)
self.s_numeric(**kwargs)
self.get_inputs(refresh=True)
if len(self.services_var):
kwargs = self.get_inputs()
for (name, instance) in self.services_var_seq.items():
if instance.v_str is not None:
func = self.calls.s[name]
if callable(func):
instance.v[:] = func(*self.s_args[name])
if callable(self.calls.sns):
ret = self.calls.sns(*self.sns_args)
for (idx, instance) in enumerate(self.services_var_nonseq.values()):
instance.v[:] = ret[idx]
for instance in self.services_var.values():
if instance.v_numeric is None:
continue
if callable(instance.v_numeric):
instance.v[:] = instance.v_numeric(**kwargs)
if self.flags.sv_num is True:
kwargs = self.get_inputs()
self.s_numeric_var(**kwargs)
flag_name = routine + '_init'
if not hasattr(self.flags, flag_name) or getattr(self.flags, flag_name) is None:
do_init = getattr(self.flags, routine)
else:
do_init = getattr(self.flags, flag_name)
sys_debug = self.system.options.get('init')
logger.debug('========== %s has <%s> = %s ==========', self.class_name, flag_name, do_init)
if do_init:
if len(self._input) == 0 or True:
self.refresh_inputs()
self.refresh_inputs_arg()
kwargs = self._input
logger.debug('Initialization sequence:')
seq_str = ' -> '.join([str(i) for i in self.calls.init_seq])
logger.debug('\n'.join(wrap(seq_str, 70)))
logger.debug('%s: assignment initialization phase begins', self.class_name)
for (idx, name) in enumerate(self.calls.init_seq):
debug_flag = sys_debug or name in self.debug_equations
if isinstance(name, str):
if debug_flag is True:
logger.debug(*args)
instance = self.__dict__[name]
if instance.discrete is not None:
if debug_flag is True:
logger.debug(*args)
if not isinstance(instance.discrete, (list, tuple, set)):
dlist = (instance.discrete,)
else:
dlist = instance.discrete
for d in dlist:
d.check_var(allow_adjust=self.config.allow_adjust, adjust_lower=self.config.adjust_lower, adjust_upper=self.config.adjust_upper, is_init=True)
d.check_eq(allow_adjust=self.config.allow_adjust, adjust_lower=self.config.adjust_lower, adjust_upper=self.config.adjust_upper, is_init=True)
if instance.v_str is not None:
arg_print = OrderedDict()
if debug_flag:
for (a, b) in zip(self.calls.ia_args[name], self.ia_args[name]):
arg_print[a] = b
if not instance.v_str_add:
if debug_flag is True:
logger.debug(*args)
instance.v[:] = self.calls.ia[name](*self.ia_args[name])
else:
if debug_flag is True:
logger.debug(*args)
instance.v[:] += self.calls.ia[name](*self.ia_args[name])
arg_print[name] = instance.v
if debug_flag:
for (key, val) in arg_print.items():
if isinstance(val, (int, float, np.floating, np.integer)) or (isinstance(val, np.ndarray) and val.ndim == 0):
arg_print[key] = val * np.ones_like(instance.v)
if isinstance(val, np.ndarray) and val.dtype == complex:
arg_print[key] = [str(i) for i in val]
tab = Tab(title="v_str of %s is '%s'" % (name, instance.v_str), header=['idx', *self.calls.ia_args[name], name], data=list(zip(self.idx.v, *arg_print.values())))
if debug_flag is True:
logger.debug(*args)
if name in self.calls.ii:
if debug_flag is True:
logger.debug(*args)
for pos in range(self.n):
logger.debug('%s: iterative init for %s, device pos = %s', self.class_name, name, pos)
self.solve_iter_single(name, kwargs, pos)
if debug_flag is True:
logger.debug(*args)
else:
if debug_flag is True:
logger.debug(*args)
for vv in name:
instance = self.__dict__[vv]
if instance.discrete is not None:
if debug_flag is True:
logger.debug(*args)
if not isinstance(instance.discrete, (list, tuple, set)):
dlist = (instance.discrete,)
else:
dlist = instance.discrete
for d in dlist:
d.check_var(allow_adjust=self.config.allow_adjust, adjust_lower=self.config.adjust_lower, adjust_upper=self.config.adjust_upper, is_init=True)
d.check_eq(allow_adjust=self.config.allow_adjust, adjust_lower=self.config.adjust_lower, adjust_upper=self.config.adjust_upper, is_init=True)
if instance.v_str is not None:
if debug_flag is True:
logger.debug(*args)
arg_print = OrderedDict()
if debug_flag:
for (a, b) in zip(self.calls.ia_args[vv], self.ia_args[vv]):
arg_print[a] = b
if debug_flag is True:
logger.debug(*args)
instance.v[:] = self.calls.ia[vv](*self.ia_args[vv])
if debug_flag is True:
logger.debug(*args)
for pos in range(self.n):
logger.debug('%s: iterative init for %s, device pos = %s', self.class_name, name, pos)
self.solve_iter_single(name, kwargs, pos)
for vv in name:
instance = self.__dict__[vv]
if debug_flag is True:
logger.debug(*args)
if len(self._input) == 0 or True:
self.refresh_inputs()
self.refresh_inputs_arg()
kwargs = self._input
pass
self.get_inputs(refresh=True)
if self.system.config.warn_abnormal:
for item in self.services_icheck.values():
item.check()
self.flags.initialized = True
|
andes
|
positive
|
def string_replace(args):
<DeepExtract>
params = {}
index = 1
for var in ('source', 'pattern', 'replace', 'count', 'plain'):
value = args.get(var)
if value is None:
value = args.get(str(index))
if value is None:
value = ''
else:
index += 1
params[var] = value
params = params
</DeepExtract>
source = params.get('source', '')
pattern = params.get('pattern', '')
replace = params.get('replace', '')
count = int(params.get('count', 0) or 0)
plain = int(params.get('plain', 1) or 1)
if plain:
if count:
return source.replace(pattern, replace, count)
else:
return source.replace(pattern, replace)
else:
return re.compile(pattern).sub(replace, source, count)
|
def string_replace(args):
params = {}
index = 1
for var in ('source', 'pattern', 'replace', 'count', 'plain'):
value = args.get(var)
if value is None:
value = args.get(str(index))
if value is None:
value = ''
else:
index += 1
params[var] = value
params = params
source = params.get('source', '')
pattern = params.get('pattern', '')
replace = params.get('replace', '')
count = int(params.get('count', 0) or 0)
plain = int(params.get('plain', 1) or 1)
if plain:
if count:
return source.replace(pattern, replace, count)
else:
return source.replace(pattern, replace)
else:
return re.compile(pattern).sub(replace, source, count)
|
DistillBERT
|
positive
|
def get_APInt(self, manager):
<DeepExtract>
if self.op == CnstFunction.abs:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('abs', []))
if self.op == CnstFunction.sbits:
(wrap, cexp) = (True, CFunctionCall('ComputeNumSignBits', manager.get_cexp(self.args[0])))
if self.op == CnstFunction.obits:
(wrap, cexp) = (False, CFunctionCall('computeKnownOneBits', manager.get_cexp(self.args[0]), CVariable('I')))
if self.op == CnstFunction.zbits:
(wrap, cexp) = (False, CFunctionCall('computeKnownZeroBits', manager.get_cexp(self.args[0]), CVariable('I')))
if self.op == CnstFunction.ctlz:
(wrap, cexp) = (True, self.args[0].get_APInt(manager).dot('countLeadingZeros', []))
if self.op == CnstFunction.cttz:
(wrap, cexp) = (True, self.args[0].get_APInt(manager).dot('countTrailingZeros', []))
if self.op == CnstFunction.log2:
(wrap, cexp) = (True, self.args[0].get_APInt(manager).dot('logBase2', []))
if self.op == CnstFunction.lshr:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('lshr', [self.args[1].get_APInt_or_u64(manager)]))
if self.op == CnstFunction.max:
(wrap, cexp) = (False, CFunctionCall('APIntOps::smax', self.args[0].get_APInt(manager), self.args[1].get_APInt(manager)))
if self.op == CnstFunction.sext:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('sext', [manager.get_llvm_type(self).arr('getScalarSizeInBits', [])]))
if self.op == CnstFunction.trunc:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('trunc', [manager.get_llvm_type(self).arr('getScalarSizeInBits', [])]))
if self.op == CnstFunction.umax:
(wrap, cexp) = (False, CFunctionCall('APIntOps::umax', self.args[0].get_APInt(manager), self.args[1].get_APInt(manager)))
if self.op == CnstFunction.width:
(wrap, cexp) = (True, manager.get_llvm_type(self.args[0]).arr('getScalarSizeInBits', []))
if self.op == CnstFunction.zext:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('zext', [manager.get_llvm_type(self).arr('getScalarSizeInBits', [])]))
raise AliveError(self.opnames[self.op] + ' not implemented')
</DeepExtract>
if wrap:
return CFunctionCall('APInt', manager.get_llvm_type(self).arr('getScalarSizeInBits', []), cexp)
return cexp
return self.get_Value(manager).arr('getValue', [])
|
def get_APInt(self, manager):
if self.op == CnstFunction.abs:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('abs', []))
if self.op == CnstFunction.sbits:
(wrap, cexp) = (True, CFunctionCall('ComputeNumSignBits', manager.get_cexp(self.args[0])))
if self.op == CnstFunction.obits:
(wrap, cexp) = (False, CFunctionCall('computeKnownOneBits', manager.get_cexp(self.args[0]), CVariable('I')))
if self.op == CnstFunction.zbits:
(wrap, cexp) = (False, CFunctionCall('computeKnownZeroBits', manager.get_cexp(self.args[0]), CVariable('I')))
if self.op == CnstFunction.ctlz:
(wrap, cexp) = (True, self.args[0].get_APInt(manager).dot('countLeadingZeros', []))
if self.op == CnstFunction.cttz:
(wrap, cexp) = (True, self.args[0].get_APInt(manager).dot('countTrailingZeros', []))
if self.op == CnstFunction.log2:
(wrap, cexp) = (True, self.args[0].get_APInt(manager).dot('logBase2', []))
if self.op == CnstFunction.lshr:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('lshr', [self.args[1].get_APInt_or_u64(manager)]))
if self.op == CnstFunction.max:
(wrap, cexp) = (False, CFunctionCall('APIntOps::smax', self.args[0].get_APInt(manager), self.args[1].get_APInt(manager)))
if self.op == CnstFunction.sext:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('sext', [manager.get_llvm_type(self).arr('getScalarSizeInBits', [])]))
if self.op == CnstFunction.trunc:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('trunc', [manager.get_llvm_type(self).arr('getScalarSizeInBits', [])]))
if self.op == CnstFunction.umax:
(wrap, cexp) = (False, CFunctionCall('APIntOps::umax', self.args[0].get_APInt(manager), self.args[1].get_APInt(manager)))
if self.op == CnstFunction.width:
(wrap, cexp) = (True, manager.get_llvm_type(self.args[0]).arr('getScalarSizeInBits', []))
if self.op == CnstFunction.zext:
(wrap, cexp) = (False, self.args[0].get_APInt(manager).dot('zext', [manager.get_llvm_type(self).arr('getScalarSizeInBits', [])]))
raise AliveError(self.opnames[self.op] + ' not implemented')
if wrap:
return CFunctionCall('APInt', manager.get_llvm_type(self).arr('getScalarSizeInBits', []), cexp)
return cexp
return self.get_Value(manager).arr('getValue', [])
|
alive
|
positive
|
def remove_account(user):
"""
Remove user's account:
* Mark the account as inactive.
* Remove all information from user profile.
* Assign a random e-mail.
* Set user name to Anonymous.
# TODO:
* Remove all boards?
* Remove all conversations created by the user?
"""
if hasattr(user, 'profile'):
<DeepExtract>
profile = user.profile.__class__(user=user, id=user.profile.id)
profile.save()
</DeepExtract>
email = user.email
user.is_active = False
user.is_superuser = False
user.is_staff = False
user.name = _('Anonymous')
user.save()
new_email = f'anonymous-{user.id}@deleted-account'
User.objects.filter(id=user.id).update(email=new_email)
log.info(f'{email} removed account')
|
def remove_account(user):
"""
Remove user's account:
* Mark the account as inactive.
* Remove all information from user profile.
* Assign a random e-mail.
* Set user name to Anonymous.
# TODO:
* Remove all boards?
* Remove all conversations created by the user?
"""
if hasattr(user, 'profile'):
profile = user.profile.__class__(user=user, id=user.profile.id)
profile.save()
email = user.email
user.is_active = False
user.is_superuser = False
user.is_staff = False
user.name = _('Anonymous')
user.save()
new_email = f'anonymous-{user.id}@deleted-account'
User.objects.filter(id=user.id).update(email=new_email)
log.info(f'{email} removed account')
|
ej-server
|
positive
|
def update_or_create_resource(self):
log('ModuleExecutor.update_or_create_resource()')
if not self.resource_exists():
self.module_result['changed'] = True
self.prepared_list.append('Create resource')
if not self.module.check_mode:
<DeepExtract>
log('ModuleExecutor.resource_create()')
if self.lifecycle == 'object':
self.object_create()
elif self.lifecycle == 'binding':
self.binding_create()
elif self.lifecycle == 'bindings_list':
self.bindings_list_create()
elif self.lifecycle == 'non_updateable_object':
return self.non_updateable_object_create()
elif self.lifecycle == 'object_by_args':
self.object_by_args_create()
elif self.lifecycle == 'parameter_object':
self.parameter_object_create()
</DeepExtract>
elif not self.resource_identical():
self.module_result['changed'] = True
if not self.module.check_mode:
<DeepExtract>
log('ModuleExecutor.resource_update()')
if self.lifecycle == 'object':
self.object_update()
elif self.lifecycle == 'binding':
self.binding_update()
elif self.lifecycle == 'bindings_list':
self.bindings_list_update()
elif self.lifecycle == 'non_updateable_object':
return self.non_updateable_object_update()
elif self.lifecycle == 'object_by_args':
self.object_by_args_update()
elif self.lifecycle == 'parameter_object':
self.parameter_object_update()
</DeepExtract>
else:
log('Existing resource has identical values to configured.')
|
def update_or_create_resource(self):
log('ModuleExecutor.update_or_create_resource()')
if not self.resource_exists():
self.module_result['changed'] = True
self.prepared_list.append('Create resource')
if not self.module.check_mode:
log('ModuleExecutor.resource_create()')
if self.lifecycle == 'object':
self.object_create()
elif self.lifecycle == 'binding':
self.binding_create()
elif self.lifecycle == 'bindings_list':
self.bindings_list_create()
elif self.lifecycle == 'non_updateable_object':
return self.non_updateable_object_create()
elif self.lifecycle == 'object_by_args':
self.object_by_args_create()
elif self.lifecycle == 'parameter_object':
self.parameter_object_create()
elif not self.resource_identical():
self.module_result['changed'] = True
if not self.module.check_mode:
log('ModuleExecutor.resource_update()')
if self.lifecycle == 'object':
self.object_update()
elif self.lifecycle == 'binding':
self.binding_update()
elif self.lifecycle == 'bindings_list':
self.bindings_list_update()
elif self.lifecycle == 'non_updateable_object':
return self.non_updateable_object_update()
elif self.lifecycle == 'object_by_args':
self.object_by_args_update()
elif self.lifecycle == 'parameter_object':
self.parameter_object_update()
else:
log('Existing resource has identical values to configured.')
|
citrix-adc-ansible-modules
|
positive
|
def add_token_to_travis(user, project):
"""Add the BINSTAR_TOKEN to travis."""
<DeepExtract>
try:
anaconda_token = anaconda_token
except NameError:
        raise RuntimeError('You must have the anaconda token defined to do CI registration. This requirement can be overridden by specifying `--without-anaconda-token`')
</DeepExtract>
<DeepExtract>
headers = {'User-Agent': 'Travis/1.0', 'Accept': 'application/json', 'Content-Type': 'application/json', 'Travis-API-Version': '3'}
travis_token = os.path.expanduser('~/.conda-smithy/travis.token')
try:
with open(travis_token, 'r') as fh:
token = fh.read().strip()
if not token:
raise ValueError
except (IOError, ValueError):
v2_headers = headers.copy()
v2_headers['Accept'] = 'application/vnd.travis-ci.2+json'
del v2_headers['Travis-API-Version']
url = '{}/auth/github'.format(travis_endpoint)
data = {'github_token': github.gh_token()}
response = requests.post(url, json=data, headers=v2_headers)
if response.status_code != 201:
response.raise_for_status()
token = response.json()['access_token']
with open(travis_token, 'w') as fh:
fh.write(token)
headers['Authorization'] = 'token {}'.format(token)
headers = headers
</DeepExtract>
<DeepExtract>
headers = travis_headers()
url = '{}/repo/{user}%2F{project}'.format(travis_endpoint, user=user, project=project)
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
content = response.json()
repo_info = content
except requests.HTTPError as e:
if show_error or True:
print(e)
repo_info = {}
</DeepExtract>
if not repo_info:
msg = 'Unable to retrieve repo info from Travis\n(Is it down? Is the "{}/{}" name spelt correctly? [note: case sensitive])'
raise RuntimeError(msg.format(user, project))
repo_id = repo_info['id']
r = requests.get('{}/repo/{repo_id}/env_vars'.format(travis_endpoint, repo_id=repo_id), headers=headers)
if r.status_code != 200:
r.raise_for_status()
have_token = False
ev_id = None
for ev in r.json()['env_vars']:
if ev['name'] == 'BINSTAR_TOKEN':
have_token = True
ev_id = ev['id']
data = {'env_var.name': 'BINSTAR_TOKEN', 'env_var.value': anaconda_token, 'env_var.public': 'false'}
if have_token:
r = requests.patch('{}/repo/{repo_id}/env_var/{ev_id}'.format(travis_endpoint, repo_id=repo_id, ev_id=ev_id), headers=headers, json=data)
r.raise_for_status()
else:
r = requests.post('{}/repo/{repo_id}/env_vars'.format(travis_endpoint, repo_id=repo_id), headers=headers, json=data)
if r.status_code != 201:
r.raise_for_status()
|
def add_token_to_travis(user, project):
"""Add the BINSTAR_TOKEN to travis."""
try:
anaconda_token = anaconda_token
except NameError:
        raise RuntimeError('You must have the anaconda token defined to do CI registration. This requirement can be overridden by specifying `--without-anaconda-token`')
headers = {'User-Agent': 'Travis/1.0', 'Accept': 'application/json', 'Content-Type': 'application/json', 'Travis-API-Version': '3'}
travis_token = os.path.expanduser('~/.conda-smithy/travis.token')
try:
with open(travis_token, 'r') as fh:
token = fh.read().strip()
if not token:
raise ValueError
except (IOError, ValueError):
v2_headers = headers.copy()
v2_headers['Accept'] = 'application/vnd.travis-ci.2+json'
del v2_headers['Travis-API-Version']
url = '{}/auth/github'.format(travis_endpoint)
data = {'github_token': github.gh_token()}
response = requests.post(url, json=data, headers=v2_headers)
if response.status_code != 201:
response.raise_for_status()
token = response.json()['access_token']
with open(travis_token, 'w') as fh:
fh.write(token)
headers['Authorization'] = 'token {}'.format(token)
headers = headers
headers = travis_headers()
url = '{}/repo/{user}%2F{project}'.format(travis_endpoint, user=user, project=project)
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
content = response.json()
repo_info = content
except requests.HTTPError as e:
if show_error or True:
print(e)
repo_info = {}
if not repo_info:
msg = 'Unable to retrieve repo info from Travis\n(Is it down? Is the "{}/{}" name spelt correctly? [note: case sensitive])'
raise RuntimeError(msg.format(user, project))
repo_id = repo_info['id']
r = requests.get('{}/repo/{repo_id}/env_vars'.format(travis_endpoint, repo_id=repo_id), headers=headers)
if r.status_code != 200:
r.raise_for_status()
have_token = False
ev_id = None
for ev in r.json()['env_vars']:
if ev['name'] == 'BINSTAR_TOKEN':
have_token = True
ev_id = ev['id']
data = {'env_var.name': 'BINSTAR_TOKEN', 'env_var.value': anaconda_token, 'env_var.public': 'false'}
if have_token:
r = requests.patch('{}/repo/{repo_id}/env_var/{ev_id}'.format(travis_endpoint, repo_id=repo_id, ev_id=ev_id), headers=headers, json=data)
r.raise_for_status()
else:
r = requests.post('{}/repo/{repo_id}/env_vars'.format(travis_endpoint, repo_id=repo_id), headers=headers, json=data)
if r.status_code != 201:
r.raise_for_status()
|
conda-smithy
|
positive
|
def _check_cloudwatch_alarm_states(self):
return_result_list = []
tmp = None
for metric in self.metrics:
<DeepExtract>
alarms_response = self.cloudwatch_client.describe_alarms_for_metric(MetricName=metric.metric_name, Namespace=self.git_metric_namespace, Dimensions=metric.metric_dimensions)
return_result = [True, None, metric.metric_alarm_severity]
for metric_alarm_dict in alarms_response['MetricAlarms']:
if metric_alarm_dict['StateValue'] == 'ALARM':
return_result[0] = False
return_result[1] = metric_alarm_dict['AlarmName']
break
tmp = return_result
</DeepExtract>
if tmp[1] != None:
            if 'Alive_Alarm' not in tmp[1]:
if tmp[0] != True:
return_result_list.append(tmp)
return return_result_list
|
def _check_cloudwatch_alarm_states(self):
return_result_list = []
tmp = None
for metric in self.metrics:
alarms_response = self.cloudwatch_client.describe_alarms_for_metric(MetricName=metric.metric_name, Namespace=self.git_metric_namespace, Dimensions=metric.metric_dimensions)
return_result = [True, None, metric.metric_alarm_severity]
for metric_alarm_dict in alarms_response['MetricAlarms']:
if metric_alarm_dict['StateValue'] == 'ALARM':
return_result[0] = False
return_result[1] = metric_alarm_dict['AlarmName']
break
tmp = return_result
if tmp[1] != None:
            if 'Alive_Alarm' not in tmp[1]:
if tmp[0] != True:
return_result_list.append(tmp)
return return_result_list
|
aws-crt-python
|
positive
|
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.api_id = kwargs['api_id']
self.body['user_id'] = kwargs['user_id']
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
self.url = '/subscriptions' + '/{{ subscription_id }}' + '/resourceGroups' + '/{{ resource_group }}' + '/providers' + '/Microsoft.ApiManagement' + '/service' + '/{{ service_name }}' + '/apis' + '/{{ api_name }}' + '/issues' + '/{{ issue_name }}'
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ service_name }}', self.service_name)
self.url = self.url.replace('{{ api_name }}', self.api_id)
self.url = self.url.replace('{{ issue_name }}', self.issue_id)
<DeepExtract>
found = False
try:
response = self.mgmt_client.query(self.url, 'GET', self.query_parameters, self.header_parameters, None, self.status_code, 600, 30)
found = True
self.log('Response : {0}'.format(response))
except CloudError as e:
self.log('Did not find the ApiIssue instance.')
if found is True:
old_response = response
old_response = False
</DeepExtract>
if not old_response:
self.log("ApiIssue instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log('ApiIssue instance already exists')
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if self.to_do == Actions.Create or self.to_do == Actions.Update:
self.log('Need to Create / Update the ApiIssue instance')
if self.check_mode:
self.results['changed'] = True
return self.results
<DeepExtract>
try:
response = self.mgmt_client.query(self.url, 'PUT', self.query_parameters, self.header_parameters, self.body, self.status_code, 600, 30)
except CloudError as exc:
self.log('Error attempting to create the ApiIssue instance.')
self.fail('Error creating the ApiIssue instance: {0}'.format(str(exc)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
pass
response = response
</DeepExtract>
self.results['changed'] = True
self.log('Creation / Update done')
elif self.to_do == Actions.Delete:
self.log('ApiIssue instance deleted')
self.results['changed'] = True
if self.check_mode:
return self.results
<DeepExtract>
try:
response = self.mgmt_client.query(self.url, 'DELETE', self.query_parameters, self.header_parameters, None, self.status_code, 600, 30)
except CloudError as e:
self.log('Error attempting to delete the ApiIssue instance.')
self.fail('Error deleting the ApiIssue instance: {0}'.format(str(e)))
return True
</DeepExtract>
while self.get_resource():
time.sleep(20)
else:
self.log('ApiIssue instance unchanged')
self.results['changed'] = False
response = old_response
if response:
self.results['id'] = response['id']
self.results['name'] = response['name']
self.results['type'] = response['type']
self.results['properties'] = response['properties']
return self.results
|
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.api_id = kwargs['api_id']
self.body['user_id'] = kwargs['user_id']
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
self.url = '/subscriptions' + '/{{ subscription_id }}' + '/resourceGroups' + '/{{ resource_group }}' + '/providers' + '/Microsoft.ApiManagement' + '/service' + '/{{ service_name }}' + '/apis' + '/{{ api_name }}' + '/issues' + '/{{ issue_name }}'
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ service_name }}', self.service_name)
self.url = self.url.replace('{{ api_name }}', self.api_id)
self.url = self.url.replace('{{ issue_name }}', self.issue_id)
found = False
try:
response = self.mgmt_client.query(self.url, 'GET', self.query_parameters, self.header_parameters, None, self.status_code, 600, 30)
found = True
self.log('Response : {0}'.format(response))
except CloudError as e:
self.log('Did not find the ApiIssue instance.')
if found is True:
old_response = response
old_response = False
if not old_response:
self.log("ApiIssue instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log('ApiIssue instance already exists')
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if self.to_do == Actions.Create or self.to_do == Actions.Update:
self.log('Need to Create / Update the ApiIssue instance')
if self.check_mode:
self.results['changed'] = True
return self.results
try:
response = self.mgmt_client.query(self.url, 'PUT', self.query_parameters, self.header_parameters, self.body, self.status_code, 600, 30)
except CloudError as exc:
self.log('Error attempting to create the ApiIssue instance.')
self.fail('Error creating the ApiIssue instance: {0}'.format(str(exc)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
pass
response = response
self.results['changed'] = True
self.log('Creation / Update done')
elif self.to_do == Actions.Delete:
self.log('ApiIssue instance deleted')
self.results['changed'] = True
if self.check_mode:
return self.results
try:
response = self.mgmt_client.query(self.url, 'DELETE', self.query_parameters, self.header_parameters, None, self.status_code, 600, 30)
except CloudError as e:
self.log('Error attempting to delete the ApiIssue instance.')
self.fail('Error deleting the ApiIssue instance: {0}'.format(str(e)))
return True
while self.get_resource():
time.sleep(20)
else:
self.log('ApiIssue instance unchanged')
self.results['changed'] = False
response = old_response
if response:
self.results['id'] = response['id']
self.results['name'] = response['name']
self.results['type'] = response['type']
self.results['properties'] = response['properties']
return self.results
|
AnsibleLabs
|
positive
|
def __init__(self):
"""Construct."""
self.pool = {}
self.view = OrderedDict()
self.library = OrderedDict()
self.edit = False
self.lock = Lock()
<DeepExtract>
try:
self._load()
except Exception:
print('Failed to load session!')
backup(profile('*.json'))
raise
self.cleanup()
</DeepExtract>
thread = current()
mission_ch.sub(thread)
@thread.listen('MISSION_PROPERTY_CHANGED')
def _(event):
"""Set the edit flag after mission changed."""
self.edit = True
|
def __init__(self):
"""Construct."""
self.pool = {}
self.view = OrderedDict()
self.library = OrderedDict()
self.edit = False
self.lock = Lock()
try:
self._load()
except Exception:
print('Failed to load session!')
backup(profile('*.json'))
raise
self.cleanup()
thread = current()
mission_ch.sub(thread)
@thread.listen('MISSION_PROPERTY_CHANGED')
def _(event):
"""Set the edit flag after mission changed."""
self.edit = True
|
ComicCrawler
|
positive
|
def project(self, fields):
all_fields = set(self.config.keys())
assert all_fields.issuperset(set(fields)), 'supplied fields are not subset of config fields'
self._df = self._df[fields]
config_pairs = [(key, value) for (key, value) in self.config.items() if key in fields]
self.config = OrderedDict(config_pairs)
<DeepExtract>
for column in self._df.columns:
if column in self.config and 'value_map' in self.config[column]:
for (source, target) in list(self.config[column]['value_map'].items()):
self._df.replace({column: {source: target}}, inplace=True)
for column in self.config:
if self.config[column]['domain'] == 'active':
self.config[column]['domain'] = (self._df[column].min(), self._df[column].max())
return self
</DeepExtract>
return self
|
def project(self, fields):
all_fields = set(self.config.keys())
assert all_fields.issuperset(set(fields)), 'supplied fields are not subset of config fields'
self._df = self._df[fields]
config_pairs = [(key, value) for (key, value) in self.config.items() if key in fields]
self.config = OrderedDict(config_pairs)
for column in self._df.columns:
if column in self.config and 'value_map' in self.config[column]:
for (source, target) in list(self.config[column]['value_map'].items()):
self._df.replace({column: {source: target}}, inplace=True)
for column in self.config:
if self.config[column]['domain'] == 'active':
self.config[column]['domain'] = (self._df[column].min(), self._df[column].max())
return self
return self
|
ektelo
|
positive
|
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
<DeepExtract>
if None in [2, 321, 321, 3]:
inputs = tf.placeholder(tf.float32, (2, 321, 321, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(321), [321, 1]) + np.reshape(np.arange(321), [1, 321]), [1, 321, 321, 1]), [2, 1, 1, 3]))
</DeepExtract>
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
<DeepExtract>
block = resnet_v1.resnet_v1_block
blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
(_, end_points) = resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=False, reuse=reuse, scope='resnet')
</DeepExtract>
endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 21, 21, 8], 'resnet/block3': [2, 11, 11, 16], 'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
|
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
if None in [2, 321, 321, 3]:
inputs = tf.placeholder(tf.float32, (2, 321, 321, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(321), [321, 1]) + np.reshape(np.arange(321), [1, 321]), [1, 321, 321, 1]), [2, 1, 1, 3]))
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
block = resnet_v1.resnet_v1_block
blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
(_, end_points) = resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=False, reuse=reuse, scope='resnet')
endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 21, 21, 8], 'resnet/block3': [2, 11, 11, 16], 'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
|
edafa
|
positive
|
def main(self, args):
if args.subparser_name == self.MODULE_AB:
output_path = args.output_prefix + self.module_ab_output_suffix
kmg = KeggModuleGrabber()
if args.modules or args.module_list_file:
module_to_def = {}
needed_kos = set()
if args.modules:
for module in args.modules:
module_to_def[module] = kmg.m2def[module]
for ko in self._extract_kos(kmg.m2def[module]):
needed_kos.add(ko)
elif args.module_list_file:
for module in [x.strip() for x in open(args.module_list_file)]:
module_to_def[module] = kmg.m2def[module]
for ko in self._extract_kos(kmg.m2def[module]):
needed_kos.add(ko)
else:
module_to_def = kmg.m2def
needed_kos = False
<DeepExtract>
logging.info('Loading KO matrix: %s' % args.matrix)
ko_matrix_object = Matrix(args.matrix)
mkg = KeggModuleGrabber()
output_lines = ['\t'.join(['Module', 'Sample', 'Mean', 'SD', 'SEM'])]
needed_kos = needed_kos if needed_kos else ko_matrix_object.rownames
logging.info('Generating list of possible KOs for each sample')
sample_to_possible_kos = {sample: set() for sample in ko_matrix_object.colnames}
for ko in needed_kos:
for sample in ko_matrix_object.colnames:
if float(ko_matrix_object.get_entry(sample, ko)) >= 0.0:
sample_to_possible_kos[sample].add(ko)
for (module, definition) in module_to_def.items():
module_is_possible = False
output_line_base = [module]
sample_batches = []
definition_object = ModuleDescription(definition)
for sample in ko_matrix_object.colnames:
ko_values = []
possible_kos_in_sample = set()
for ko in needed_kos:
if float(ko_matrix_object.get_entry(sample, ko)) >= 0.0:
possible_kos_in_sample.add(ko)
if definition_object.num_steps() == definition_object.num_covered_steps(list(sample_to_possible_kos[sample])):
module_is_possible = True
for ko in definition_object.kos():
if ko in needed_kos:
ko_values.append(float(ko_matrix_object.get_entry(sample, ko)))
ko_values_array = np.array(ko_values)
mu = ko_values_array.mean()
sd = ko_values_array.std()
se = sd / math.sqrt(len(ko_values))
sample_batches.append([sample, str(mu), str(sd), str(se)])
module_is_possible = True
else:
sample_batches.append([sample, '0.0', '0.0', '0.0'])
if module_is_possible:
for batch in sample_batches:
line = output_line_base + batch
output_lines.append('\t'.join(line))
output_lines = output_lines
</DeepExtract>
logging.info('Writing output to file: %s' % output_path)
with open(output_path, 'w') as out_io:
out_io.write('\n'.join(output_lines) + '\n')
|
def main(self, args):
if args.subparser_name == self.MODULE_AB:
output_path = args.output_prefix + self.module_ab_output_suffix
kmg = KeggModuleGrabber()
if args.modules or args.module_list_file:
module_to_def = {}
needed_kos = set()
if args.modules:
for module in args.modules:
module_to_def[module] = kmg.m2def[module]
for ko in self._extract_kos(kmg.m2def[module]):
needed_kos.add(ko)
elif args.module_list_file:
for module in [x.strip() for x in open(args.module_list_file)]:
module_to_def[module] = kmg.m2def[module]
for ko in self._extract_kos(kmg.m2def[module]):
needed_kos.add(ko)
else:
module_to_def = kmg.m2def
needed_kos = False
logging.info('Loading KO matrix: %s' % args.matrix)
ko_matrix_object = Matrix(args.matrix)
mkg = KeggModuleGrabber()
output_lines = ['\t'.join(['Module', 'Sample', 'Mean', 'SD', 'SEM'])]
needed_kos = needed_kos if needed_kos else ko_matrix_object.rownames
logging.info('Generating list of possible KOs for each sample')
sample_to_possible_kos = {sample: set() for sample in ko_matrix_object.colnames}
for ko in needed_kos:
for sample in ko_matrix_object.colnames:
if float(ko_matrix_object.get_entry(sample, ko)) >= 0.0:
sample_to_possible_kos[sample].add(ko)
for (module, definition) in module_to_def.items():
module_is_possible = False
output_line_base = [module]
sample_batches = []
definition_object = ModuleDescription(definition)
for sample in ko_matrix_object.colnames:
ko_values = []
possible_kos_in_sample = set()
for ko in needed_kos:
if float(ko_matrix_object.get_entry(sample, ko)) >= 0.0:
possible_kos_in_sample.add(ko)
if definition_object.num_steps() == definition_object.num_covered_steps(list(sample_to_possible_kos[sample])):
module_is_possible = True
for ko in definition_object.kos():
if ko in needed_kos:
ko_values.append(float(ko_matrix_object.get_entry(sample, ko)))
ko_values_array = np.array(ko_values)
mu = ko_values_array.mean()
sd = ko_values_array.std()
se = sd / math.sqrt(len(ko_values))
sample_batches.append([sample, str(mu), str(sd), str(se)])
module_is_possible = True
else:
sample_batches.append([sample, '0.0', '0.0', '0.0'])
if module_is_possible:
for batch in sample_batches:
line = output_line_base + batch
output_lines.append('\t'.join(line))
output_lines = output_lines
logging.info('Writing output to file: %s' % output_path)
with open(output_path, 'w') as out_io:
out_io.write('\n'.join(output_lines) + '\n')
|
enrichM
|
positive
|
def command(self, cmd, args=None, decomposeParDict=None, include_header=True):
"""Get command line for an OpenFOAM command in parallel or serial.
Args:
cmd: An OpenFOAM command.
args: List of optional arguments for command. e.g. ('c', 'latestTime')
decomposeParDict: decomposeParDict for parallel runs (default: None).
include_header: Include header lines to set up the environment
(default: True).
Returns:
(cmd, logfiles, errorfiles)
"""
if isinstance(cmd, str):
return self.__command(cmd, args, decomposeParDict, include_header)
elif isinstance(cmd, (list, tuple)):
res = namedtuple('log', 'cmd logfiles errorfiles')
logs = list(range(len(cmd)))
for (count, c) in enumerate(cmd):
if count > 0:
include_header = False
if c == 'blockMesh':
decomposeParDict = None
try:
arg = args[count]
except TypeError:
arg = args
<DeepExtract>
res = namedtuple('log', 'cmd logfiles errorfiles')
arguments = '' if not (arg,) else '{}'.format(' '.join((arg,)))
if decomposeParDict:
n = decomposeParDict.numberOfSubdomains
arguments = arguments + ' -parallel'
if c == 'snappyHexMesh':
cmd_list = ('decomposePar', 'mpirun -np %s %s' % (n, c), 'reconstructParMesh', 'rm')
arg_list = ('', arguments, '-constant', '-r proc*')
cmd_name_list = ('decomposePar', c, 'reconstructParMesh', 'rm')
else:
cmd_list = ('decomposePar', 'mpirun -np %s %s' % (n, c), 'reconstructPar', 'rm')
arg_list = ('', arguments, '', '-r proc*')
cmd_name_list = ('decomposePar', c, 'reconstructPar', 'rm')
cmds = tuple((' '.join((c, arg)) for (c, arg) in zip(cmd_list, arg_list)))
errfiles = tuple(('{}/{}.err'.format(self.errFolder, name) for name in cmd_name_list))
logfiles = tuple(('{}/{}.log'.format(self.log_folder, name) for name in cmd_name_list))
else:
cmds = (c,)
errfiles = ('{}/{}.err'.format(self.errFolder, c),)
logfiles = ('{}/{}.log'.format(self.log_folder, c),)
logs[count] = res(cmds, logfiles, errfiles)
</DeepExtract>
command = tuple((log.cmd for log in logs))
logfiles = tuple((ff for log in logs for ff in log.logfiles))
errorfiles = tuple((ff for log in logs for ff in log.errorfiles))
return res(command, logfiles, errorfiles)
|
def command(self, cmd, args=None, decomposeParDict=None, include_header=True):
"""Get command line for an OpenFOAM command in parallel or serial.
Args:
cmd: An OpenFOAM command.
args: List of optional arguments for command. e.g. ('c', 'latestTime')
decomposeParDict: decomposeParDict for parallel runs (default: None).
include_header: Include header lines to set up the environment
(default: True).
Returns:
(cmd, logfiles, errorfiles)
"""
if isinstance(cmd, str):
return self.__command(cmd, args, decomposeParDict, include_header)
elif isinstance(cmd, (list, tuple)):
res = namedtuple('log', 'cmd logfiles errorfiles')
logs = list(range(len(cmd)))
for (count, c) in enumerate(cmd):
if count > 0:
include_header = False
if c == 'blockMesh':
decomposeParDict = None
try:
arg = args[count]
except TypeError:
arg = args
res = namedtuple('log', 'cmd logfiles errorfiles')
arguments = '' if not (arg,) else '{}'.format(' '.join((arg,)))
if decomposeParDict:
n = decomposeParDict.numberOfSubdomains
arguments = arguments + ' -parallel'
if c == 'snappyHexMesh':
cmd_list = ('decomposePar', 'mpirun -np %s %s' % (n, c), 'reconstructParMesh', 'rm')
arg_list = ('', arguments, '-constant', '-r proc*')
cmd_name_list = ('decomposePar', c, 'reconstructParMesh', 'rm')
else:
cmd_list = ('decomposePar', 'mpirun -np %s %s' % (n, c), 'reconstructPar', 'rm')
arg_list = ('', arguments, '', '-r proc*')
cmd_name_list = ('decomposePar', c, 'reconstructPar', 'rm')
cmds = tuple((' '.join((c, arg)) for (c, arg) in zip(cmd_list, arg_list)))
errfiles = tuple(('{}/{}.err'.format(self.errFolder, name) for name in cmd_name_list))
logfiles = tuple(('{}/{}.log'.format(self.log_folder, name) for name in cmd_name_list))
else:
cmds = (c,)
errfiles = ('{}/{}.err'.format(self.errFolder, c),)
logfiles = ('{}/{}.log'.format(self.log_folder, c),)
logs[count] = res(cmds, logfiles, errfiles)
command = tuple((log.cmd for log in logs))
logfiles = tuple((ff for log in logs for ff in log.logfiles))
errorfiles = tuple((ff for log in logs for ff in log.errorfiles))
return res(command, logfiles, errorfiles)
|
butterfly
|
positive
|
def validate_ommers(ommers: Tuple[Header, ...], block_header: Header, chain: BlockChain) -> None:
"""
Validates the ommers mentioned in the block.
An ommer block is a block that wasn't canonically added to the
blockchain because it wasn't validated as fast as the canonical block
but was mined at the same time.
To be considered valid, the ommers must adhere to the rules defined in
the Ethereum protocol. The maximum amount of ommers is 2 per block and
there cannot be duplicate ommers in a block. Many of the other ommer
    constraints are listed in the in-line comments of this function.
Parameters
----------
ommers :
List of ommers mentioned in the current block.
block_header:
The header of current block.
chain :
History and current state.
"""
block_hash = rlp.rlp_hash(block_header)
ensure(rlp.rlp_hash(ommers) == block_header.ommers_hash, InvalidBlock)
if len(ommers) == 0:
return
for ommer in ommers:
ensure(1 <= ommer.number < block_header.number, InvalidBlock)
ommer_parent_header = chain.blocks[-(block_header.number - ommer.number) - 1].header
<DeepExtract>
if ommer.number == MAINNET_FORK_BLOCK:
parent_gas_limit = ommer_parent_header.gas_limit * ELASTICITY_MULTIPLIER
parent_gas_target = ommer_parent_header.gas_limit
else:
parent_gas_limit = ommer_parent_header.gas_limit
parent_gas_target = ommer_parent_header.gas_limit // ELASTICITY_MULTIPLIER
parent_base_fee_per_gas = ommer_parent_header.base_fee_per_gas
parent_gas_used = ommer_parent_header.gas_used
ensure(ommer.gas_used <= ommer.gas_limit, InvalidBlock)
ensure(check_gas_limit(ommer.gas_limit, parent_gas_limit), InvalidBlock)
if ommer.number == MAINNET_FORK_BLOCK:
expected_base_fee_per_gas = INITIAL_BASE_FEE
elif parent_gas_used == parent_gas_target:
expected_base_fee_per_gas = parent_base_fee_per_gas
elif parent_gas_used > parent_gas_target:
gas_used_delta = parent_gas_used - parent_gas_target
parent_fee_gas_delta = parent_base_fee_per_gas * gas_used_delta
target_fee_gas_delta = parent_fee_gas_delta // parent_gas_target
base_fee_per_gas_delta = max(target_fee_gas_delta // BASE_FEE_MAX_CHANGE_DENOMINATOR, 1)
expected_base_fee_per_gas = parent_base_fee_per_gas + base_fee_per_gas_delta
else:
gas_used_delta = parent_gas_target - parent_gas_used
parent_fee_gas_delta = parent_base_fee_per_gas * gas_used_delta
target_fee_gas_delta = parent_fee_gas_delta // parent_gas_target
base_fee_per_gas_delta = target_fee_gas_delta // BASE_FEE_MAX_CHANGE_DENOMINATOR
expected_base_fee_per_gas = parent_base_fee_per_gas - base_fee_per_gas_delta
ensure(expected_base_fee_per_gas == ommer.base_fee_per_gas, InvalidBlock)
parent_has_ommers = ommer_parent_header.ommers_hash != EMPTY_OMMER_HASH
ensure(ommer.timestamp > ommer_parent_header.timestamp, InvalidBlock)
ensure(ommer.number == ommer_parent_header.number + 1, InvalidBlock)
ensure(len(ommer.extra_data) <= 32, InvalidBlock)
block_difficulty = calculate_block_difficulty(ommer.number, ommer.timestamp, ommer_parent_header.timestamp, ommer_parent_header.difficulty, parent_has_ommers)
ensure(ommer.difficulty == block_difficulty, InvalidBlock)
block_parent_hash = keccak256(rlp.encode(ommer_parent_header))
ensure(ommer.parent_hash == block_parent_hash, InvalidBlock)
validate_proof_of_work(ommer)
</DeepExtract>
ensure(len(ommers) <= 2, InvalidBlock)
ommers_hashes = [rlp.rlp_hash(ommer) for ommer in ommers]
ensure(len(ommers_hashes) == len(set(ommers_hashes)), InvalidBlock)
recent_canonical_blocks = chain.blocks[-(MAX_OMMER_DEPTH + 1):]
recent_canonical_block_hashes = {rlp.rlp_hash(block.header) for block in recent_canonical_blocks}
recent_ommers_hashes: Set[Hash32] = set()
for block in recent_canonical_blocks:
recent_ommers_hashes = recent_ommers_hashes.union({rlp.rlp_hash(ommer) for ommer in block.ommers})
for (ommer_index, ommer) in enumerate(ommers):
ommer_hash = ommers_hashes[ommer_index]
ensure(ommer_hash != block_hash, InvalidBlock)
ensure(ommer_hash not in recent_canonical_block_hashes, InvalidBlock)
ensure(ommer_hash not in recent_ommers_hashes, InvalidBlock)
ommer_age = block_header.number - ommer.number
ensure(1 <= ommer_age <= MAX_OMMER_DEPTH, InvalidBlock)
ensure(ommer.parent_hash in recent_canonical_block_hashes, InvalidBlock)
ensure(ommer.parent_hash != block_header.parent_hash, InvalidBlock)
|
def validate_ommers(ommers: Tuple[Header, ...], block_header: Header, chain: BlockChain) -> None:
"""
Validates the ommers mentioned in the block.
An ommer block is a block that wasn't canonically added to the
blockchain because it wasn't validated as fast as the canonical block
but was mined at the same time.
To be considered valid, the ommers must adhere to the rules defined in
the Ethereum protocol. The maximum amount of ommers is 2 per block and
there cannot be duplicate ommers in a block. Many of the other ommer
    constraints are listed in the in-line comments of this function.
Parameters
----------
ommers :
List of ommers mentioned in the current block.
block_header:
The header of current block.
chain :
History and current state.
"""
block_hash = rlp.rlp_hash(block_header)
ensure(rlp.rlp_hash(ommers) == block_header.ommers_hash, InvalidBlock)
if len(ommers) == 0:
return
for ommer in ommers:
ensure(1 <= ommer.number < block_header.number, InvalidBlock)
ommer_parent_header = chain.blocks[-(block_header.number - ommer.number) - 1].header
if ommer.number == MAINNET_FORK_BLOCK:
parent_gas_limit = ommer_parent_header.gas_limit * ELASTICITY_MULTIPLIER
parent_gas_target = ommer_parent_header.gas_limit
else:
parent_gas_limit = ommer_parent_header.gas_limit
parent_gas_target = ommer_parent_header.gas_limit // ELASTICITY_MULTIPLIER
parent_base_fee_per_gas = ommer_parent_header.base_fee_per_gas
parent_gas_used = ommer_parent_header.gas_used
ensure(ommer.gas_used <= ommer.gas_limit, InvalidBlock)
ensure(check_gas_limit(ommer.gas_limit, parent_gas_limit), InvalidBlock)
if ommer.number == MAINNET_FORK_BLOCK:
expected_base_fee_per_gas = INITIAL_BASE_FEE
elif parent_gas_used == parent_gas_target:
expected_base_fee_per_gas = parent_base_fee_per_gas
elif parent_gas_used > parent_gas_target:
gas_used_delta = parent_gas_used - parent_gas_target
parent_fee_gas_delta = parent_base_fee_per_gas * gas_used_delta
target_fee_gas_delta = parent_fee_gas_delta // parent_gas_target
base_fee_per_gas_delta = max(target_fee_gas_delta // BASE_FEE_MAX_CHANGE_DENOMINATOR, 1)
expected_base_fee_per_gas = parent_base_fee_per_gas + base_fee_per_gas_delta
else:
gas_used_delta = parent_gas_target - parent_gas_used
parent_fee_gas_delta = parent_base_fee_per_gas * gas_used_delta
target_fee_gas_delta = parent_fee_gas_delta // parent_gas_target
base_fee_per_gas_delta = target_fee_gas_delta // BASE_FEE_MAX_CHANGE_DENOMINATOR
expected_base_fee_per_gas = parent_base_fee_per_gas - base_fee_per_gas_delta
ensure(expected_base_fee_per_gas == ommer.base_fee_per_gas, InvalidBlock)
parent_has_ommers = ommer_parent_header.ommers_hash != EMPTY_OMMER_HASH
ensure(ommer.timestamp > ommer_parent_header.timestamp, InvalidBlock)
ensure(ommer.number == ommer_parent_header.number + 1, InvalidBlock)
ensure(len(ommer.extra_data) <= 32, InvalidBlock)
block_difficulty = calculate_block_difficulty(ommer.number, ommer.timestamp, ommer_parent_header.timestamp, ommer_parent_header.difficulty, parent_has_ommers)
ensure(ommer.difficulty == block_difficulty, InvalidBlock)
block_parent_hash = keccak256(rlp.encode(ommer_parent_header))
ensure(ommer.parent_hash == block_parent_hash, InvalidBlock)
validate_proof_of_work(ommer)
ensure(len(ommers) <= 2, InvalidBlock)
ommers_hashes = [rlp.rlp_hash(ommer) for ommer in ommers]
ensure(len(ommers_hashes) == len(set(ommers_hashes)), InvalidBlock)
recent_canonical_blocks = chain.blocks[-(MAX_OMMER_DEPTH + 1):]
recent_canonical_block_hashes = {rlp.rlp_hash(block.header) for block in recent_canonical_blocks}
recent_ommers_hashes: Set[Hash32] = set()
for block in recent_canonical_blocks:
recent_ommers_hashes = recent_ommers_hashes.union({rlp.rlp_hash(ommer) for ommer in block.ommers})
for (ommer_index, ommer) in enumerate(ommers):
ommer_hash = ommers_hashes[ommer_index]
ensure(ommer_hash != block_hash, InvalidBlock)
ensure(ommer_hash not in recent_canonical_block_hashes, InvalidBlock)
ensure(ommer_hash not in recent_ommers_hashes, InvalidBlock)
ommer_age = block_header.number - ommer.number
ensure(1 <= ommer_age <= MAX_OMMER_DEPTH, InvalidBlock)
ensure(ommer.parent_hash in recent_canonical_block_hashes, InvalidBlock)
ensure(ommer.parent_hash != block_header.parent_hash, InvalidBlock)
|
eth1.0-specs
|
positive
|
def __get_sub_handle_by_name(self, name):
try:
return self._sub_handles[name]
except KeyError:
pass
if name in self._invalid_sub_handles:
return None
new_handle = self._handle.get_handle_by_name(name)
if not new_handle:
self._invalid_sub_handles.add(name)
return None
<DeepExtract>
_type2cls = {simulator.MODULE: HierarchyObject, simulator.STRUCTURE: HierarchyObject, simulator.REG: ModifiableObject, simulator.NET: ModifiableObject, simulator.NETARRAY: NonHierarchyIndexableObject, simulator.REAL: RealObject, simulator.INTEGER: IntegerObject, simulator.ENUM: EnumObject, simulator.STRING: StringObject, simulator.GENARRAY: HierarchyArrayObject}
global _handle2obj
try:
sub_handle = _handle2obj[new_handle]
except KeyError:
pass
t = new_handle.get_type()
if new_handle.get_const() and t not in [simulator.MODULE, simulator.STRUCTURE, simulator.NETARRAY, simulator.GENARRAY]:
obj = ConstantObject(new_handle, self._child_path(name), t)
_handle2obj[new_handle] = obj
sub_handle = obj
if t not in _type2cls:
raise NotImplementedError("Couldn't find a matching object for GPI type %s(%d) (path=%s)" % (new_handle.get_type_string(), t, self._child_path(name)))
obj = _type2cls[t](new_handle, self._child_path(name))
_handle2obj[new_handle] = obj
sub_handle = obj
</DeepExtract>
self._sub_handles[name] = sub_handle
return sub_handle
|
def __get_sub_handle_by_name(self, name):
try:
return self._sub_handles[name]
except KeyError:
pass
if name in self._invalid_sub_handles:
return None
new_handle = self._handle.get_handle_by_name(name)
if not new_handle:
self._invalid_sub_handles.add(name)
return None
_type2cls = {simulator.MODULE: HierarchyObject, simulator.STRUCTURE: HierarchyObject, simulator.REG: ModifiableObject, simulator.NET: ModifiableObject, simulator.NETARRAY: NonHierarchyIndexableObject, simulator.REAL: RealObject, simulator.INTEGER: IntegerObject, simulator.ENUM: EnumObject, simulator.STRING: StringObject, simulator.GENARRAY: HierarchyArrayObject}
global _handle2obj
try:
sub_handle = _handle2obj[new_handle]
except KeyError:
pass
t = new_handle.get_type()
if new_handle.get_const() and t not in [simulator.MODULE, simulator.STRUCTURE, simulator.NETARRAY, simulator.GENARRAY]:
obj = ConstantObject(new_handle, self._child_path(name), t)
_handle2obj[new_handle] = obj
sub_handle = obj
if t not in _type2cls:
raise NotImplementedError("Couldn't find a matching object for GPI type %s(%d) (path=%s)" % (new_handle.get_type_string(), t, self._child_path(name)))
obj = _type2cls[t](new_handle, self._child_path(name))
_handle2obj[new_handle] = obj
sub_handle = obj
self._sub_handles[name] = sub_handle
return sub_handle
|
cocotb
|
positive
|
@staticmethod
def _return_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the return/yield statement
            requirements listed in the 'Requirements' section of this class's
docstring.
OR
None, otherwise.
"""
if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[0].value in ['return', 'yield']:
<DeepExtract>
def is_valid_index(idx: int) -> bool:
"""
Returns:
True iff @idx is positive AND seq[@idx] does NOT raise an
IndexError.
"""
is_valid_index = 0 <= idx < len(LL)
is_valid_index = is_valid_index
</DeepExtract>
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
|
@staticmethod
def _return_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the return/yield statement
            requirements listed in the 'Requirements' section of this class's
docstring.
OR
None, otherwise.
"""
if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[0].value in ['return', 'yield']:
def is_valid_index(idx: int) -> bool:
"""
Returns:
True iff @idx is positive AND seq[@idx] does NOT raise an
IndexError.
"""
is_valid_index = 0 <= idx < len(LL)
is_valid_index = is_valid_index
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
|
black
|
positive
|
def validate_nodes(root_node, vi):
""" Check if node contains all required components or if it is custom."""
to_check = [root_node]
while len(to_check) > 0:
node = to_check.pop(0)
custom = 'custom' in node.sdef and node.sdef['custom']
is_link = node.link_info is not None
type = node.sdef['type']
if custom:
if self.options['identify_custom_nodes'] and (not is_link):
(caid, cval) = self.options['custom_node_identifier']
if not (caid in node.h5attrs and vs.values_match(cval, node.h5attrs[caid])):
if type not in ('group', 'dataset') or node.link_info:
pass
elif node.parent and 'custom' in node.parent.sdef and node.parent.sdef['custom'] and (not self.closed_group(node.parent)):
vi['custom_nodes_inside_custom_missing_flag'][type].append(node.full_path)
else:
vi['custom_nodes_missing_flag'][type].append(node.full_path)
if 'h5nsig' in node.sdef:
<DeepExtract>
msigs = node.parent.msigs
h5nsig = node.sdef['h5nsig']
matching_ids = []
for id in msigs:
idsig = msigs[id]
if idsig['name'] == h5nsig['name']:
matching_ids.append(id)
if not matching_ids:
explanation = None
parent = node.parent
mstats = parent.mstats
options = []
for id in sorted(matching_ids):
attrs = msigs[id]['attrs']
options.append(self.get_const_attributes_option(attrs))
found = ppdict(h5nsig['attrs'])
msg = 'expected one of:\n%s\nfound attributes: %s' % ('\n'.join(options), found)
explanation = msg
</DeepExtract>
if explanation:
vi['explanations'][node.full_path] = explanation
elif self.closed_group(node.parent):
msg = '%s: addition not allowed because parent group specified as closed' % node.full_path
self.error.append(msg)
else:
vi['identified_custom_nodes'][type].append(node.full_path)
elif node.sdef['ns'] != self.default_ns and self.options['identify_extension_nodes'] and (not is_link):
eaid = self.options['extension_node_identifier']
found_match = False
if eaid in node.h5attrs:
found_val = node.h5attrs[eaid]
expected_val = '%s:%s' % (node.sdef['ns'], node.sdef['id'])
if vs.values_match(found_val, expected_val) or vs.values_match(found_val, node.sdef['ns']):
found_match = True
if found_match:
vi['identified_extension_nodes'][type].append(node.full_path)
else:
vi['extension_nodes_missing_flag'][type].append(node.full_path)
<DeepExtract>
if not hasattr(node, 'attributes'):
return
ats = node.attributes
added_nodes_identifiers = []
if self.options['identify_extension_nodes']:
added_nodes_identifiers.append(self.options['extension_node_identifier'])
if self.options['identify_custom_nodes']:
cni = self.options['custom_node_identifier'][0]
if cni not in added_nodes_identifiers:
added_nodes_identifiers.append(cni)
for aid in sorted(ats):
if node.link_info:
msg = '%s: (%s) is link but has attribute (%s) - links should never have attributes.' % (node.full_path, node.sdef['type'], aid)
self.error.append(msg)
if 'autogen' in ats[aid]:
continue
qty = ats[aid]['qty']
if qty == 'custom':
msg = '%s: (%s) %s' % (node.full_path, node.sdef['type'], aid)
vi['added_attributes_not_described_by_extension'].append(msg)
continue
assert qty in ('?', '!', '^'), "attribute qty must be one of: '!', '^', '?' or 'custom'"
const = 'const' in ats[aid] and ats[aid]['const']
if const:
assert 'nv' not in ats[aid], '%s: attribute [%s] is type const, but nv specified' % (node.full_path, aid)
const_val = ats[aid]['value']
val_present = aid in node.h5attrs
if val_present:
aval = vs.make_str(node.h5attrs[aid])
last_source_ns = ats[aid]['source'][-1].split(':')[0] if 'source' in ats[aid] else None
if val_present and last_source_ns and (last_source_ns != self.default_ns):
msg = '%s: (%s) %s' % (node.full_path, node.sdef['type'], aid)
vi['added_attributes_described_by_extension'].append(msg)
if qty in ('!', '^'):
if not val_present:
if aid in added_nodes_identifiers:
continue
if const:
msg = "%s: (expected %s='%s' %s)" % (node.full_path, aid, const_val, stype(const_val))
else:
msg = '%s - %s' % (node.full_path, aid)
elist = 'missing_attributes' if qty == '!' else 'recommended_attributes_missing'
vi[elist].append(msg)
continue
if vs.values_match(node.h5attrs[aid], ''):
msg = '%s - %s' % (node.full_path, aid)
elist = 'required_attributes_empty' if qty == '!' else 'recommended_attributes_empty'
vi[elist].append(msg)
continue
if val_present and const and (not vs.values_match(aval, const_val)):
msg = "%s: %s\nexpected: '%s' %s\nfound: '%s' %s " % (node.full_path, aid, const_val, stype(const_val), aval, stype(aval))
vi['incorrect_attribute_values'].append(msg)
</DeepExtract>
if node.link_info:
link_info = node.link_info
if 'node' in link_info:
if link_info['node'] is None:
continue
target_path = node.link_info['node'].full_path
find_links.add_item(vi['links'], target_path, node.full_path)
elif 'extlink' in link_info:
target = link_info['extlink']
try:
tnode = self.file_pointer[node.full_path]
except KeyError:
msg = "%s: external link target not found: file='%s', path='%s'" % (node.full_path, target[0], target[1])
self.warning.append(msg)
find_links.add_item(vi['ext_links'], target, node.full_path)
else:
<DeepExtract>
if 'Unknown link_info type: %s' % link_info:
print('** Error: %s' % 'Unknown link_info type: %s' % link_info)
print('Stack trace follows')
print('-------------------')
traceback.print_stack()
sys.exit(1)
</DeepExtract>
elif type == 'group':
<DeepExtract>
if not hasattr(node, 'required'):
required_info = None
exclude_info = self.get_exclude_info(node)
excluded_ids = set(exclude_info['ids'].keys()) if exclude_info else set()
r_ref = set()
spec = {}
pattern = re.compile('([^\\d\\W][^/\\W]*/?)')
ops = set(['AND', 'and', 'XOR', '^', 'OR', 'or', 'NOT', 'not'])
for rid in node.required.keys():
cm = node.required[rid]
condition_string = cm[0]
error_message = cm[1]
ids_and_ops = set(re.findall(pattern, condition_string))
ids = ids_and_ops - ops
if not ids <= excluded_ids:
spec[rid] = cm
r_ref.update(ids)
if len(r_ref) == 0:
assert excluded_ids, 'did not find any ids in _required spec, and none were excluded: %s' % node.required
required_info = None
id_status = {}
for id in list(r_ref):
if id not in node.mstats:
print('%s identifier (%s) in _required specification not found in group' % (node.full_path, id))
print('valid options are:\n%s' % node.mstats.keys())
error_exit()
present = len(node.mstats[id]['created']) > 0
id_status[id] = present
required_info = {'spec': spec, 'id_status': id_status}
required_info = required_info
</DeepExtract>
required_referenced = required_info['id_status'] if required_info else {}
<DeepExtract>
if not hasattr(node, 'exclude_in'):
exclude_info = None
ex_spec = node.exclude_in
for path in ex_spec:
assert path.startswith('/'), '_exclude_in path must start with "/": %s in %s' % (path, ex_spec)
if node.full_path.startswith(path):
ids = {}
for id in ex_spec[path]:
qty = id[-1]
if qty in ('!', '^', '?'):
id = id[:-1]
else:
qty = '!'
ids[id] = qty
exclude_info = {'path': path, 'ids': ids}
exclude_info = exclude_info
exclude_info = None
</DeepExtract>
for id in sorted(node.mstats.keys()):
idinfo = node.mstats[id]
qty = idinfo['qty']
type = idinfo['type']
created = idinfo['created']
is_excluded = exclude_info and id in exclude_info['ids']
if is_excluded:
if not created:
continue
else:
ex_qty = exclude_info['ids'][id]
assert ex_qty in ('?', '!', '^')
if ex_qty == '?':
continue
verb = 'must' if ex_qty == '!' else 'should'
msg = "%s - '%s' %s not be present within '%s'" % (created[0].full_path, id, verb, exclude_info['path'])
if ex_qty == '!':
self.error.append(msg)
else:
self.warning.append(msg)
if id.rstrip('/') not in required_referenced and len(created) == 0:
<DeepExtract>
full_path = node.full_path + '/' + id if node.full_path != '' else id
full_path = re.sub('//+', '/', full_path)
self.validate_path(full_path)
id_full_path = full_path
</DeepExtract>
if qty in ('!', '+'):
vi['missing_nodes'][type].append(id_full_path)
elif qty == '^':
vi['missing_recommended'][type].append(id_full_path)
try:
to_check.extend(sorted(created))
except TypeError as e:
<DeepExtract>
if 'failed extend':
print('** Error: %s' % 'failed extend')
print('Stack trace follows')
print('-------------------')
traceback.print_stack()
sys.exit(1)
</DeepExtract>
<DeepExtract>
required_info = self.get_required_info(node)
if not required_info:
return
required_spec = required_info['spec']
id_status = required_info['id_status']
ce = self.eval_required(node, required_spec, id_status)
if ce:
[condition_string, error_message] = ce
msg = '%s: %s - %s' % (node.full_path, condition_string, error_message)
self.error.append(msg)
</DeepExtract>
elif type == 'dataset':
<DeepExtract>
if self.options['storage_method'] == 'commands':
return
if 'custom' in node.sdef and node.sdef['custom']:
return
if 'autogen' in node.dsinfo:
return
ddt = node.dsinfo['ddt']
found_dtype = str(self.file_pointer[node.full_path].dtype)
if re.match('^\\|V\\d+$', found_dtype):
found_dtype = 'binary'
types_match = valid_dtype(ddt, found_dtype)
if not types_match:
if ddt['type'] == 'text':
nid = self.file_pointer[node.full_path].id
h5type = nid.get_type()
if isinstance(h5type, h5py.h5t.TypeStringID):
return
if ddt['type'] == 'binary':
if 'int' in found_dtype:
return
msg = '%s -- data type in hdf5 file (%s) does not match specification type (%s)' % (node.full_path, found_dtype, ddt['type'])
self.error.append(msg)
return
if found_dtype == 'string_':
return
min_size = node.dsinfo['ddt']['minimum_size']
if not min_size:
return
pat = '([a-z]+)(\\d+)'
match = re.match(pat, found_dtype)
if not match:
msg = '%s: unable to split hdf5 dtype (%s) into type and size.' % (node.full_path, found_dtype)
self.warning.append(msg)
return
typ = match.group(1)
siz = int(match.group(2))
if siz < min_size:
msg = '%s: Data type (%s) has size (%i) less than minimum (%i)' % (node.full_path, found_dtype, siz, min_size)
self.error.append(msg)
</DeepExtract>
else:
<DeepExtract>
if 'unknown type in validation: %s' % type:
print('** Error: %s' % 'unknown type in validation: %s' % type)
print('Stack trace follows')
print('-------------------')
traceback.print_stack()
sys.exit(1)
</DeepExtract>
|
def validate_nodes(root_node, vi):
""" Check if node contains all required components or if it is custom."""
to_check = [root_node]
while len(to_check) > 0:
node = to_check.pop(0)
custom = 'custom' in node.sdef and node.sdef['custom']
is_link = node.link_info is not None
type = node.sdef['type']
if custom:
if self.options['identify_custom_nodes'] and (not is_link):
(caid, cval) = self.options['custom_node_identifier']
if not (caid in node.h5attrs and vs.values_match(cval, node.h5attrs[caid])):
if type not in ('group', 'dataset') or node.link_info:
pass
elif node.parent and 'custom' in node.parent.sdef and node.parent.sdef['custom'] and (not self.closed_group(node.parent)):
vi['custom_nodes_inside_custom_missing_flag'][type].append(node.full_path)
else:
vi['custom_nodes_missing_flag'][type].append(node.full_path)
if 'h5nsig' in node.sdef:
msigs = node.parent.msigs
h5nsig = node.sdef['h5nsig']
matching_ids = []
for id in msigs:
idsig = msigs[id]
if idsig['name'] == h5nsig['name']:
matching_ids.append(id)
if not matching_ids:
explanation = None
parent = node.parent
mstats = parent.mstats
options = []
for id in sorted(matching_ids):
attrs = msigs[id]['attrs']
options.append(self.get_const_attributes_option(attrs))
found = ppdict(h5nsig['attrs'])
msg = 'expected one of:\n%s\nfound attributes: %s' % ('\n'.join(options), found)
explanation = msg
if explanation:
vi['explanations'][node.full_path] = explanation
elif self.closed_group(node.parent):
msg = '%s: addition not allowed because parent group specified as closed' % node.full_path
self.error.append(msg)
else:
vi['identified_custom_nodes'][type].append(node.full_path)
elif node.sdef['ns'] != self.default_ns and self.options['identify_extension_nodes'] and (not is_link):
eaid = self.options['extension_node_identifier']
found_match = False
if eaid in node.h5attrs:
found_val = node.h5attrs[eaid]
expected_val = '%s:%s' % (node.sdef['ns'], node.sdef['id'])
if vs.values_match(found_val, expected_val) or vs.values_match(found_val, node.sdef['ns']):
found_match = True
if found_match:
vi['identified_extension_nodes'][type].append(node.full_path)
else:
vi['extension_nodes_missing_flag'][type].append(node.full_path)
if not hasattr(node, 'attributes'):
return
ats = node.attributes
added_nodes_identifiers = []
if self.options['identify_extension_nodes']:
added_nodes_identifiers.append(self.options['extension_node_identifier'])
if self.options['identify_custom_nodes']:
cni = self.options['custom_node_identifier'][0]
if cni not in added_nodes_identifiers:
added_nodes_identifiers.append(cni)
for aid in sorted(ats):
if node.link_info:
msg = '%s: (%s) is link but has attribute (%s) - links should never have attributes.' % (node.full_path, node.sdef['type'], aid)
self.error.append(msg)
if 'autogen' in ats[aid]:
continue
qty = ats[aid]['qty']
if qty == 'custom':
msg = '%s: (%s) %s' % (node.full_path, node.sdef['type'], aid)
vi['added_attributes_not_described_by_extension'].append(msg)
continue
assert qty in ('?', '!', '^'), "attribute qty must be one of: '!', '^', '?' or 'custom'"
const = 'const' in ats[aid] and ats[aid]['const']
if const:
assert 'nv' not in ats[aid], '%s: attribute [%s] is type const, but nv specified' % (node.full_path, aid)
const_val = ats[aid]['value']
val_present = aid in node.h5attrs
if val_present:
aval = vs.make_str(node.h5attrs[aid])
last_source_ns = ats[aid]['source'][-1].split(':')[0] if 'source' in ats[aid] else None
if val_present and last_source_ns and (last_source_ns != self.default_ns):
msg = '%s: (%s) %s' % (node.full_path, node.sdef['type'], aid)
vi['added_attributes_described_by_extension'].append(msg)
if qty in ('!', '^'):
if not val_present:
if aid in added_nodes_identifiers:
continue
if const:
msg = "%s: (expected %s='%s' %s)" % (node.full_path, aid, const_val, stype(const_val))
else:
msg = '%s - %s' % (node.full_path, aid)
elist = 'missing_attributes' if qty == '!' else 'recommended_attributes_missing'
vi[elist].append(msg)
continue
if vs.values_match(node.h5attrs[aid], ''):
msg = '%s - %s' % (node.full_path, aid)
elist = 'required_attributes_empty' if qty == '!' else 'recommended_attributes_empty'
vi[elist].append(msg)
continue
if val_present and const and (not vs.values_match(aval, const_val)):
msg = "%s: %s\nexpected: '%s' %s\nfound: '%s' %s " % (node.full_path, aid, const_val, stype(const_val), aval, stype(aval))
vi['incorrect_attribute_values'].append(msg)
if node.link_info:
link_info = node.link_info
if 'node' in link_info:
if link_info['node'] is None:
continue
target_path = node.link_info['node'].full_path
find_links.add_item(vi['links'], target_path, node.full_path)
elif 'extlink' in link_info:
target = link_info['extlink']
try:
tnode = self.file_pointer[node.full_path]
except KeyError:
msg = "%s: external link target not found: file='%s', path='%s'" % (node.full_path, target[0], target[1])
self.warning.append(msg)
find_links.add_item(vi['ext_links'], target, node.full_path)
else:
if 'Unknown link_info type: %s' % link_info:
print('** Error: %s' % 'Unknown link_info type: %s' % link_info)
print('Stack trace follows')
print('-------------------')
traceback.print_stack()
sys.exit(1)
elif type == 'group':
if not hasattr(node, 'required'):
required_info = None
exclude_info = self.get_exclude_info(node)
excluded_ids = set(exclude_info['ids'].keys()) if exclude_info else set()
r_ref = set()
spec = {}
pattern = re.compile('([^\\d\\W][^/\\W]*/?)')
ops = set(['AND', 'and', 'XOR', '^', 'OR', 'or', 'NOT', 'not'])
for rid in node.required.keys():
cm = node.required[rid]
condition_string = cm[0]
error_message = cm[1]
ids_and_ops = set(re.findall(pattern, condition_string))
ids = ids_and_ops - ops
if not ids <= excluded_ids:
spec[rid] = cm
r_ref.update(ids)
if len(r_ref) == 0:
assert excluded_ids, 'did not find any ids in _required spec, and none were excluded: %s' % node.required
required_info = None
id_status = {}
for id in list(r_ref):
if id not in node.mstats:
print('%s identifier (%s) in _required specification not found in group' % (node.full_path, id))
print('valid options are:\n%s' % node.mstats.keys())
error_exit()
present = len(node.mstats[id]['created']) > 0
id_status[id] = present
required_info = {'spec': spec, 'id_status': id_status}
required_info = required_info
required_referenced = required_info['id_status'] if required_info else {}
if not hasattr(node, 'exclude_in'):
exclude_info = None
ex_spec = node.exclude_in
for path in ex_spec:
assert path.startswith('/'), '_exclude_in path must start with "/": %s in %s' % (path, ex_spec)
if node.full_path.startswith(path):
ids = {}
for id in ex_spec[path]:
qty = id[-1]
if qty in ('!', '^', '?'):
id = id[:-1]
else:
qty = '!'
ids[id] = qty
exclude_info = {'path': path, 'ids': ids}
exclude_info = exclude_info
exclude_info = None
for id in sorted(node.mstats.keys()):
idinfo = node.mstats[id]
qty = idinfo['qty']
type = idinfo['type']
created = idinfo['created']
is_excluded = exclude_info and id in exclude_info['ids']
if is_excluded:
if not created:
continue
else:
ex_qty = exclude_info['ids'][id]
assert ex_qty in ('?', '!', '^')
if ex_qty == '?':
continue
verb = 'must' if ex_qty == '!' else 'should'
msg = "%s - '%s' %s not be present within '%s'" % (created[0].full_path, id, verb, exclude_info['path'])
if ex_qty == '!':
self.error.append(msg)
else:
self.warning.append(msg)
if id.rstrip('/') not in required_referenced and len(created) == 0:
full_path = node.full_path + '/' + id if node.full_path != '' else id
full_path = re.sub('//+', '/', full_path)
self.validate_path(full_path)
id_full_path = full_path
if qty in ('!', '+'):
vi['missing_nodes'][type].append(id_full_path)
elif qty == '^':
vi['missing_recommended'][type].append(id_full_path)
try:
to_check.extend(sorted(created))
except TypeError as e:
if 'failed extend':
print('** Error: %s' % 'failed extend')
print('Stack trace follows')
print('-------------------')
traceback.print_stack()
sys.exit(1)
required_info = self.get_required_info(node)
if not required_info:
return
required_spec = required_info['spec']
id_status = required_info['id_status']
ce = self.eval_required(node, required_spec, id_status)
if ce:
[condition_string, error_message] = ce
msg = '%s: %s - %s' % (node.full_path, condition_string, error_message)
self.error.append(msg)
elif type == 'dataset':
if self.options['storage_method'] == 'commands':
return
if 'custom' in node.sdef and node.sdef['custom']:
return
if 'autogen' in node.dsinfo:
return
ddt = node.dsinfo['ddt']
found_dtype = str(self.file_pointer[node.full_path].dtype)
if re.match('^\\|V\\d+$', found_dtype):
found_dtype = 'binary'
types_match = valid_dtype(ddt, found_dtype)
if not types_match:
if ddt['type'] == 'text':
nid = self.file_pointer[node.full_path].id
h5type = nid.get_type()
if isinstance(h5type, h5py.h5t.TypeStringID):
return
if ddt['type'] == 'binary':
if 'int' in found_dtype:
return
msg = '%s -- data type in hdf5 file (%s) does not match specification type (%s)' % (node.full_path, found_dtype, ddt['type'])
self.error.append(msg)
return
if found_dtype == 'string_':
return
min_size = node.dsinfo['ddt']['minimum_size']
if not min_size:
return
pat = '([a-z]+)(\\d+)'
match = re.match(pat, found_dtype)
if not match:
msg = '%s: unable to split hdf5 dtype (%s) into type and size.' % (node.full_path, found_dtype)
self.warning.append(msg)
return
typ = match.group(1)
siz = int(match.group(2))
if siz < min_size:
msg = '%s: Data type (%s) has size (%i) less than minimum (%i)' % (node.full_path, found_dtype, siz, min_size)
self.error.append(msg)
else:
if 'unknown type in validation: %s' % type:
print('** Error: %s' % 'unknown type in validation: %s' % type)
print('Stack trace follows')
print('-------------------')
traceback.print_stack()
sys.exit(1)
</DeepExtract>
|
api-python
|
positive
|
def main():
parser = argparse.ArgumentParser(description='Downloads ScanNet public data release.')
parser.add_argument('-o', '--out_dir', required=True, help='directory in which to download')
parser.add_argument('--task_data', action='store_true', help='download task data (v1)')
parser.add_argument('--label_map', action='store_true', help='download label map file')
parser.add_argument('--v1', action='store_true', help='download ScanNet v1 instead of v2')
parser.add_argument('--id', help='specific scan id to download')
parser.add_argument('--preprocessed_frames', action='store_true', help='download preprocessed subset of ScanNet frames (' + PREPROCESSED_FRAMES_FILE[1] + ')')
parser.add_argument('--test_frames_2d', action='store_true', help='download 2D test frames (' + TEST_FRAMES_FILE[1] + '; also included with whole dataset download)')
parser.add_argument('--types', nargs='+', help='specific file type to download (.aggregation.json, .sens, .txt, _vh_clean.ply, _vh_clean_2.0.010000.segs.json, _vh_clean_2.ply, _vh_clean.segs.json, _vh_clean.aggregation.json, _vh_clean_2.labels.ply, _2d-instance.zip, _2d-instance-filt.zip, _2d-label.zip, _2d-label-filt.zip)')
args = parser.parse_args()
print('By pressing any key to continue you confirm that you have agreed to the ScanNet terms of use as described at:')
print(TOS_URL)
print('***')
print('Press any key to continue, or CTRL-C to exit.')
key = input('')
if args.v1:
global RELEASE
global RELEASE_TASKS
global RELEASE_NAME
global LABEL_MAP_FILE
RELEASE = RELEASES[V1_IDX]
RELEASE_TASKS = RELEASES_TASKS[V1_IDX]
RELEASE_NAME = RELEASES_NAMES[V1_IDX]
LABEL_MAP_FILE = LABEL_MAP_FILES[V1_IDX]
release_file = BASE_URL + RELEASE + '.txt'
<DeepExtract>
scan_lines = urlopen(release_file)
scans = []
for scan_line in scan_lines:
scan_id = scan_line.decode('utf8').rstrip('\n')
scans.append(scan_id)
release_scans = scans
</DeepExtract>
file_types = FILETYPES
release_test_file = BASE_URL + RELEASE + '_test.txt'
<DeepExtract>
scan_lines = urlopen(release_test_file)
scans = []
for scan_line in scan_lines:
scan_id = scan_line.decode('utf8').rstrip('\n')
scans.append(scan_id)
release_test_scans = scans
</DeepExtract>
file_types_test = FILETYPES_TEST
out_dir_scans = os.path.join(args.out_dir, 'scans')
out_dir_test_scans = os.path.join(args.out_dir, 'scans_test')
out_dir_tasks = os.path.join(args.out_dir, 'tasks')
if args.types:
file_types = args.types
for file_type in file_types:
if file_type not in FILETYPES:
print('ERROR: Invalid file type: ' + file_type)
return
file_types_test = []
for file_type in file_types:
if file_type not in FILETYPES_TEST:
file_types_test.append(file_type)
if args.task_data:
<DeepExtract>
print('Downloading ScanNet v1 task data...')
files = [LABEL_MAP_FILES[V1_IDX], 'obj_classification/data.zip', 'obj_classification/trained_models.zip', 'voxel_labeling/data.zip', 'voxel_labeling/trained_models.zip']
for file in files:
url = BASE_URL + RELEASES_TASKS[V1_IDX] + '/' + file
localpath = os.path.join(out_dir_tasks, file)
localdir = os.path.dirname(localpath)
if not os.path.isdir(localdir):
os.makedirs(localdir)
download_file(url, localpath)
print('Downloaded task data.')
</DeepExtract>
elif args.label_map:
<DeepExtract>
print('Downloading ScanNet ' + RELEASE_NAME + ' label mapping file...')
files = [LABEL_MAP_FILE]
for file in files:
url = BASE_URL + RELEASE_TASKS + '/' + file
localpath = os.path.join(args.out_dir, file)
localdir = os.path.dirname(localpath)
if not os.path.isdir(localdir):
os.makedirs(localdir)
download_file(url, localpath)
print('Downloaded ScanNet ' + RELEASE_NAME + ' label mapping file.')
</DeepExtract>
elif args.preprocessed_frames:
if args.v1:
print('ERROR: Preprocessed frames only available for ScanNet v2')
print('You are downloading the preprocessed subset of frames ' + PREPROCESSED_FRAMES_FILE[0] + ' which requires ' + PREPROCESSED_FRAMES_FILE[1] + ' of space.')
<DeepExtract>
out_dir = os.path.dirname(os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if not os.path.isfile(os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0])):
print('\t' + os.path.join(BASE_URL, RELEASE_TASKS, PREPROCESSED_FRAMES_FILE[0]) + ' > ' + os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
(fh, out_file_tmp) = tempfile.mkstemp(dir=out_dir)
f = os.fdopen(fh, 'w')
f.close()
urllib.request.urlretrieve(os.path.join(BASE_URL, RELEASE_TASKS, PREPROCESSED_FRAMES_FILE[0]), out_file_tmp)
os.rename(out_file_tmp, os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
else:
print('WARNING: skipping download of existing file ' + os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
</DeepExtract>
elif args.test_frames_2d:
if args.v1:
print('ERROR: 2D test frames only available for ScanNet v2')
print('You are downloading the 2D test set ' + TEST_FRAMES_FILE[0] + ' which requires ' + TEST_FRAMES_FILE[1] + ' of space.')
<DeepExtract>
out_dir = os.path.dirname(os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if not os.path.isfile(os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0])):
print('\t' + os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]) + ' > ' + os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
(fh, out_file_tmp) = tempfile.mkstemp(dir=out_dir)
f = os.fdopen(fh, 'w')
f.close()
urllib.request.urlretrieve(os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]), out_file_tmp)
os.rename(out_file_tmp, os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
else:
print('WARNING: skipping download of existing file ' + os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
</DeepExtract>
elif args.id:
scan_id = args.id
is_test_scan = scan_id in release_test_scans
if scan_id not in release_scans and (not is_test_scan or args.v1):
print('ERROR: Invalid scan id: ' + scan_id)
else:
out_dir = os.path.join(out_dir_scans, scan_id) if not is_test_scan else os.path.join(out_dir_test_scans, scan_id)
scan_file_types = file_types if not is_test_scan else file_types_test
use_v1_sens = not is_test_scan
if not is_test_scan and (not args.v1) and ('.sens' in scan_file_types):
print("Note: ScanNet v2 uses the same .sens files as ScanNet v1: Press 'n' to exclude downloading .sens files for each scan")
key = input('')
if key.strip().lower() == 'n':
scan_file_types.remove('.sens')
<DeepExtract>
try:
print('Downloading ScanNet ' + RELEASE_NAME + ' scan ' + scan_id + ' ...')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
for ft in scan_file_types:
v1_sens = use_v1_sens and ft == '.sens'
url = BASE_URL + RELEASE + '/' + scan_id + '/' + scan_id + ft if not v1_sens else BASE_URL + RELEASES[V1_IDX] + '/' + scan_id + '/' + scan_id + ft
out_file = out_dir + '/' + scan_id + ft
download_file(url, out_file)
print('Downloaded scan ' + scan_id)
except:
FAILED_DOWNLOAD.append(scan_id)
</DeepExtract>
else:
if len(file_types) == len(FILETYPES):
print('WARNING: You are downloading the entire ScanNet ' + RELEASE_NAME + ' release which requires ' + RELEASE_SIZE + ' of space.')
else:
print('WARNING: You are downloading all ScanNet ' + RELEASE_NAME + ' scans of type ' + file_types[0])
print('Note that existing scan directories will be skipped. Delete partially downloaded directories to re-download.')
print('***')
print('Press any key to continue, or CTRL-C to exit.')
key = input('')
if not args.v1 and '.sens' in file_types:
print("Note: ScanNet v2 uses the same .sens files as ScanNet v1: Press 'n' to exclude downloading .sens files for each scan")
key = input('')
if key.strip().lower() == 'n':
file_types.remove('.sens')
<DeepExtract>
if len(release_scans) == 0:
return
print('Downloading ScanNet ' + RELEASE_NAME + ' release to ' + out_dir_scans + '...')
for scan_id in release_scans:
scan_out_dir = os.path.join(out_dir_scans, scan_id)
download_scan(scan_id, scan_out_dir, file_types, True)
print('Downloaded ScanNet ' + RELEASE_NAME + ' release.')
</DeepExtract>
if not args.v1:
<DeepExtract>
print('Downloading ScanNet ' + RELEASE_NAME + ' label mapping file...')
files = [LABEL_MAP_FILE]
for file in files:
url = BASE_URL + RELEASE_TASKS + '/' + file
localpath = os.path.join(args.out_dir, file)
localdir = os.path.dirname(localpath)
if not os.path.isdir(localdir):
os.makedirs(localdir)
download_file(url, localpath)
print('Downloaded ScanNet ' + RELEASE_NAME + ' label mapping file.')
</DeepExtract>
<DeepExtract>
if len(release_test_scans) == 0:
return
print('Downloading ScanNet ' + RELEASE_NAME + ' release to ' + out_dir_test_scans + '...')
for scan_id in release_test_scans:
scan_out_dir = os.path.join(out_dir_test_scans, scan_id)
download_scan(scan_id, scan_out_dir, file_types_test, False)
print('Downloaded ScanNet ' + RELEASE_NAME + ' release.')
</DeepExtract>
<DeepExtract>
out_dir = os.path.dirname(os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if not os.path.isfile(os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0])):
print('\t' + os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]) + ' > ' + os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
(fh, out_file_tmp) = tempfile.mkstemp(dir=out_dir)
f = os.fdopen(fh, 'w')
f.close()
urllib.request.urlretrieve(os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]), out_file_tmp)
os.rename(out_file_tmp, os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
else:
print('WARNING: skipping download of existing file ' + os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
</DeepExtract>
print('FAILED DOWNLOADING')
print(FAILED_DOWNLOAD)
|
def main():
parser = argparse.ArgumentParser(description='Downloads ScanNet public data release.')
parser.add_argument('-o', '--out_dir', required=True, help='directory in which to download')
parser.add_argument('--task_data', action='store_true', help='download task data (v1)')
parser.add_argument('--label_map', action='store_true', help='download label map file')
parser.add_argument('--v1', action='store_true', help='download ScanNet v1 instead of v2')
parser.add_argument('--id', help='specific scan id to download')
parser.add_argument('--preprocessed_frames', action='store_true', help='download preprocessed subset of ScanNet frames (' + PREPROCESSED_FRAMES_FILE[1] + ')')
parser.add_argument('--test_frames_2d', action='store_true', help='download 2D test frames (' + TEST_FRAMES_FILE[1] + '; also included with whole dataset download)')
parser.add_argument('--types', nargs='+', help='specific file type to download (.aggregation.json, .sens, .txt, _vh_clean.ply, _vh_clean_2.0.010000.segs.json, _vh_clean_2.ply, _vh_clean.segs.json, _vh_clean.aggregation.json, _vh_clean_2.labels.ply, _2d-instance.zip, _2d-instance-filt.zip, _2d-label.zip, _2d-label-filt.zip)')
args = parser.parse_args()
print('By pressing any key to continue you confirm that you have agreed to the ScanNet terms of use as described at:')
print(TOS_URL)
print('***')
print('Press any key to continue, or CTRL-C to exit.')
key = input('')
if args.v1:
global RELEASE
global RELEASE_TASKS
global RELEASE_NAME
global LABEL_MAP_FILE
RELEASE = RELEASES[V1_IDX]
RELEASE_TASKS = RELEASES_TASKS[V1_IDX]
RELEASE_NAME = RELEASES_NAMES[V1_IDX]
LABEL_MAP_FILE = LABEL_MAP_FILES[V1_IDX]
release_file = BASE_URL + RELEASE + '.txt'
scan_lines = urlopen(release_file)
scans = []
for scan_line in scan_lines:
scan_id = scan_line.decode('utf8').rstrip('\n')
scans.append(scan_id)
release_scans = scans
file_types = FILETYPES
release_test_file = BASE_URL + RELEASE + '_test.txt'
scan_lines = urlopen(release_test_file)
scans = []
for scan_line in scan_lines:
scan_id = scan_line.decode('utf8').rstrip('\n')
scans.append(scan_id)
release_test_scans = scans
file_types_test = FILETYPES_TEST
out_dir_scans = os.path.join(args.out_dir, 'scans')
out_dir_test_scans = os.path.join(args.out_dir, 'scans_test')
out_dir_tasks = os.path.join(args.out_dir, 'tasks')
if args.types:
file_types = args.types
for file_type in file_types:
if file_type not in FILETYPES:
print('ERROR: Invalid file type: ' + file_type)
return
file_types_test = []
for file_type in file_types:
if file_type not in FILETYPES_TEST:
file_types_test.append(file_type)
if args.task_data:
print('Downloading ScanNet v1 task data...')
files = [LABEL_MAP_FILES[V1_IDX], 'obj_classification/data.zip', 'obj_classification/trained_models.zip', 'voxel_labeling/data.zip', 'voxel_labeling/trained_models.zip']
for file in files:
url = BASE_URL + RELEASES_TASKS[V1_IDX] + '/' + file
localpath = os.path.join(out_dir_tasks, file)
localdir = os.path.dirname(localpath)
if not os.path.isdir(localdir):
os.makedirs(localdir)
download_file(url, localpath)
print('Downloaded task data.')
elif args.label_map:
print('Downloading ScanNet ' + RELEASE_NAME + ' label mapping file...')
files = [LABEL_MAP_FILE]
for file in files:
url = BASE_URL + RELEASE_TASKS + '/' + file
localpath = os.path.join(args.out_dir, file)
localdir = os.path.dirname(localpath)
if not os.path.isdir(localdir):
os.makedirs(localdir)
download_file(url, localpath)
print('Downloaded ScanNet ' + RELEASE_NAME + ' label mapping file.')
elif args.preprocessed_frames:
if args.v1:
print('ERROR: Preprocessed frames only available for ScanNet v2')
print('You are downloading the preprocessed subset of frames ' + PREPROCESSED_FRAMES_FILE[0] + ' which requires ' + PREPROCESSED_FRAMES_FILE[1] + ' of space.')
out_dir = os.path.dirname(os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if not os.path.isfile(os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0])):
print('\t' + os.path.join(BASE_URL, RELEASE_TASKS, PREPROCESSED_FRAMES_FILE[0]) + ' > ' + os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
(fh, out_file_tmp) = tempfile.mkstemp(dir=out_dir)
f = os.fdopen(fh, 'w')
f.close()
urllib.request.urlretrieve(os.path.join(BASE_URL, RELEASE_TASKS, PREPROCESSED_FRAMES_FILE[0]), out_file_tmp)
os.rename(out_file_tmp, os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
else:
print('WARNING: skipping download of existing file ' + os.path.join(out_dir_tasks, PREPROCESSED_FRAMES_FILE[0]))
elif args.test_frames_2d:
if args.v1:
print('ERROR: 2D test frames only available for ScanNet v2')
print('You are downloading the 2D test set ' + TEST_FRAMES_FILE[0] + ' which requires ' + TEST_FRAMES_FILE[1] + ' of space.')
out_dir = os.path.dirname(os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if not os.path.isfile(os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0])):
print('\t' + os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]) + ' > ' + os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
(fh, out_file_tmp) = tempfile.mkstemp(dir=out_dir)
f = os.fdopen(fh, 'w')
f.close()
urllib.request.urlretrieve(os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]), out_file_tmp)
os.rename(out_file_tmp, os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
else:
print('WARNING: skipping download of existing file ' + os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
elif args.id:
scan_id = args.id
is_test_scan = scan_id in release_test_scans
if scan_id not in release_scans and (not is_test_scan or args.v1):
print('ERROR: Invalid scan id: ' + scan_id)
else:
out_dir = os.path.join(out_dir_scans, scan_id) if not is_test_scan else os.path.join(out_dir_test_scans, scan_id)
scan_file_types = file_types if not is_test_scan else file_types_test
use_v1_sens = not is_test_scan
if not is_test_scan and (not args.v1) and ('.sens' in scan_file_types):
print("Note: ScanNet v2 uses the same .sens files as ScanNet v1: Press 'n' to exclude downloading .sens files for each scan")
key = input('')
if key.strip().lower() == 'n':
scan_file_types.remove('.sens')
try:
print('Downloading ScanNet ' + RELEASE_NAME + ' scan ' + scan_id + ' ...')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
for ft in scan_file_types:
v1_sens = use_v1_sens and ft == '.sens'
url = BASE_URL + RELEASE + '/' + scan_id + '/' + scan_id + ft if not v1_sens else BASE_URL + RELEASES[V1_IDX] + '/' + scan_id + '/' + scan_id + ft
out_file = out_dir + '/' + scan_id + ft
download_file(url, out_file)
print('Downloaded scan ' + scan_id)
except:
FAILED_DOWNLOAD.append(scan_id)
else:
if len(file_types) == len(FILETYPES):
print('WARNING: You are downloading the entire ScanNet ' + RELEASE_NAME + ' release which requires ' + RELEASE_SIZE + ' of space.')
else:
print('WARNING: You are downloading all ScanNet ' + RELEASE_NAME + ' scans of type ' + file_types[0])
print('Note that existing scan directories will be skipped. Delete partially downloaded directories to re-download.')
print('***')
print('Press any key to continue, or CTRL-C to exit.')
key = input('')
if not args.v1 and '.sens' in file_types:
print("Note: ScanNet v2 uses the same .sens files as ScanNet v1: Press 'n' to exclude downloading .sens files for each scan")
key = input('')
if key.strip().lower() == 'n':
file_types.remove('.sens')
if len(release_scans) == 0:
return
print('Downloading ScanNet ' + RELEASE_NAME + ' release to ' + out_dir_scans + '...')
for scan_id in release_scans:
scan_out_dir = os.path.join(out_dir_scans, scan_id)
download_scan(scan_id, scan_out_dir, file_types, True)
print('Downloaded ScanNet ' + RELEASE_NAME + ' release.')
if not args.v1:
print('Downloading ScanNet ' + RELEASE_NAME + ' label mapping file...')
files = [LABEL_MAP_FILE]
for file in files:
url = BASE_URL + RELEASE_TASKS + '/' + file
localpath = os.path.join(args.out_dir, file)
localdir = os.path.dirname(localpath)
if not os.path.isdir(localdir):
os.makedirs(localdir)
download_file(url, localpath)
print('Downloaded ScanNet ' + RELEASE_NAME + ' label mapping file.')
if len(release_test_scans) == 0:
return
print('Downloading ScanNet ' + RELEASE_NAME + ' release to ' + out_dir_test_scans + '...')
for scan_id in release_test_scans:
scan_out_dir = os.path.join(out_dir_test_scans, scan_id)
download_scan(scan_id, scan_out_dir, file_types_test, False)
print('Downloaded ScanNet ' + RELEASE_NAME + ' release.')
out_dir = os.path.dirname(os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if not os.path.isfile(os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0])):
print('\t' + os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]) + ' > ' + os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
(fh, out_file_tmp) = tempfile.mkstemp(dir=out_dir)
f = os.fdopen(fh, 'w')
f.close()
urllib.request.urlretrieve(os.path.join(BASE_URL, RELEASE_TASKS, TEST_FRAMES_FILE[0]), out_file_tmp)
os.rename(out_file_tmp, os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
else:
print('WARNING: skipping download of existing file ' + os.path.join(out_dir_tasks, TEST_FRAMES_FILE[0]))
print('FAILED DOWNLOADING')
print(FAILED_DOWNLOAD)
|
3DIoUMatch
|
positive
|
def build_substatement(self, auth_info, stmt_data):
con_act_data = stmt_data.pop('context_contextActivities', {})
del stmt_data['objectType']
sub = SubStatement.objects.create(**stmt_data)
if con_act_data:
<DeepExtract>
for con_act_group in list(con_act_data.items()):
if isinstance(con_act_group[1], list):
for con_act in con_act_group[1]:
act = ActivityManager(con_act, auth=auth_info['agent'], define=auth_info['define']).activity
if con_act_group[0] == 'parent':
sub.context_ca_parent.add(act)
elif con_act_group[0] == 'grouping':
sub.context_ca_grouping.add(act)
elif con_act_group[0] == 'category':
sub.context_ca_category.add(act)
else:
sub.context_ca_other.add(act)
else:
act = ActivityManager(con_act_group[1], auth=auth_info['agent'], define=auth_info['define']).activity
if con_act_group[0] == 'parent':
sub.context_ca_parent.add(act)
elif con_act_group[0] == 'grouping':
sub.context_ca_grouping.add(act)
elif con_act_group[0] == 'category':
sub.context_ca_category.add(act)
else:
sub.context_ca_other.add(act)
sub.save()
</DeepExtract>
return sub
|
def build_substatement(self, auth_info, stmt_data):
con_act_data = stmt_data.pop('context_contextActivities', {})
del stmt_data['objectType']
sub = SubStatement.objects.create(**stmt_data)
if con_act_data:
for con_act_group in list(con_act_data.items()):
if isinstance(con_act_group[1], list):
for con_act in con_act_group[1]:
act = ActivityManager(con_act, auth=auth_info['agent'], define=auth_info['define']).activity
if con_act_group[0] == 'parent':
sub.context_ca_parent.add(act)
elif con_act_group[0] == 'grouping':
sub.context_ca_grouping.add(act)
elif con_act_group[0] == 'category':
sub.context_ca_category.add(act)
else:
sub.context_ca_other.add(act)
else:
act = ActivityManager(con_act_group[1], auth=auth_info['agent'], define=auth_info['define']).activity
if con_act_group[0] == 'parent':
sub.context_ca_parent.add(act)
elif con_act_group[0] == 'grouping':
sub.context_ca_grouping.add(act)
elif con_act_group[0] == 'category':
sub.context_ca_category.add(act)
else:
sub.context_ca_other.add(act)
sub.save()
return sub
|
ADL_LRS
|
positive
|
def add_fpn_retinanet_outputs(model, blobs_in, dim_in, spatial_scales):
"""RetinaNet head. For classification and box regression, we can chose to
have the same conv tower or a separate tower. "bl_feat_list" stores the list
of feature blobs for bbox prediction. These blobs can be shared cls feature
blobs if we share the tower or else are independent blobs.
"""
dim_out = dim_in
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
A = len(cfg.RETINANET.ASPECT_RATIOS) * cfg.RETINANET.SCALES_PER_OCTAVE
<DeepExtract>
prior_prob = cfg.RETINANET.PRIOR_PROB
scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
aspect_ratios = len(cfg.RETINANET.ASPECT_RATIOS)
if cfg.RETINANET.SOFTMAX:
bias = np.zeros((model.num_classes, 1), dtype=np.float32)
bias[0] = np.log((model.num_classes - 1) * (1 - prior_prob) / prior_prob)
bias = np.vstack([bias for _ in range(scales_per_octave * aspect_ratios)])
bias_init = ('GivenTensorFill', {'values': bias.astype(dtype=np.float32)})
else:
bias_init = ('ConstantFill', {'value': -np.log((1 - prior_prob) / prior_prob)})
bias_init = bias_init
</DeepExtract>
assert len(blobs_in) == k_max - k_min + 1
bbox_feat_list = []
cls_pred_dim = model.num_classes if cfg.RETINANET.SOFTMAX else model.num_classes - 1
bbox_regr_dim = 4 * (model.num_classes - 1) if cfg.RETINANET.CLASS_SPECIFIC_BBOX else 4
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl]
for nconv in range(cfg.RETINANET.NUM_CONVS):
suffix = 'n{}_fpn{}'.format(nconv, lvl)
(dim_in, dim_out) = (dim_in, dim_in)
if lvl == k_min:
bl_out = model.Conv(bl_in, 'retnet_cls_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
bl_out = model.ConvShared(bl_in, 'retnet_cls_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight='retnet_cls_conv_n{}_fpn{}_w'.format(nconv, k_min), bias='retnet_cls_conv_n{}_fpn{}_b'.format(nconv, k_min))
bl_in = model.Relu(bl_out, bl_out)
bl_feat = bl_in
if lvl == k_min:
retnet_cls_pred = model.Conv(bl_feat, 'retnet_cls_pred_fpn{}'.format(lvl), dim_in, cls_pred_dim * A, 3, pad=1, stride=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=bias_init)
else:
retnet_cls_pred = model.ConvShared(bl_feat, 'retnet_cls_pred_fpn{}'.format(lvl), dim_in, cls_pred_dim * A, 3, pad=1, stride=1, weight='retnet_cls_pred_fpn{}_w'.format(k_min), bias='retnet_cls_pred_fpn{}_b'.format(k_min))
if not model.train:
if cfg.RETINANET.SOFTMAX:
model.net.GroupSpatialSoftmax(retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl), num_classes=cls_pred_dim)
else:
model.net.Sigmoid(retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl))
if cfg.RETINANET.SHARE_CLS_BBOX_TOWER:
bbox_feat_list.append(bl_feat)
if not cfg.RETINANET.SHARE_CLS_BBOX_TOWER:
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl]
for nconv in range(cfg.RETINANET.NUM_CONVS):
suffix = 'n{}_fpn{}'.format(nconv, lvl)
(dim_in, dim_out) = (dim_in, dim_in)
if lvl == k_min:
bl_out = model.Conv(bl_in, 'retnet_bbox_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
bl_out = model.ConvShared(bl_in, 'retnet_bbox_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight='retnet_bbox_conv_n{}_fpn{}_w'.format(nconv, k_min), bias='retnet_bbox_conv_n{}_fpn{}_b'.format(nconv, k_min))
bl_in = model.Relu(bl_out, bl_out)
bl_feat = bl_in
bbox_feat_list.append(bl_feat)
for (i, lvl) in enumerate(range(k_min, k_max + 1)):
bbox_pred = 'retnet_bbox_pred_fpn{}'.format(lvl)
bl_feat = bbox_feat_list[i]
if lvl == k_min:
model.Conv(bl_feat, bbox_pred, dim_in, bbox_regr_dim * A, 3, pad=1, stride=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
model.ConvShared(bl_feat, bbox_pred, dim_in, bbox_regr_dim * A, 3, pad=1, stride=1, weight='retnet_bbox_pred_fpn{}_w'.format(k_min), bias='retnet_bbox_pred_fpn{}_b'.format(k_min))
|
def add_fpn_retinanet_outputs(model, blobs_in, dim_in, spatial_scales):
"""RetinaNet head. For classification and box regression, we can chose to
have the same conv tower or a separate tower. "bl_feat_list" stores the list
of feature blobs for bbox prediction. These blobs can be shared cls feature
blobs if we share the tower or else are independent blobs.
"""
dim_out = dim_in
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
A = len(cfg.RETINANET.ASPECT_RATIOS) * cfg.RETINANET.SCALES_PER_OCTAVE
prior_prob = cfg.RETINANET.PRIOR_PROB
scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
aspect_ratios = len(cfg.RETINANET.ASPECT_RATIOS)
if cfg.RETINANET.SOFTMAX:
bias = np.zeros((model.num_classes, 1), dtype=np.float32)
bias[0] = np.log((model.num_classes - 1) * (1 - prior_prob) / prior_prob)
bias = np.vstack([bias for _ in range(scales_per_octave * aspect_ratios)])
bias_init = ('GivenTensorFill', {'values': bias.astype(dtype=np.float32)})
else:
bias_init = ('ConstantFill', {'value': -np.log((1 - prior_prob) / prior_prob)})
bias_init = bias_init
assert len(blobs_in) == k_max - k_min + 1
bbox_feat_list = []
cls_pred_dim = model.num_classes if cfg.RETINANET.SOFTMAX else model.num_classes - 1
bbox_regr_dim = 4 * (model.num_classes - 1) if cfg.RETINANET.CLASS_SPECIFIC_BBOX else 4
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl]
for nconv in range(cfg.RETINANET.NUM_CONVS):
suffix = 'n{}_fpn{}'.format(nconv, lvl)
(dim_in, dim_out) = (dim_in, dim_in)
if lvl == k_min:
bl_out = model.Conv(bl_in, 'retnet_cls_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
bl_out = model.ConvShared(bl_in, 'retnet_cls_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight='retnet_cls_conv_n{}_fpn{}_w'.format(nconv, k_min), bias='retnet_cls_conv_n{}_fpn{}_b'.format(nconv, k_min))
bl_in = model.Relu(bl_out, bl_out)
bl_feat = bl_in
if lvl == k_min:
retnet_cls_pred = model.Conv(bl_feat, 'retnet_cls_pred_fpn{}'.format(lvl), dim_in, cls_pred_dim * A, 3, pad=1, stride=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=bias_init)
else:
retnet_cls_pred = model.ConvShared(bl_feat, 'retnet_cls_pred_fpn{}'.format(lvl), dim_in, cls_pred_dim * A, 3, pad=1, stride=1, weight='retnet_cls_pred_fpn{}_w'.format(k_min), bias='retnet_cls_pred_fpn{}_b'.format(k_min))
if not model.train:
if cfg.RETINANET.SOFTMAX:
model.net.GroupSpatialSoftmax(retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl), num_classes=cls_pred_dim)
else:
model.net.Sigmoid(retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl))
if cfg.RETINANET.SHARE_CLS_BBOX_TOWER:
bbox_feat_list.append(bl_feat)
if not cfg.RETINANET.SHARE_CLS_BBOX_TOWER:
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl]
for nconv in range(cfg.RETINANET.NUM_CONVS):
suffix = 'n{}_fpn{}'.format(nconv, lvl)
(dim_in, dim_out) = (dim_in, dim_in)
if lvl == k_min:
bl_out = model.Conv(bl_in, 'retnet_bbox_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
bl_out = model.ConvShared(bl_in, 'retnet_bbox_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight='retnet_bbox_conv_n{}_fpn{}_w'.format(nconv, k_min), bias='retnet_bbox_conv_n{}_fpn{}_b'.format(nconv, k_min))
bl_in = model.Relu(bl_out, bl_out)
bl_feat = bl_in
bbox_feat_list.append(bl_feat)
for (i, lvl) in enumerate(range(k_min, k_max + 1)):
bbox_pred = 'retnet_bbox_pred_fpn{}'.format(lvl)
bl_feat = bbox_feat_list[i]
if lvl == k_min:
model.Conv(bl_feat, bbox_pred, dim_in, bbox_regr_dim * A, 3, pad=1, stride=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
model.ConvShared(bl_feat, bbox_pred, dim_in, bbox_regr_dim * A, 3, pad=1, stride=1, weight='retnet_bbox_pred_fpn{}_w'.format(k_min), bias='retnet_bbox_pred_fpn{}_b'.format(k_min))
|
Detectron
|
positive
|
@check_ssh
def ssh(self, cmd, allow_fail=False):
"""Run the given command on the remote host, and block until the command has finished running.
:param cmd: The remote ssh command
:param allow_fail: If True, ignore nonzero exit status of the remote command,
else raise an ``RemoteCommandError``
:return: The exit status of the command.
:raise RemoteCommandError: If allow_fail is False and the command returns a non-zero exit status
"""
<DeepExtract>
    msg = '%s: %s' % (str(self), 'Running ssh command: %s' % cmd)
    self.logger.log(logging.DEBUG, msg)
</DeepExtract>
client = self.ssh_client
(stdin, stdout, stderr) = client.exec_command(cmd)
stdout.read()
exit_status = stdout.channel.recv_exit_status()
try:
if exit_status != 0:
if not allow_fail:
raise RemoteCommandError(self, cmd, exit_status, stderr.read())
else:
<DeepExtract>
"Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()) = '%s: %s' % (str(self), "Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()))
self.logger.log(logging.DEBUG, "Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()), *args, **kwargs)
</DeepExtract>
finally:
stdin.close()
stdout.close()
stderr.close()
return exit_status
|
@check_ssh
def ssh(self, cmd, allow_fail=False):
"""Run the given command on the remote host, and block until the command has finished running.
:param cmd: The remote ssh command
:param allow_fail: If True, ignore nonzero exit status of the remote command,
else raise an ``RemoteCommandError``
:return: The exit status of the command.
:raise RemoteCommandError: If allow_fail is False and the command returns a non-zero exit status
"""
    msg = '%s: %s' % (str(self), 'Running ssh command: %s' % cmd)
    self.logger.log(logging.DEBUG, msg)
client = self.ssh_client
(stdin, stdout, stderr) = client.exec_command(cmd)
stdout.read()
exit_status = stdout.channel.recv_exit_status()
try:
if exit_status != 0:
if not allow_fail:
raise RemoteCommandError(self, cmd, exit_status, stderr.read())
else:
"Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()) = '%s: %s' % (str(self), "Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()))
self.logger.log(logging.DEBUG, "Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()), *args, **kwargs)
finally:
stdin.close()
stdout.close()
stderr.close()
return exit_status
|
ducktape
|
positive
|
def double_comprehension(varname):
<DeepExtract>
ls = find_lines(varname)
line = ls[0] if len(ls) else None
</DeepExtract>
return ast.dump(ast.parse(line)).count('comprehension') == 2
|
def double_comprehension(varname):
ls = find_lines(varname)
line = ls[0] if len(ls) else None
return ast.dump(ast.parse(line)).count('comprehension') == 2
|
coding-the-matrix
|
positive
|
def process_single_json(json_dir, config, norm_data_path=pjoin(BASEPATH, 'data/treadmill_norm/test2d.npz'), scale=0.07, smooth=True, to_batch=False):
def to_tensor(x):
return torch.tensor(x).float().to(config.device)
anim2d = AnimationData2D.from_openpose_json(json_dir, scale=scale, smooth=smooth)
<DeepExtract>
style2d = torch.tensor(anim2d.get_style2d()).float().to(config.device)
</DeepExtract>
data = {'meta': {'style': 'test', 'content': json_dir.split('/')[-1]}, 'style2draw': style2d}
norm = np.load(norm_data_path, allow_pickle=True)
<DeepExtract>
data['style2d'] = (style2d - to_tensor(norm['mean']).unsqueeze(-1)) / to_tensor(norm['std']).unsqueeze(-1)
</DeepExtract>
if to_batch:
<DeepExtract>
for (key, value) in data.items():
if key == 'meta':
data[key] = {sub_key: [sub_value] for (sub_key, sub_value) in value.items()}
else:
data[key] = value.unsqueeze(0)
data = data
</DeepExtract>
return data
|
def process_single_json(json_dir, config, norm_data_path=pjoin(BASEPATH, 'data/treadmill_norm/test2d.npz'), scale=0.07, smooth=True, to_batch=False):
def to_tensor(x):
return torch.tensor(x).float().to(config.device)
anim2d = AnimationData2D.from_openpose_json(json_dir, scale=scale, smooth=smooth)
style2d = torch.tensor(anim2d.get_style2d()).float().to(config.device)
data = {'meta': {'style': 'test', 'content': json_dir.split('/')[-1]}, 'style2draw': style2d}
norm = np.load(norm_data_path, allow_pickle=True)
data['style2d'] = (style2d - to_tensor(norm['mean']).unsqueeze(-1)) / to_tensor(norm['std']).unsqueeze(-1)
if to_batch:
for (key, value) in data.items():
if key == 'meta':
data[key] = {sub_key: [sub_value] for (sub_key, sub_value) in value.items()}
else:
data[key] = value.unsqueeze(0)
data = data
return data
|
deep-motion-editing
|
positive
|
def accept(self):
if self.selectionWidget.has_equal_references_shape_types():
self.obj.BoundarySettings = self.boundaryWidget.boundarySettings()
self.obj.FoamBoundarySettings = self.foamWidget.boundarySettings()
self.obj.References = self.selectionWidget.references
<DeepExtract>
doc = FreeCADGui.getDocument(self.obj.Document)
doc.Document.recompute()
self.selectionWidget.setback_listobj_visibility()
if self.selectionWidget.sel_server:
FreeCADGui.Selection.removeObserver(self.selectionWidget.sel_server)
doc.resetEdit()
</DeepExtract>
return True
|
def accept(self):
if self.selectionWidget.has_equal_references_shape_types():
self.obj.BoundarySettings = self.boundaryWidget.boundarySettings()
self.obj.FoamBoundarySettings = self.foamWidget.boundarySettings()
self.obj.References = self.selectionWidget.references
doc = FreeCADGui.getDocument(self.obj.Document)
doc.Document.recompute()
self.selectionWidget.setback_listobj_visibility()
if self.selectionWidget.sel_server:
FreeCADGui.Selection.removeObserver(self.selectionWidget.sel_server)
doc.resetEdit()
return True
|
Cfd
|
positive
|
def quantize(obj_path: str, tiff_files: List[str], property_names: List[str], texture_index: int, features_range: List[Tuple[float, float]], features_percentile_range: List[Tuple[float, float]], generate_json: bool) -> Tuple[List[str], str]:
"""
Quantizes a float array in each tiff file to one component in a RGBA png file and
generates a json describing property textures encoded if generate_json is true.
The json file is described by
https://github.com/CesiumGS/3d-tiles/tree/main/specification/Metadata
and
https://github.com/CesiumGS/glTF/tree/3d-tiles-next/extensions/2.0/Vendor/EXT_structural_metadata
"""
tiffs_per_png = 3
png_components = 4
dir = os.path.dirname(obj_path)
file_no_ext = os.path.splitext(os.path.basename(obj_path))[0]
png_count = math.ceil(len(tiff_files) / tiffs_per_png)
quantized_files = []
pt = json.loads(property_texture_template)
schema_properties = pt['EXT_structural_metadata']['schema']['classes']['mesh']['properties']
propertyTextures_properties = pt['EXT_structural_metadata']['propertyTextures'][0]['properties']
for i in range(png_count):
png_data = vtkImageData()
png_array = vtkUnsignedCharArray()
png_array.SetNumberOfComponents(png_components)
pa = numpy_support.vtk_to_numpy(png_array)
for j in range(png_components):
tiff_index = i * tiffs_per_png + j
if j < tiffs_per_png and tiff_index < len(tiff_files):
print('reading tiff: {}'.format(dir + '/' + tiff_files[tiff_index]))
<DeepExtract>
tiff_reader = vtkGDALRasterReader()
tiff_reader.SetFileName(dir + '/' + tiff_files[tiff_index])
tiff_reader.Update()
tiff_data = tiff_reader.GetOutput()
tiff_array = tiff_data.GetPointData().GetArray(0)
dims = tiff_data.GetDimensions()
if tiff_array is None:
tiff_array = tiff_data.GetCellData().GetArray(0)
dims = (dims[0] - 1, dims[1] - 1, dims[2])
(tiff_array, dims) = (tiff_array, dims)
</DeepExtract>
ta = numpy_support.vtk_to_numpy(tiff_array)
if features_percentile_range:
ta = np.nan_to_num(ta, copy=True, nan=features_percentile_range[tiff_index][0])
ta[ta > features_percentile_range[tiff_index][1]] = features_percentile_range[tiff_index][1]
ta[ta < features_percentile_range[tiff_index][0]] = features_percentile_range[tiff_index][0]
fr = (np.nanmin(ta), np.nanmax(ta))
features_range[tiff_index] = (min(features_range[tiff_index][0], fr[0]), max(features_range[tiff_index][1], fr[1]))
fr = features_range[tiff_index]
if generate_json:
property_name = 'c' + property_names[tiff_index]
schema_properties[property_name] = {}
schema_properties[property_name]['name'] = 'Covariance ' + property_name
schema_properties[property_name]['type'] = 'SCALAR'
schema_properties[property_name]['componentType'] = 'UINT8'
schema_properties[property_name]['normalized'] = True
schema_properties[property_name]['offset'] = float(fr[0])
schema_properties[property_name]['scale'] = float(fr[1] - fr[0])
propertyTextures_properties[property_name] = {}
propertyTextures_properties[property_name]['index'] = texture_index + i
propertyTextures_properties[property_name]['texCoord'] = 0
propertyTextures_properties[property_name]['channels'] = [j]
if j == 0:
png_data.SetDimensions(dims)
png_array.SetNumberOfTuples(dims[0] * dims[1])
pa = numpy_support.vtk_to_numpy(png_array)
else:
prev_dims = png_data.GetDimensions()
if not prev_dims == dims:
logging.error('TIFF files with different dimensions. 0: {} {}: {}'.format(prev_dims, j, dims))
pa[:, j] = (ta - fr[0]) / (fr[1] - fr[0]) * 255
else:
pa[:, j] = 255
png_data.GetPointData().SetScalars(png_array)
flip_y_filter = None
png_writer = vtkPNGWriter()
flip_y_filter = vtkImageFlip()
flip_y_filter.SetFilteredAxis(1)
flip_y_filter.SetInputDataObject(png_data)
png_writer.SetInputConnection(flip_y_filter.GetOutputPort())
quantized_file_basename = file_no_ext + '_' + str(texture_index + i)
quantized_files.append(quantized_file_basename + '.png')
print('writing png: {}'.format(dir + '/' + quantized_file_basename + '.png'))
png_writer.SetFileName(dir + '/' + quantized_file_basename + '.png')
png_writer.Write()
property_texture_file = ''
if generate_json and quantized_files:
property_texture_file = dir + '/property_texture.json'
with open(property_texture_file, 'w') as outfile:
json.dump(pt, outfile, indent=4)
return (quantized_files, property_texture_file)
|
def quantize(obj_path: str, tiff_files: List[str], property_names: List[str], texture_index: int, features_range: List[Tuple[float, float]], features_percentile_range: List[Tuple[float, float]], generate_json: bool) -> Tuple[List[str], str]:
"""
Quantizes a float array in each tiff file to one component in a RGBA png file and
generates a json describing property textures encoded if generate_json is true.
The json file is described by
https://github.com/CesiumGS/3d-tiles/tree/main/specification/Metadata
and
https://github.com/CesiumGS/glTF/tree/3d-tiles-next/extensions/2.0/Vendor/EXT_structural_metadata
"""
tiffs_per_png = 3
png_components = 4
dir = os.path.dirname(obj_path)
file_no_ext = os.path.splitext(os.path.basename(obj_path))[0]
png_count = math.ceil(len(tiff_files) / tiffs_per_png)
quantized_files = []
pt = json.loads(property_texture_template)
schema_properties = pt['EXT_structural_metadata']['schema']['classes']['mesh']['properties']
propertyTextures_properties = pt['EXT_structural_metadata']['propertyTextures'][0]['properties']
for i in range(png_count):
png_data = vtkImageData()
png_array = vtkUnsignedCharArray()
png_array.SetNumberOfComponents(png_components)
pa = numpy_support.vtk_to_numpy(png_array)
for j in range(png_components):
tiff_index = i * tiffs_per_png + j
if j < tiffs_per_png and tiff_index < len(tiff_files):
print('reading tiff: {}'.format(dir + '/' + tiff_files[tiff_index]))
tiff_reader = vtkGDALRasterReader()
tiff_reader.SetFileName(dir + '/' + tiff_files[tiff_index])
tiff_reader.Update()
tiff_data = tiff_reader.GetOutput()
tiff_array = tiff_data.GetPointData().GetArray(0)
dims = tiff_data.GetDimensions()
if tiff_array is None:
tiff_array = tiff_data.GetCellData().GetArray(0)
dims = (dims[0] - 1, dims[1] - 1, dims[2])
(tiff_array, dims) = (tiff_array, dims)
ta = numpy_support.vtk_to_numpy(tiff_array)
if features_percentile_range:
ta = np.nan_to_num(ta, copy=True, nan=features_percentile_range[tiff_index][0])
ta[ta > features_percentile_range[tiff_index][1]] = features_percentile_range[tiff_index][1]
ta[ta < features_percentile_range[tiff_index][0]] = features_percentile_range[tiff_index][0]
fr = (np.nanmin(ta), np.nanmax(ta))
features_range[tiff_index] = (min(features_range[tiff_index][0], fr[0]), max(features_range[tiff_index][1], fr[1]))
fr = features_range[tiff_index]
if generate_json:
property_name = 'c' + property_names[tiff_index]
schema_properties[property_name] = {}
schema_properties[property_name]['name'] = 'Covariance ' + property_name
schema_properties[property_name]['type'] = 'SCALAR'
schema_properties[property_name]['componentType'] = 'UINT8'
schema_properties[property_name]['normalized'] = True
schema_properties[property_name]['offset'] = float(fr[0])
schema_properties[property_name]['scale'] = float(fr[1] - fr[0])
propertyTextures_properties[property_name] = {}
propertyTextures_properties[property_name]['index'] = texture_index + i
propertyTextures_properties[property_name]['texCoord'] = 0
propertyTextures_properties[property_name]['channels'] = [j]
if j == 0:
png_data.SetDimensions(dims)
png_array.SetNumberOfTuples(dims[0] * dims[1])
pa = numpy_support.vtk_to_numpy(png_array)
else:
prev_dims = png_data.GetDimensions()
if not prev_dims == dims:
logging.error('TIFF files with different dimensions. 0: {} {}: {}'.format(prev_dims, j, dims))
pa[:, j] = (ta - fr[0]) / (fr[1] - fr[0]) * 255
else:
pa[:, j] = 255
png_data.GetPointData().SetScalars(png_array)
flip_y_filter = None
png_writer = vtkPNGWriter()
flip_y_filter = vtkImageFlip()
flip_y_filter.SetFilteredAxis(1)
flip_y_filter.SetInputDataObject(png_data)
png_writer.SetInputConnection(flip_y_filter.GetOutputPort())
quantized_file_basename = file_no_ext + '_' + str(texture_index + i)
quantized_files.append(quantized_file_basename + '.png')
print('writing png: {}'.format(dir + '/' + quantized_file_basename + '.png'))
png_writer.SetFileName(dir + '/' + quantized_file_basename + '.png')
png_writer.Write()
property_texture_file = ''
if generate_json and quantized_files:
property_texture_file = dir + '/property_texture.json'
with open(property_texture_file, 'w') as outfile:
json.dump(pt, outfile, indent=4)
return (quantized_files, property_texture_file)
|
Danesfield
|
positive
|
def test_create_reference_constant(self):
tree1 = Node.add_root()
constant1 = ReferenceConstant.objects.create()
test_model1 = Model.objects.create()
node = tree1.add_child(content_object=constant1)
node.add_child(content_object=test_model1)
tree1 = Node.objects.get(id=tree1.id)
<DeepExtract>
xml_str = self.build_xml(tree1)
dict1 = BlocklyXmlParser().parse(xml_str)[0]
</DeepExtract>
tree2 = NodeTreeCreator().create(dict1)
self.assertIsInstance(tree2, Node)
self.assertIsNot(tree1, tree2)
self.assertFalse(self.tree_diff(tree1, tree2))
|
def test_create_reference_constant(self):
tree1 = Node.add_root()
constant1 = ReferenceConstant.objects.create()
test_model1 = Model.objects.create()
node = tree1.add_child(content_object=constant1)
node.add_child(content_object=test_model1)
tree1 = Node.objects.get(id=tree1.id)
xml_str = self.build_xml(tree1)
dict1 = BlocklyXmlParser().parse(xml_str)[0]
tree2 = NodeTreeCreator().create(dict1)
self.assertIsInstance(tree2, Node)
self.assertIsNot(tree1, tree2)
self.assertFalse(self.tree_diff(tree1, tree2))
|
django-business-logic
|
positive
|
def run(self):
<DeepExtract>
train_itr = self._fm.get_train_itr()
if train_itr > 0:
self._policy.restore(self._fm.train_policy_fname(train_itr - 1), train=True)
save_itr = train_itr
start_step = save_itr * self._save_every_n_steps
timeit.reset()
timeit.start('total')
(start_step, save_itr) = (start_step, save_itr)
</DeepExtract>
step = start_step
while step < self._total_steps:
step += 1
if step % self._eval_every_n_steps == 0:
self._run_env_eval(step, do_sampler_step=False, calculate_holdout=True)
if step >= self._learn_after_n_steps:
self._run_train_step(step)
if step % self._log_every_n_steps == 0:
self._run_log(step)
if step % self._save_every_n_steps == 0:
logger.info('Saving files for itr {0}'.format(save_itr))
self._save_train(save_itr)
save_itr += 1
if step >= self._total_steps:
logger.info('Saving files for itr {0}'.format(save_itr))
self._save_train(save_itr)
|
def run(self):
train_itr = self._fm.get_train_itr()
if train_itr > 0:
self._policy.restore(self._fm.train_policy_fname(train_itr - 1), train=True)
save_itr = train_itr
start_step = save_itr * self._save_every_n_steps
timeit.reset()
timeit.start('total')
(start_step, save_itr) = (start_step, save_itr)
step = start_step
while step < self._total_steps:
step += 1
if step % self._eval_every_n_steps == 0:
self._run_env_eval(step, do_sampler_step=False, calculate_holdout=True)
if step >= self._learn_after_n_steps:
self._run_train_step(step)
if step % self._log_every_n_steps == 0:
self._run_log(step)
if step % self._save_every_n_steps == 0:
logger.info('Saving files for itr {0}'.format(save_itr))
self._save_train(save_itr)
save_itr += 1
if step >= self._total_steps:
logger.info('Saving files for itr {0}'.format(save_itr))
self._save_train(save_itr)
|
CAPs
|
positive
|
@mod.route('/join/')
@mod.route('/join/csv/', defaults={'csv': True})
def api_join_view(csv=None):
<DeepExtract>
show = request.args.get('show', '')
sumlevel = request.args.get('sumlevel', '').lower()
required = request.args.get('required', '')
force = request.args.get('force', '')
where = request.args.get('where', '')
order = request.args.get('order', '')
sort = request.args.get('sort', '')
limit = request.args.get('limit', 500)
offset = request.args.get('offset', None)
exclude = request.args.get('exclude', None)
auto_crosswalk = request.args.get('auto_crosswalk', False)
display_names = request.args.get('display_names', False)
shows = show.split(',')
sumlevels = sumlevel.split(',')
if shows and (not sumlevel):
sumlevels = ['all' for show in shows]
values = required.split(',') if required else []
shows_and_levels = {val: sumlevels[idx] for (idx, val) in enumerate(shows)}
variables = manager.possible_variables
vars_and_vals = {var: request.args.get(var, None) for var in variables}
vars_and_vals = {k: v for (k, v) in vars_and_vals.items() if v}
vars_needed = vars_and_vals.keys() + shows + values
api_obj = ApiObject(vars_needed=vars_needed, vars_and_vals=vars_and_vals, shows_and_levels=shows_and_levels, values=values, where=where, force=force, order=order, sort=sort, limit=limit, exclude=exclude, auto_crosswalk=auto_crosswalk, display_names=display_names, offset=offset)
api_obj = api_obj
</DeepExtract>
if api_obj.limit and api_obj.limit > 80000:
raise DataUSAException('Limit parameter must be less than 80,000')
tables = manager.required_tables(api_obj)
data = join_api.joinable_query(tables, api_obj, manager.table_years, csv_format=csv)
return data
|
@mod.route('/join/')
@mod.route('/join/csv/', defaults={'csv': True})
def api_join_view(csv=None):
show = request.args.get('show', '')
sumlevel = request.args.get('sumlevel', '').lower()
required = request.args.get('required', '')
force = request.args.get('force', '')
where = request.args.get('where', '')
order = request.args.get('order', '')
sort = request.args.get('sort', '')
limit = request.args.get('limit', 500)
offset = request.args.get('offset', None)
exclude = request.args.get('exclude', None)
auto_crosswalk = request.args.get('auto_crosswalk', False)
display_names = request.args.get('display_names', False)
shows = show.split(',')
sumlevels = sumlevel.split(',')
if shows and (not sumlevel):
sumlevels = ['all' for show in shows]
values = required.split(',') if required else []
shows_and_levels = {val: sumlevels[idx] for (idx, val) in enumerate(shows)}
variables = manager.possible_variables
vars_and_vals = {var: request.args.get(var, None) for var in variables}
vars_and_vals = {k: v for (k, v) in vars_and_vals.items() if v}
vars_needed = vars_and_vals.keys() + shows + values
api_obj = ApiObject(vars_needed=vars_needed, vars_and_vals=vars_and_vals, shows_and_levels=shows_and_levels, values=values, where=where, force=force, order=order, sort=sort, limit=limit, exclude=exclude, auto_crosswalk=auto_crosswalk, display_names=display_names, offset=offset)
api_obj = api_obj
if api_obj.limit and api_obj.limit > 80000:
raise DataUSAException('Limit parameter must be less than 80,000')
tables = manager.required_tables(api_obj)
data = join_api.joinable_query(tables, api_obj, manager.table_years, csv_format=csv)
return data
|
datausa-api
|
positive
|
def test_print_age(capsys):
<DeepExtract>
if 2 < 0:
print('you are not of age yet')
else:
print(f'hello, your age is {2}')
</DeepExtract>
(out, err) = capsys.readouterr()
assert out == 'hello, your age is 2\n'
assert err == ''
|
def test_print_age(capsys):
if 2 < 0:
print('you are not of age yet')
else:
print(f'hello, your age is {2}')
(out, err) = capsys.readouterr()
assert out == 'hello, your age is 2\n'
assert err == ''
|
explains
|
positive
|
def ReadFileObject(self, file_object):
"""Reads a Windows AMCache file-like object.
Args:
file_object (file): file-like object.
Raises:
ParseError: if the file cannot be read.
"""
regf_file = pyregf.file()
regf_file.open_file_object(file_object)
root_key = regf_file.get_root_key()
root_key_name = root_key.get_name()
if root_key_name.lower() not in self._ROOT_KEY_NAMES:
return
root_key = regf_file.get_key_by_path('\\Root')
if root_key:
file_key = root_key.get_sub_key_by_path('File')
if file_key:
<DeepExtract>
for volume_key in file_key.sub_keys:
for file_reference_key in volume_key.sub_keys:
self._ReadFileReferenceKey(file_reference_key)
</DeepExtract>
programs_key = root_key.get_sub_key_by_path('Programs')
if programs_key:
<DeepExtract>
for program_key in programs_key.sub_keys:
self._ReadProgramKey(program_key)
</DeepExtract>
inventory_application_key = root_key.get_sub_key_by_path('InventoryApplication')
if inventory_application_key:
<DeepExtract>
for application_key in inventory_application_key.sub_keys:
self._ReadApplicationKey(application_key)
</DeepExtract>
inventory_application_file_key = root_key.get_sub_key_by_path('InventoryApplicationFile')
if inventory_application_file_key:
<DeepExtract>
for application_file_key in inventory_application_file_key.sub_keys:
self._ReadApplicationFileKey(application_file_key)
</DeepExtract>
|
def ReadFileObject(self, file_object):
"""Reads a Windows AMCache file-like object.
Args:
file_object (file): file-like object.
Raises:
ParseError: if the file cannot be read.
"""
regf_file = pyregf.file()
regf_file.open_file_object(file_object)
root_key = regf_file.get_root_key()
root_key_name = root_key.get_name()
if root_key_name.lower() not in self._ROOT_KEY_NAMES:
return
root_key = regf_file.get_key_by_path('\\Root')
if root_key:
file_key = root_key.get_sub_key_by_path('File')
if file_key:
for volume_key in file_key.sub_keys:
for file_reference_key in volume_key.sub_keys:
self._ReadFileReferenceKey(file_reference_key)
programs_key = root_key.get_sub_key_by_path('Programs')
if programs_key:
for program_key in programs_key.sub_keys:
self._ReadProgramKey(program_key)
inventory_application_key = root_key.get_sub_key_by_path('InventoryApplication')
if inventory_application_key:
for application_key in inventory_application_key.sub_keys:
self._ReadApplicationKey(application_key)
inventory_application_file_key = root_key.get_sub_key_by_path('InventoryApplicationFile')
if inventory_application_file_key:
for application_file_key in inventory_application_file_key.sub_keys:
self._ReadApplicationFileKey(application_file_key)
|
dtformats
|
positive
|
def _save_profile(profile: Profile) -> pathlib.Path:
<DeepExtract>
if destination.exists() and (not destination.is_dir()):
raise ValueError(f'Destination {destination} is not a directory')
destination.mkdir(parents=True, exist_ok=True)
name = pathlib.Path(re.sub('[^\\w.]', '_', self.profiles_directory))
tf = tempfile.NamedTemporaryFile(prefix=f'{name.stem}_', suffix=name.suffix, dir=destination, delete=False)
tf.close()
profile_path = pathlib.Path(tf.name)
</DeepExtract>
profile_path.write_bytes(profile.profile_content)
self.printer.log_saved(profile, profile_path)
return profile_path
|
def _save_profile(profile: Profile) -> pathlib.Path:
if destination.exists() and (not destination.is_dir()):
raise ValueError(f'Destination {destination} is not a directory')
destination.mkdir(parents=True, exist_ok=True)
name = pathlib.Path(re.sub('[^\\w.]', '_', self.profiles_directory))
tf = tempfile.NamedTemporaryFile(prefix=f'{name.stem}_', suffix=name.suffix, dir=destination, delete=False)
tf.close()
profile_path = pathlib.Path(tf.name)
profile_path.write_bytes(profile.profile_content)
self.printer.log_saved(profile, profile_path)
return profile_path
|
cli-tools
|
positive
|
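The _save_profile pair above reserves a unique file name with tempfile.NamedTemporaryFile(delete=False), closes the handle, and then writes through the returned path. A self-contained sketch of that pattern using only the standard library; the destination directory and the .mobileprovision suffix are illustrative, not taken from the entry.

import pathlib
import tempfile

# Illustrative destination; the entry receives it as an argument.
destination = pathlib.Path(tempfile.gettempdir()) / 'profiles'
destination.mkdir(parents=True, exist_ok=True)

# Reserve a unique, collision-free name inside `destination`, close the handle,
# then write through the path as an ordinary file.
tf = tempfile.NamedTemporaryFile(prefix='profile_', suffix='.mobileprovision',
                                 dir=destination, delete=False)
tf.close()
profile_path = pathlib.Path(tf.name)
profile_path.write_bytes(b'example payload')
print(profile_path)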
def get_export_data(self, file_format, queryset, *args, **kwargs):
"""
Returns file_format representation for given queryset.
"""
request = kwargs.pop('request')
<DeepExtract>
resource_class = self.get_resource_class(usage='export')
</DeepExtract>
data = resource_class(**self.get_export_resource_kwargs(request)).export(queryset, *args, **kwargs)
export_data = file_format.export_data(data)
return export_data
|
def get_export_data(self, file_format, queryset, *args, **kwargs):
"""
Returns file_format representation for given queryset.
"""
request = kwargs.pop('request')
resource_class = self.get_resource_class(usage='export')
data = resource_class(**self.get_export_resource_kwargs(request)).export(queryset, *args, **kwargs)
export_data = file_format.export_data(data)
return export_data
|
book
|
positive
|
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
"""
<DeepExtract>
req_host = request_host(request).lower()
</DeepExtract>
return not domain_match(req_host, reach(request.origin_req_host))
|
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
"""
req_host = request_host(request).lower()
return not domain_match(req_host, reach(request.origin_req_host))
|
BruteXSS
|
positive
|
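is_third_party above is the RFC 2965 unverifiable-transaction test: the request host must domain-match the reach of the originating host. The helpers it calls may be local copies; a minimal sketch using the equivalents that ship in Python 3's http.cookiejar (request_host, domain_match, reach), with made-up example hosts:

import http.cookiejar
import urllib.request

def looks_third_party(request_url, origin_host):
    req = urllib.request.Request(request_url)
    req_host = http.cookiejar.request_host(req)
    # Third-party when the request host does not domain-match the "reach"
    # of the originating host (RFC 2965, section 3.3.6).
    return not http.cookiejar.domain_match(req_host, http.cookiejar.reach(origin_host))

print(looks_third_party('http://ads.tracker.example/pixel.gif', 'www.shop.example'))  # True
print(looks_third_party('http://img.shop.example/logo.png', 'www.shop.example'))      # False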
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError('Cannot replace a Tag with its parent.')
old_parent = self.parent
my_index = self.parent.index(self)
<DeepExtract>
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
</DeepExtract>
old_parent.insert(my_index, replace_with)
return self
|
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError('Cannot replace a Tag with its parent.')
old_parent = self.parent
my_index = self.parent.index(self)
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
old_parent.insert(my_index, replace_with)
return self
|
BeautifulSoup4
|
positive
|
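The extract body inlined into replace_with above is doubly-linked-list surgery: drop the node from its parent and splice its neighbours' pointers back together. A toy, self-contained illustration of that splice (a hypothetical Node class, not BeautifulSoup's API):

class Node:
    def __init__(self, name):
        self.name = name
        self.prev_sibling = None
        self.next_sibling = None

    def extract(self):
        # Splice the neighbours around this node, then detach it.
        if self.prev_sibling is not None:
            self.prev_sibling.next_sibling = self.next_sibling
        if self.next_sibling is not None:
            self.next_sibling.prev_sibling = self.prev_sibling
        self.prev_sibling = self.next_sibling = None
        return self

# a <-> b <-> c; extracting b leaves a <-> c.
a, b, c = Node('a'), Node('b'), Node('c')
a.next_sibling, b.prev_sibling = b, a
b.next_sibling, c.prev_sibling = c, b
b.extract()
print(a.next_sibling.name, c.prev_sibling.name)  # c a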
def test_datetime_attribute_encoder(self):
"""Test past and future `DatetimeAttributeEncoder`"""
attribute = 'month'
month_series = TimeSeries.from_times_and_values(times=tg.generate_index(start=pd.to_datetime('2000-01-01'), length=24, freq='MS'), values=np.arange(24))
encoder = FutureDatetimeAttributeEncoder(input_chunk_length=1, output_chunk_length=1, attribute='month')
first_halve = encoder.encode_train(target=month_series[:12], covariates=month_series[:12], merge_covariates=False)
second_halve = encoder.encode_train(target=month_series[12:], covariates=month_series[12:], merge_covariates=False)
self.assertTrue((first_halve.values() == second_halve.values()).all())
expected_components = 'darts_enc_pc_dta_month'
<DeepExtract>
encoder = PastDatetimeAttributeEncoder(input_chunk_length=self.input_chunk_length, output_chunk_length=self.output_chunk_length, attribute=attribute)
result_with_cov = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.covariates_multi]
result_no_cov = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.target_multi]
result_no_cov_inf_short = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.inf_ts_short_past]
result_no_cov_inf_long = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.inf_ts_long_past]
self.helper_test_encoder_single_train(encoder=encoder, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_train(encoder=encoder, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_short, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_long, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_short, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov_inf_short, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_long, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov_inf_long, merge_covariates=False)
</DeepExtract>
expected_components = 'darts_enc_fc_dta_month'
<DeepExtract>
encoder = FutureDatetimeAttributeEncoder(input_chunk_length=self.input_chunk_length, output_chunk_length=self.output_chunk_length, attribute=attribute)
result_with_cov = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.covariates_multi]
result_no_cov = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.target_multi]
result_no_cov_inf_short = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.inf_ts_short_future]
result_no_cov_inf_long = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.inf_ts_long_future]
self.helper_test_encoder_single_train(encoder=encoder, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_train(encoder=encoder, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_short, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_long, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_short, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov_inf_short, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_long, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov_inf_long, merge_covariates=False)
</DeepExtract>
|
def test_datetime_attribute_encoder(self):
"""Test past and future `DatetimeAttributeEncoder`"""
attribute = 'month'
month_series = TimeSeries.from_times_and_values(times=tg.generate_index(start=pd.to_datetime('2000-01-01'), length=24, freq='MS'), values=np.arange(24))
encoder = FutureDatetimeAttributeEncoder(input_chunk_length=1, output_chunk_length=1, attribute='month')
first_halve = encoder.encode_train(target=month_series[:12], covariates=month_series[:12], merge_covariates=False)
second_halve = encoder.encode_train(target=month_series[12:], covariates=month_series[12:], merge_covariates=False)
self.assertTrue((first_halve.values() == second_halve.values()).all())
expected_components = 'darts_enc_pc_dta_month'
encoder = PastDatetimeAttributeEncoder(input_chunk_length=self.input_chunk_length, output_chunk_length=self.output_chunk_length, attribute=attribute)
result_with_cov = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.covariates_multi]
result_no_cov = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.target_multi]
result_no_cov_inf_short = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.inf_ts_short_past]
result_no_cov_inf_long = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.inf_ts_long_past]
self.helper_test_encoder_single_train(encoder=encoder, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_train(encoder=encoder, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_short, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_long, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_short, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov_inf_short, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_long, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov_inf_long, merge_covariates=False)
expected_components = 'darts_enc_fc_dta_month'
encoder = FutureDatetimeAttributeEncoder(input_chunk_length=self.input_chunk_length, output_chunk_length=self.output_chunk_length, attribute=attribute)
result_with_cov = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.covariates_multi]
result_no_cov = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.target_multi]
result_no_cov_inf_short = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.inf_ts_short_future]
result_no_cov_inf_long = [tg.datetime_attribute_timeseries(ts, attribute=attribute, cyclic=False, with_columns=expected_components) for ts in self.inf_ts_long_future]
self.helper_test_encoder_single_train(encoder=encoder, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_train(encoder=encoder, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_short, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_long, target=self.target_multi, covariates=self.covariates_multi, result=result_with_cov, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_short, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov_inf_short, merge_covariates=False)
self.helper_test_encoder_single_inference(encoder=encoder, n=self.n_long, target=self.target_multi, covariates=[None] * len(self.target_multi), result=result_no_cov_inf_long, merge_covariates=False)
|
darts
|
positive
|
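The encoders exercised above build covariates from a datetime attribute of the series index. What that attribute extraction amounts to can be shown with pandas alone, independent of the darts helpers in the entry (the column name is illustrative):

import numpy as np
import pandas as pd

# 24 monthly timestamps starting 2000-01-01, mirroring the series in the test.
idx = pd.date_range('2000-01-01', periods=24, freq='MS')

# The "month" attribute as an integer covariate, one value per timestamp.
month_covariate = pd.Series(idx.month, index=idx, name='dta_month')

print(month_covariate.head(3).tolist())  # [1, 2, 3]
# The attribute repeats every 12 steps, which is why the test expects the two
# halves of the series to encode identically.
print(np.array_equal(month_covariate.values[:12], month_covariate.values[12:]))  # True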
def create_authenticator(self, connection, debug=False, **kwargs):
<DeepExtract>
raise errors.TokenExpired('Unable to refresh token - no refresh logic implemented.')
</DeepExtract>
return super(JWTTokenAuth, self).create_authenticator(connection, debug, **kwargs)
|
def create_authenticator(self, connection, debug=False, **kwargs):
raise errors.TokenExpired('Unable to refresh token - no refresh logic implemented.')
return super(JWTTokenAuth, self).create_authenticator(connection, debug, **kwargs)
|
azure-uamqp-python
|
positive
|
def get_global_context(request):
global cache_global_context
if settings.DEBUG:
cache_global_context = None
if not cache_global_context:
<DeepExtract>
tuts = list(Tutorial.objects.order_by('-read_count').values('series_id', 'title', 'slug')[0:5])
popular_tutorials = tuts
</DeepExtract>
cache_global_context = popular_tutorials
ret = {'SITE_TITLE': settings.SITE_TITLE, 'POPULAR_TUTORIALS': cache_global_context, 'knob_show_opencv_blueprints': knobs.show_opencv_blueprints, 'knob_show_vision_scrolls': knobs.show_vision_scrolls, 'knob_show_opencv_blueprints_banner_per_tutorial': knobs.show_opencv_blueprints_banner_per_tutorial, 'knob_show_name_that_dataset': knobs.show_name_that_dataset, 'knob_show_comments': knobs.show_comments, 'knob_show_contribute': knobs.show_contribute, 'knob_show_entropy': knobs.show_entropy, 'meta_title': settings.SITE_TITLE, 'meta_description': knobs.meta_description, 'knob_hide_sidebar': knobs.hide_sidebar, 'meta_thumb': '/static/img/logo-footer-left.png', 'mode_debug': settings.DEBUG, 'sha1': os.environ['sha1'] if 'sha1' in os.environ else None}
return ret
|
def get_global_context(request):
global cache_global_context
if settings.DEBUG:
cache_global_context = None
if not cache_global_context:
tuts = list(Tutorial.objects.order_by('-read_count').values('series_id', 'title', 'slug')[0:5])
popular_tutorials = tuts
cache_global_context = popular_tutorials
ret = {'SITE_TITLE': settings.SITE_TITLE, 'POPULAR_TUTORIALS': cache_global_context, 'knob_show_opencv_blueprints': knobs.show_opencv_blueprints, 'knob_show_vision_scrolls': knobs.show_vision_scrolls, 'knob_show_opencv_blueprints_banner_per_tutorial': knobs.show_opencv_blueprints_banner_per_tutorial, 'knob_show_name_that_dataset': knobs.show_name_that_dataset, 'knob_show_comments': knobs.show_comments, 'knob_show_contribute': knobs.show_contribute, 'knob_show_entropy': knobs.show_entropy, 'meta_title': settings.SITE_TITLE, 'meta_description': knobs.meta_description, 'knob_hide_sidebar': knobs.hide_sidebar, 'meta_thumb': '/static/img/logo-footer-left.png', 'mode_debug': settings.DEBUG, 'sha1': os.environ['sha1'] if 'sha1' in os.environ else None}
return ret
|
aishack
|
positive
|
def quant_round_constrain(t1, t2, trange, tzp):
qp = QParams(range=t1.new_tensor(trange), zero_point=t1.new_tensor(tzp), num_bits=4)
<DeepExtract>
if inplace:
output = t1
else:
output = t1.clone()
if qp is None:
assert qp.num_bits is not None, 'either provide qparams of num_bits to quantize'
qp = calculate_qparams(t1, num_bits=qp.num_bits, flatten_dims=flatten_dims, reduce_dim=reduce_dim)
zero_point = qp.zero_point
qp.num_bits = qp.num_bits
qmin = -2.0 ** (qp.num_bits - 1) if signed else 0.0
qmax = qmin + 2.0 ** qp.num_bits - 1.0
running_range = qp.range.clamp(min=1e-06, max=100000.0)
scale = running_range / (qmax - qmin)
running_zero_point_round = Round().apply(qmin - zero_point / scale, False)
zero_point = (qmin - running_zero_point_round.clamp(qmin, qmax)) * scale
output.add_(qmin * scale - zero_point).div_(scale)
if stochastic:
noise = output.new(output.shape).uniform_(-0.5, 0.5)
output.add_(noise)
output = Round().apply(output.clamp_(qmin, qmax), inplace)
if False:
output.mul_(scale).add_(zero_point - qmin * scale)
t1q = output
</DeepExtract>
<DeepExtract>
if inplace:
output = t2
else:
output = t2.clone()
if qp is None:
assert qp.num_bits is not None, 'either provide qparams of num_bits to quantize'
qp = calculate_qparams(t2, num_bits=qp.num_bits, flatten_dims=flatten_dims, reduce_dim=reduce_dim)
zero_point = qp.zero_point
qp.num_bits = qp.num_bits
qmin = -2.0 ** (qp.num_bits - 1) if signed else 0.0
qmax = qmin + 2.0 ** qp.num_bits - 1.0
running_range = qp.range.clamp(min=1e-06, max=100000.0)
scale = running_range / (qmax - qmin)
running_zero_point_round = Round().apply(qmin - zero_point / scale, False)
zero_point = (qmin - running_zero_point_round.clamp(qmin, qmax)) * scale
output.add_(qmin * scale - zero_point).div_(scale)
if stochastic:
noise = output.new(output.shape).uniform_(-0.5, 0.5)
output.add_(noise)
output = Round().apply(output.clamp_(qmin, qmax), inplace)
if False:
output.mul_(scale).add_(zero_point - qmin * scale)
t2q = output
</DeepExtract>
out = torch.max(torch.min(t2q, t1q + 1), t1q - 1)
return dequantize(out, num_bits=qp.num_bits, qparams=qp)
|
def quant_round_constrain(t1, t2, trange, tzp):
qp = QParams(range=t1.new_tensor(trange), zero_point=t1.new_tensor(tzp), num_bits=4)
if inplace:
output = t1
else:
output = t1.clone()
if qp is None:
assert qp.num_bits is not None, 'either provide qparams of num_bits to quantize'
qp = calculate_qparams(t1, num_bits=qp.num_bits, flatten_dims=flatten_dims, reduce_dim=reduce_dim)
zero_point = qp.zero_point
qp.num_bits = qp.num_bits
qmin = -2.0 ** (qp.num_bits - 1) if signed else 0.0
qmax = qmin + 2.0 ** qp.num_bits - 1.0
running_range = qp.range.clamp(min=1e-06, max=100000.0)
scale = running_range / (qmax - qmin)
running_zero_point_round = Round().apply(qmin - zero_point / scale, False)
zero_point = (qmin - running_zero_point_round.clamp(qmin, qmax)) * scale
output.add_(qmin * scale - zero_point).div_(scale)
if stochastic:
noise = output.new(output.shape).uniform_(-0.5, 0.5)
output.add_(noise)
output = Round().apply(output.clamp_(qmin, qmax), inplace)
if False:
output.mul_(scale).add_(zero_point - qmin * scale)
t1q = output
if inplace:
output = t2
else:
output = t2.clone()
if qp is None:
assert qp.num_bits is not None, 'either provide qparams of num_bits to quantize'
qp = calculate_qparams(t2, num_bits=qp.num_bits, flatten_dims=flatten_dims, reduce_dim=reduce_dim)
zero_point = qp.zero_point
qp.num_bits = qp.num_bits
qmin = -2.0 ** (qp.num_bits - 1) if signed else 0.0
qmax = qmin + 2.0 ** qp.num_bits - 1.0
running_range = qp.range.clamp(min=1e-06, max=100000.0)
scale = running_range / (qmax - qmin)
running_zero_point_round = Round().apply(qmin - zero_point / scale, False)
zero_point = (qmin - running_zero_point_round.clamp(qmin, qmax)) * scale
output.add_(qmin * scale - zero_point).div_(scale)
if stochastic:
noise = output.new(output.shape).uniform_(-0.5, 0.5)
output.add_(noise)
output = Round().apply(output.clamp_(qmin, qmax), inplace)
if False:
output.mul_(scale).add_(zero_point - qmin * scale)
t2q = output
out = torch.max(torch.min(t2q, t1q + 1), t1q - 1)
return dequantize(out, num_bits=qp.num_bits, qparams=qp)
|
CalibTIP
|
positive
|
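The inlined body above is uniform affine quantization: a range and zero point give a scale, values are shifted onto a small signed integer grid, rounded and clamped, and later mapped back. A NumPy-only sketch of that round trip under the same mapping (plain rounding, no in-place or stochastic options; the names are chosen for the example):

import numpy as np

def quantize(x, qrange, zero_point, num_bits=4):
    # Signed grid: integers in [-2**(b-1), 2**(b-1) - 1], i.e. [-8, 7] for 4 bits.
    qmin = -2.0 ** (num_bits - 1)
    qmax = qmin + 2.0 ** num_bits - 1.0
    scale = max(qrange, 1e-6) / (qmax - qmin)
    q = np.clip(np.round((x - zero_point) / scale + qmin), qmin, qmax)
    return q, scale, qmin

def dequantize(q, scale, zero_point, qmin):
    return (q - qmin) * scale + zero_point

x = np.array([-1.0, -0.4, 0.0, 0.4, 1.0])
q, scale, qmin = quantize(x, qrange=2.0, zero_point=-1.0)   # representable interval: [-1.0, 1.0]
x_hat = dequantize(q, scale, -1.0, qmin)
print(q)                                                    # values on the 4-bit grid
print(np.max(np.abs(x - x_hat)) <= scale / 2 + 1e-12)       # True: error at most half a step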
@property
def indicators(self):
if self._indicators is None:
<DeepExtract>
self._indicators = []
try:
pkts = pyshark.FileCapture(self.file)
for p in pkts:
if 'IP' in p:
self._extract_ip(p)
if 'DNS' in p:
self._extract_dns(p)
if 'HTTP' in p:
self._extract_http(p)
if 'SSL' in p:
self._extract_ssl(p)
except pyshark.tshark.tshark.TSharkNotFoundException:
print('tshark is not installed, please install it')
</DeepExtract>
return self._indicators
|
@property
def indicators(self):
if self._indicators is None:
self._indicators = []
try:
pkts = pyshark.FileCapture(self.file)
for p in pkts:
if 'IP' in p:
self._extract_ip(p)
if 'DNS' in p:
self._extract_dns(p)
if 'HTTP' in p:
self._extract_http(p)
if 'SSL' in p:
self._extract_ssl(p)
except pyshark.tshark.tshark.TSharkNotFoundException:
print('tshark is not installed, please install it')
return self._indicators
|
analyst-scripts
|
positive
|
def restart(self):
"""Kills the current game and begins another game."""
self.roll_verified.set(HogGUI.KILL)
self.status_label.text = ''
<DeepExtract>
for i in range(10):
self.dice[i].pack_forget()
</DeepExtract>
<DeepExtract>
self.messages.delete(1.0, END)
</DeepExtract>
<DeepExtract>
self.turn = 1 - self.turn
self.switch(0)
self.s_labels[0].text = '0'
self.s_labels[1].text = '0'
self.status_label.text = ''
try:
commentary = hog.both(hog.announce_highest(0), hog.both(hog.announce_highest(1), hog.announce_lead_changes()))
(score, opponent_score) = hog.play(self.strategy, self.strategy, dice=self.make_dice(6), say=commentary)
except HogGUIException:
pass
else:
self.s_labels[0].text = score
self.s_labels[1].text = opponent_score
winner = 0 if score > opponent_score else 1
self.status_label.text = 'Game over! {} wins!'.format(name(winner))
</DeepExtract>
|
def restart(self):
"""Kills the current game and begins another game."""
self.roll_verified.set(HogGUI.KILL)
self.status_label.text = ''
for i in range(10):
self.dice[i].pack_forget()
self.messages.delete(1.0, END)
self.turn = 1 - self.turn
self.switch(0)
self.s_labels[0].text = '0'
self.s_labels[1].text = '0'
self.status_label.text = ''
try:
commentary = hog.both(hog.announce_highest(0), hog.both(hog.announce_highest(1), hog.announce_lead_changes()))
(score, opponent_score) = hog.play(self.strategy, self.strategy, dice=self.make_dice(6), say=commentary)
except HogGUIException:
pass
else:
self.s_labels[0].text = score
self.s_labels[1].text = opponent_score
winner = 0 if score > opponent_score else 1
self.status_label.text = 'Game over! {} wins!'.format(name(winner))
|
cs61a-2018-spring
|
positive
|
def _traceindex(pos, node=None, addr=[], fail=True):
node = node or self.node
addr = address.Address(addr)
for i in range(len(node) + 1):
for c in node.index(i):
index = i + len(node)
<DeepExtract>
# Child node and its address for the inlined recursive step.
child = node.get(index, c) or self.node
child_addr = address.Address(addr + [index, c])
for i in range(len(child) + 1):
    for c in child.index(i):
        index = i + len(child)
        (naddr, pos) = self._traceindex(pos, child.get(index, c), child_addr + [index, c], False)
        if naddr != None:
            (naddr, pos) = (naddr, pos)
    if pos == 0:
        (naddr, pos) = (child_addr, i + len(child))
    elif i < len(child) and (not child._del[i]):
        pos -= 1
if False:
    raise IndexError('pos out of range, longer than len(node.flatten())')
else:
    (naddr, pos) = (None, pos)
</DeepExtract>
if naddr != None:
return (naddr, pos)
if pos == 0:
return (addr, i + len(node))
elif i < len(node) and (not node._del[i]):
pos -= 1
if fail:
raise IndexError('pos out of range, longer than len(node.flatten())')
else:
return (None, pos)
|
def _traceindex(pos, node=None, addr=[], fail=True):
node = node or self.node
addr = address.Address(addr)
for i in range(len(node) + 1):
for c in node.index(i):
index = i + len(node)
# Child node and its address for the inlined recursive step.
child = node.get(index, c) or self.node
child_addr = address.Address(addr + [index, c])
for i in range(len(child) + 1):
    for c in child.index(i):
        index = i + len(child)
        (naddr, pos) = self._traceindex(pos, child.get(index, c), child_addr + [index, c], False)
        if naddr != None:
            (naddr, pos) = (naddr, pos)
    if pos == 0:
        (naddr, pos) = (child_addr, i + len(child))
    elif i < len(child) and (not child._del[i]):
        pos -= 1
if False:
    raise IndexError('pos out of range, longer than len(node.flatten())')
else:
    (naddr, pos) = (None, pos)
if naddr != None:
return (naddr, pos)
if pos == 0:
return (addr, i + len(node))
elif i < len(node) and (not node._del[i]):
pos -= 1
if fail:
raise IndexError('pos out of range, longer than len(node.flatten())')
else:
return (None, pos)
|
ConcurrenTree
|
positive
|
def dtype(self):
<DeepExtract>
if self._handles is None:
raise IOError('Operating on an image that has been closed.')
</DeepExtract>
<DeepExtract>
self.__asert_open()
dtype = self._gdal_band(0).DataType
</DeepExtract>
if dtype in _GDAL_TO_NUMPY_TYPES:
return _GDAL_TO_NUMPY_TYPES[dtype]
raise Exception('Unrecognized gdal data type: ' + str(dtype))
|
def dtype(self):
if self._handles is None:
raise IOError('Operating on an image that has been closed.')
self.__asert_open()
dtype = self._gdal_band(0).DataType
if dtype in _GDAL_TO_NUMPY_TYPES:
return _GDAL_TO_NUMPY_TYPES[dtype]
raise Exception('Unrecognized gdal data type: ' + str(dtype))
|
delta
|
positive
|
def project_tree(path):
with PushDir(path):
<DeepExtract>
marker = self.project_marker
for i in range(0, 5):
    if os.path.exists(marker) and os.path.isfile(marker):
        root = os.path.dirname(os.path.realpath(marker))
        break
    marker = os.path.join('..', marker)
else:
    raise ValueError('Unable to determine project root.')
</DeepExtract>
self._configuration = parse_config(os.path.join(root, '.mrbob.ini'))
tree = dict(root=root, src=os.path.join(root, 'src'), pkg=os.path.join(root, 'src', self.name), resources=os.path.join(root, 'src', self.name, 'resources'), transforms=os.path.join(root, 'src', self.name, 'transforms'), common=os.path.join(root, 'src', self.name, 'transforms', 'common'))
return tree
|
def project_tree(path):
with PushDir(path):
marker = self.project_marker
for i in range(0, 5):
    if os.path.exists(marker) and os.path.isfile(marker):
        root = os.path.dirname(os.path.realpath(marker))
        break
    marker = os.path.join('..', marker)
else:
    raise ValueError('Unable to determine project root.')
self._configuration = parse_config(os.path.join(root, '.mrbob.ini'))
tree = dict(root=root, src=os.path.join(root, 'src'), pkg=os.path.join(root, 'src', self.name), resources=os.path.join(root, 'src', self.name, 'resources'), transforms=os.path.join(root, 'src', self.name, 'transforms'), common=os.path.join(root, 'src', self.name, 'transforms', 'common'))
return tree
|
canari3
|
positive
|
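The search inlined above climbs at most five directory levels looking for a project marker file, then reads .mrbob.ini from the root it found. A compact pathlib variant of that upward walk (the marker file name here is hypothetical):

import pathlib

def find_project_root(start, marker='.project-marker', max_levels=5):
    # Walk upwards from `start`, at most `max_levels` times, until a directory
    # containing the marker file is found.
    current = pathlib.Path(start).resolve()
    for _ in range(max_levels):
        if (current / marker).is_file():
            return current
        current = current.parent
    raise ValueError('Unable to determine project root.')

try:
    print(find_project_root('.'))
except ValueError as exc:
    print(exc)  # raised unless a marker really exists above the cwd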
def filter_scanline(type, line, fo, prev=None):
"""Apply a scanline filter to a scanline. `type` specifies the
filter type (0 to 4); `line` specifies the current (unfiltered)
scanline as a sequence of bytes; `prev` specifies the previous
(unfiltered) scanline as a sequence of bytes. `fo` specifies the
filter offset; normally this is the size of a pixel in bytes (the number
of bytes per sample times the number of channels), but when this is
< 1 (for bit depths < 8) then the filter offset is 1.
"""
assert 0 <= type < 5
out = array('B', [type])
def sub():
ai = -fo
for x in line:
if ai >= 0:
x = x - line[ai] & 255
out.append(x)
ai += 1
def up():
for (i, x) in enumerate(line):
x = x - prev[i] & 255
out.append(x)
def average():
ai = -fo
for (i, x) in enumerate(line):
if ai >= 0:
x = x - (line[ai] + prev[i] >> 1) & 255
else:
x = x - (prev[i] >> 1) & 255
out.append(x)
ai += 1
def paeth():
ai = -fo
for (i, x) in enumerate(line):
a = 0
b = prev[i]
c = 0
if ai >= 0:
a = line[ai]
c = prev[ai]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
Pr = a
elif pb <= pc:
Pr = b
else:
Pr = c
x = x - Pr & 255
out.append(x)
ai += 1
if not prev:
if type == 2:
type = 0
elif type == 3:
prev = [0] * len(line)
elif type == 4:
type = 1
if type == 0:
out.extend(line)
elif type == 1:
<DeepExtract>
ai = -fo
for x in line:
if ai >= 0:
x = x - line[ai] & 255
out.append(x)
ai += 1
</DeepExtract>
elif type == 2:
<DeepExtract>
for (i, x) in enumerate(line):
x = x - prev[i] & 255
out.append(x)
</DeepExtract>
elif type == 3:
<DeepExtract>
ai = -fo
for (i, x) in enumerate(line):
if ai >= 0:
x = x - (line[ai] + prev[i] >> 1) & 255
else:
x = x - (prev[i] >> 1) & 255
out.append(x)
ai += 1
</DeepExtract>
else:
<DeepExtract>
ai = -fo
for (i, x) in enumerate(line):
a = 0
b = prev[i]
c = 0
if ai >= 0:
a = line[ai]
c = prev[ai]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
Pr = a
elif pb <= pc:
Pr = b
else:
Pr = c
x = x - Pr & 255
out.append(x)
ai += 1
</DeepExtract>
return out
|
def filter_scanline(type, line, fo, prev=None):
"""Apply a scanline filter to a scanline. `type` specifies the
filter type (0 to 4); `line` specifies the current (unfiltered)
scanline as a sequence of bytes; `prev` specifies the previous
(unfiltered) scanline as a sequence of bytes. `fo` specifies the
filter offset; normally this is the size of a pixel in bytes (the number
of bytes per sample times the number of channels), but when this is
< 1 (for bit depths < 8) then the filter offset is 1.
"""
assert 0 <= type < 5
out = array('B', [type])
def sub():
ai = -fo
for x in line:
if ai >= 0:
x = x - line[ai] & 255
out.append(x)
ai += 1
def up():
for (i, x) in enumerate(line):
x = x - prev[i] & 255
out.append(x)
def average():
ai = -fo
for (i, x) in enumerate(line):
if ai >= 0:
x = x - (line[ai] + prev[i] >> 1) & 255
else:
x = x - (prev[i] >> 1) & 255
out.append(x)
ai += 1
def paeth():
ai = -fo
for (i, x) in enumerate(line):
a = 0
b = prev[i]
c = 0
if ai >= 0:
a = line[ai]
c = prev[ai]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
Pr = a
elif pb <= pc:
Pr = b
else:
Pr = c
x = x - Pr & 255
out.append(x)
ai += 1
if not prev:
if type == 2:
type = 0
elif type == 3:
prev = [0] * len(line)
elif type == 4:
type = 1
if type == 0:
out.extend(line)
elif type == 1:
ai = -fo
for x in line:
if ai >= 0:
x = x - line[ai] & 255
out.append(x)
ai += 1
elif type == 2:
for (i, x) in enumerate(line):
x = x - prev[i] & 255
out.append(x)
elif type == 3:
ai = -fo
for (i, x) in enumerate(line):
if ai >= 0:
x = x - (line[ai] + prev[i] >> 1) & 255
else:
x = x - (prev[i] >> 1) & 255
out.append(x)
ai += 1
else:
ai = -fo
for (i, x) in enumerate(line):
a = 0
b = prev[i]
c = 0
if ai >= 0:
a = line[ai]
c = prev[ai]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
Pr = a
elif pb <= pc:
Pr = b
else:
Pr = c
x = x - Pr & 255
out.append(x)
ai += 1
return out
|
addon_common
|
positive
|
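filter_scanline above packs all five PNG filter types into one function. As a worked example of the two least obvious pieces, here is the Paeth predictor on its own and the type-1 (Sub) filter applied to a tiny scanline, using the same byte arithmetic:

def paeth_predictor(a, b, c):
    # a = left, b = above, c = upper-left, as in the PNG specification.
    p = a + b - c
    pa, pb, pc = abs(p - a), abs(p - b), abs(p - c)
    if pa <= pb and pa <= pc:
        return a
    if pb <= pc:
        return b
    return c

def sub_filter(line, fo):
    # PNG filter type 1: each byte minus the byte `fo` positions to its left.
    out = [1]  # leading filter-type byte
    for i, x in enumerate(line):
        left = line[i - fo] if i - fo >= 0 else 0
        out.append((x - left) & 255)
    return out

print(paeth_predictor(3, 9, 2))            # 9: the byte above is the closest predictor
print(sub_filter([10, 20, 30, 41], fo=1))  # [1, 10, 10, 10, 11]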
def del_value_in_path(element: JsonElement, path_or_name: Union[List[str], str]) -> JsonElement:
path = path_or_name if isinstance(path_or_name, list) else path_or_name.split('.')
pl = len(path) - 1
def at_idx(current: JsonElement, idx: int) -> JsonElement:
if current is None or not isinstance(current, dict) or path[idx] not in current:
return element
elif pl == idx:
current.pop(path[-1], None)
return element
else:
<DeepExtract>
if len(path) == idx + 1:
result = current[path[idx]]
elif current[path[idx]] is None or not isinstance(current[path[idx]], dict) or path[idx + 1] not in current[path[idx]]:
result = None
else:
result = at_idx(current[path[idx]][path[idx + 1]], idx + 1 + 1)
</DeepExtract>
if not current[path[idx]]:
current[path[idx]] = None
return result
return at_idx(element, 0)
|
def del_value_in_path(element: JsonElement, path_or_name: Union[List[str], str]) -> JsonElement:
path = path_or_name if isinstance(path_or_name, list) else path_or_name.split('.')
pl = len(path) - 1
def at_idx(current: JsonElement, idx: int) -> JsonElement:
if current is None or not isinstance(current, dict) or path[idx] not in current:
return element
elif pl == idx:
current.pop(path[-1], None)
return element
else:
if len(path) == idx + 1:
result = current[path[idx]]
elif current[path[idx]] is None or not isinstance(current[path[idx]], dict) or path[idx + 1] not in current[path[idx]]:
result = None
else:
result = at_idx(current[path[idx]][path[idx + 1]], idx + 1 + 1)
if not current[path[idx]]:
current[path[idx]] = None
return result
return at_idx(element, 0)
|
cloudkeeper
|
positive
|
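del_value_in_path above deletes the leaf addressed by a dotted path and replaces any dict it leaves empty with None. A trimmed, self-contained version of the same idea, handy as a reference for what the entry computes:

def del_in_path(element, dotted_path):
    # Delete the value at a dotted path such as 'a.b.c'; emptied parents become None.
    path = dotted_path.split('.')

    def at_idx(current, idx):
        if not isinstance(current, dict) or path[idx] not in current:
            return element
        if idx == len(path) - 1:
            current.pop(path[idx], None)
            return element
        at_idx(current[path[idx]], idx + 1)
        if not current[path[idx]]:
            current[path[idx]] = None
        return element

    return at_idx(element, 0)

doc = {'a': {'b': {'c': 1}}, 'x': 2}
print(del_in_path(doc, 'a.b.c'))  # {'a': {'b': None}, 'x': 2}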
def start(self):
super().start()
self.start_called = True
<DeepExtract>
</DeepExtract>
log.debug('_TestSourceElement.start')
|
def start(self):
super().start()
self.start_called = True
log.debug('_TestSourceElement.start')
|
ambianic-edge
|
positive
|
def test_list(self):
lst = [0, 1, False, True]
a = bitarray(lst)
self.assertEqual(a, bitarray('0101'))
<DeepExtract>
self.assertIsInstance(a, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = a.buffer_info()
self.assertEqual(size, bits2bytes(len(a)))
self.assertEqual(padbits, 8 * size - len(a))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, a.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(a.nbytes, size)
self.assertEqual(a.padbits, padbits)
self.assertEqual(a.readonly, readonly)
self.assertEqual(len(a) + a.padbits, 8 * a.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(a) % 8, 0)
self.assertEqual(len(a), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(a).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(a.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
</DeepExtract>
if not is_py3k:
a = bitarray([long(1), long(0)])
self.assertEqual(a, bitarray('10'))
self.assertRaises(ValueError, bitarray, [0, 1, 2])
self.assertRaises(TypeError, bitarray, [0, 1, None])
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
a = bitarray(lst)
self.assertEqual(a.tolist(), lst)
<DeepExtract>
self.assertIsInstance(a, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = a.buffer_info()
self.assertEqual(size, bits2bytes(len(a)))
self.assertEqual(padbits, 8 * size - len(a))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, a.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(a.nbytes, size)
self.assertEqual(a.padbits, padbits)
self.assertEqual(a.readonly, readonly)
self.assertEqual(len(a) + a.padbits, 8 * a.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(a) % 8, 0)
self.assertEqual(len(a), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(a).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(a.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
</DeepExtract>
|
def test_list(self):
lst = [0, 1, False, True]
a = bitarray(lst)
self.assertEqual(a, bitarray('0101'))
self.assertIsInstance(a, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = a.buffer_info()
self.assertEqual(size, bits2bytes(len(a)))
self.assertEqual(padbits, 8 * size - len(a))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, a.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(a.nbytes, size)
self.assertEqual(a.padbits, padbits)
self.assertEqual(a.readonly, readonly)
self.assertEqual(len(a) + a.padbits, 8 * a.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(a) % 8, 0)
self.assertEqual(len(a), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(a).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(a.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
if not is_py3k:
a = bitarray([long(1), long(0)])
self.assertEqual(a, bitarray('10'))
self.assertRaises(ValueError, bitarray, [0, 1, 2])
self.assertRaises(TypeError, bitarray, [0, 1, None])
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
a = bitarray(lst)
self.assertEqual(a.tolist(), lst)
self.assertIsInstance(a, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = a.buffer_info()
self.assertEqual(size, bits2bytes(len(a)))
self.assertEqual(padbits, 8 * size - len(a))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, a.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(a.nbytes, size)
self.assertEqual(a.padbits, padbits)
self.assertEqual(a.readonly, readonly)
self.assertEqual(len(a) + a.padbits, 8 * a.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(a) % 8, 0)
self.assertEqual(len(a), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(a).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(a.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
|
bitarray
|
positive
|
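The buffer checks asserted above all reduce to the relation between bit length, byte length and pad bits. The arithmetic on its own, without the bitarray extension:

def bits2bytes(nbits):
    # Smallest number of whole bytes that can hold nbits bits.
    return (nbits + 7) // 8

for nbits in (0, 1, 4, 8, 9, 100):
    nbytes = bits2bytes(nbits)
    padbits = 8 * nbytes - nbits
    # Mirrors the test: pad bits always lie in [0, 8) and bit length plus
    # padding is exactly the buffer size in bits.
    assert 0 <= padbits < 8
    assert nbits + padbits == 8 * nbytes
    print(nbits, nbytes, padbits)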
def arrayType(self):
localctx = BraketPragmasParser.ArrayTypeContext(self, self._ctx, self.state)
<DeepExtract>
if hasattr(localctx, 'enterBraketPragma'):
localctx.enterBraketPragma(self)
</DeepExtract>
try:
self.enterOuterAlt(localctx, 1)
self.state = 871
self.match(BraketPragmasParser.ARRAY)
self.state = 872
self.match(BraketPragmasParser.LBRACKET)
self.state = 873
<DeepExtract>
localctx = BraketPragmasParser.ScalarTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 146, self.RULE_scalarType)
self._la = 0
try:
self.state = 865
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [BraketPragmasParser.BIT]:
self.enterOuterAlt(localctx, 1)
self.state = 835
self.match(BraketPragmasParser.BIT)
self.state = 837
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 836
self.designator()
pass
elif token in [BraketPragmasParser.INT]:
self.enterOuterAlt(localctx, 2)
self.state = 839
self.match(BraketPragmasParser.INT)
self.state = 841
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 840
self.designator()
pass
elif token in [BraketPragmasParser.UINT]:
self.enterOuterAlt(localctx, 3)
self.state = 843
self.match(BraketPragmasParser.UINT)
self.state = 845
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 844
self.designator()
pass
elif token in [BraketPragmasParser.FLOAT]:
self.enterOuterAlt(localctx, 4)
self.state = 847
self.match(BraketPragmasParser.FLOAT)
self.state = 849
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 848
self.designator()
pass
elif token in [BraketPragmasParser.ANGLE]:
self.enterOuterAlt(localctx, 5)
self.state = 851
self.match(BraketPragmasParser.ANGLE)
self.state = 853
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 852
self.designator()
pass
elif token in [BraketPragmasParser.BOOL]:
self.enterOuterAlt(localctx, 6)
self.state = 855
self.match(BraketPragmasParser.BOOL)
pass
elif token in [BraketPragmasParser.DURATION]:
self.enterOuterAlt(localctx, 7)
self.state = 856
self.match(BraketPragmasParser.DURATION)
pass
elif token in [BraketPragmasParser.STRETCH]:
self.enterOuterAlt(localctx, 8)
self.state = 857
self.match(BraketPragmasParser.STRETCH)
pass
elif token in [BraketPragmasParser.COMPLEX]:
self.enterOuterAlt(localctx, 9)
self.state = 858
self.match(BraketPragmasParser.COMPLEX)
self.state = 863
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 859
self.match(BraketPragmasParser.LBRACKET)
self.state = 860
self.scalarType()
self.state = 861
self.match(BraketPragmasParser.RBRACKET)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
</DeepExtract>
self.state = 874
self.match(BraketPragmasParser.COMMA)
self.state = 875
<DeepExtract>
localctx = BraketPragmasParser.ExpressionListContext(self, self._ctx, self.state)
self.enterRule(localctx, 166, self.RULE_expressionList)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 936
self.expression(0)
self.state = 941
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 98, self._ctx)
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 937
self.match(BraketPragmasParser.COMMA)
self.state = 938
self.expression(0)
self.state = 943
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 98, self._ctx)
self.state = 945
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.COMMA:
self.state = 944
self.match(BraketPragmasParser.COMMA)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
</DeepExtract>
self.state = 876
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
<DeepExtract>
if hasattr(listener, 'exitBraketPragma'):
listener.exitBraketPragma(self)
</DeepExtract>
return localctx
|
def arrayType(self):
localctx = BraketPragmasParser.ArrayTypeContext(self, self._ctx, self.state)
if hasattr(localctx, 'enterBraketPragma'):
localctx.enterBraketPragma(self)
try:
self.enterOuterAlt(localctx, 1)
self.state = 871
self.match(BraketPragmasParser.ARRAY)
self.state = 872
self.match(BraketPragmasParser.LBRACKET)
self.state = 873
localctx = BraketPragmasParser.ScalarTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 146, self.RULE_scalarType)
self._la = 0
try:
self.state = 865
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [BraketPragmasParser.BIT]:
self.enterOuterAlt(localctx, 1)
self.state = 835
self.match(BraketPragmasParser.BIT)
self.state = 837
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 836
self.designator()
pass
elif token in [BraketPragmasParser.INT]:
self.enterOuterAlt(localctx, 2)
self.state = 839
self.match(BraketPragmasParser.INT)
self.state = 841
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 840
self.designator()
pass
elif token in [BraketPragmasParser.UINT]:
self.enterOuterAlt(localctx, 3)
self.state = 843
self.match(BraketPragmasParser.UINT)
self.state = 845
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 844
self.designator()
pass
elif token in [BraketPragmasParser.FLOAT]:
self.enterOuterAlt(localctx, 4)
self.state = 847
self.match(BraketPragmasParser.FLOAT)
self.state = 849
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 848
self.designator()
pass
elif token in [BraketPragmasParser.ANGLE]:
self.enterOuterAlt(localctx, 5)
self.state = 851
self.match(BraketPragmasParser.ANGLE)
self.state = 853
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 852
self.designator()
pass
elif token in [BraketPragmasParser.BOOL]:
self.enterOuterAlt(localctx, 6)
self.state = 855
self.match(BraketPragmasParser.BOOL)
pass
elif token in [BraketPragmasParser.DURATION]:
self.enterOuterAlt(localctx, 7)
self.state = 856
self.match(BraketPragmasParser.DURATION)
pass
elif token in [BraketPragmasParser.STRETCH]:
self.enterOuterAlt(localctx, 8)
self.state = 857
self.match(BraketPragmasParser.STRETCH)
pass
elif token in [BraketPragmasParser.COMPLEX]:
self.enterOuterAlt(localctx, 9)
self.state = 858
self.match(BraketPragmasParser.COMPLEX)
self.state = 863
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.LBRACKET:
self.state = 859
self.match(BraketPragmasParser.LBRACKET)
self.state = 860
self.scalarType()
self.state = 861
self.match(BraketPragmasParser.RBRACKET)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
self.state = 874
self.match(BraketPragmasParser.COMMA)
self.state = 875
localctx = BraketPragmasParser.ExpressionListContext(self, self._ctx, self.state)
self.enterRule(localctx, 166, self.RULE_expressionList)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 936
self.expression(0)
self.state = 941
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 98, self._ctx)
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 937
self.match(BraketPragmasParser.COMMA)
self.state = 938
self.expression(0)
self.state = 943
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 98, self._ctx)
self.state = 945
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == BraketPragmasParser.COMMA:
self.state = 944
self.match(BraketPragmasParser.COMMA)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
self.state = 876
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
if hasattr(listener, 'exitBraketPragma'):
listener.exitBraketPragma(self)
return localctx
|
amazon-braket-default-simulator-python
|
positive
|
def main(argv=sys.argv):
from fixtures.data import trees, geo
from fixtures.styles_and_cultures import styles_and_cultures
from fixtures.materials import materials
from fixtures.eventtypes import eventtypes
from fixtures.heritagetypes import heritagetypes
from fixtures.periods import periods
from fixtures.species import species
from fixtures.bluebirds import bluebirds
if len(argv) < 2:
<DeepExtract>
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
</DeepExtract>
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
db_session = sessionmaker(bind=engine)()
import_provider(trees, ConceptScheme(id=1, uri='urn:x-skosprovider:trees', labels=[Label('Verschillende soorten bomen', 'prefLabel', 'nl'), Label('Different types of trees', 'prefLabel', 'en')]), db_session)
import_provider(geo, ConceptScheme(id=2, uri='urn:x-skosprovider:geo', labels=[Label('Geografie', 'prefLabel', 'nl'), Label('Geography', 'prefLabel', 'en')]), db_session)
import_provider(styles_and_cultures, ConceptScheme(id=3, uri='https://id.erfgoed.net/thesauri/stijlen_en_culturen', labels=[Label('Stijlen en Culturen', 'prefLabel', 'nl'), Label('Styles and Cultures', 'prefLabel', 'en')]), db_session)
import_provider(materials, ConceptScheme(id=4, uri='https://id.erfgoed.net/thesauri/materialen', labels=[Label('Materialen', 'prefLabel', 'nl'), Label('Materials', 'prefLabel', 'en')]), db_session)
import_provider(eventtypes, ConceptScheme(id=5, uri='https://id.erfgoed.net/thesauri/gebeurtenistypes', labels=[Label('Gebeurtenistypes', 'prefLabel', 'nl'), Label('Event types', 'prefLabel', 'en')]), db_session)
import_provider(heritagetypes, ConceptScheme(id=6, uri='https://id.erfgoed.net/thesauri/erfgoedtypes', labels=[Label('Erfgoedtypes', 'prefLabel', 'nl'), Label('Heritage types', 'prefLabel', 'en')]), db_session)
import_provider(periods, ConceptScheme(id=7, uri='https://id.erfgoed.net/thesauri/dateringen', labels=[Label('Dateringen', 'prefLabel', 'nl'), Label('Periods', 'prefLabel', 'en')]), db_session)
import_provider(species, ConceptScheme(id=8, uri='https://id.erfgoed.net/thesauri/soorten', labels=[Label('Soorten', 'prefLabel', 'nl'), Label('Species', 'prefLabel', 'en')]), db_session)
import_provider(bluebirds, ConceptScheme(id=9, uri='https://id.bluebirds.org', labels=[Label('Blauwe vogels', 'prefLabel', 'nl'), Label('Blue birds', 'prefLabel', 'en')]), db_session)
db_session.commit()
db_session.close()
print('--atramhasis-db-initialized--')
|
def main(argv=sys.argv):
from fixtures.data import trees, geo
from fixtures.styles_and_cultures import styles_and_cultures
from fixtures.materials import materials
from fixtures.eventtypes import eventtypes
from fixtures.heritagetypes import heritagetypes
from fixtures.periods import periods
from fixtures.species import species
from fixtures.bluebirds import bluebirds
if len(argv) < 2:
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
db_session = sessionmaker(bind=engine)()
import_provider(trees, ConceptScheme(id=1, uri='urn:x-skosprovider:trees', labels=[Label('Verschillende soorten bomen', 'prefLabel', 'nl'), Label('Different types of trees', 'prefLabel', 'en')]), db_session)
import_provider(geo, ConceptScheme(id=2, uri='urn:x-skosprovider:geo', labels=[Label('Geografie', 'prefLabel', 'nl'), Label('Geography', 'prefLabel', 'en')]), db_session)
import_provider(styles_and_cultures, ConceptScheme(id=3, uri='https://id.erfgoed.net/thesauri/stijlen_en_culturen', labels=[Label('Stijlen en Culturen', 'prefLabel', 'nl'), Label('Styles and Cultures', 'prefLabel', 'en')]), db_session)
import_provider(materials, ConceptScheme(id=4, uri='https://id.erfgoed.net/thesauri/materialen', labels=[Label('Materialen', 'prefLabel', 'nl'), Label('Materials', 'prefLabel', 'en')]), db_session)
import_provider(eventtypes, ConceptScheme(id=5, uri='https://id.erfgoed.net/thesauri/gebeurtenistypes', labels=[Label('Gebeurtenistypes', 'prefLabel', 'nl'), Label('Event types', 'prefLabel', 'en')]), db_session)
import_provider(heritagetypes, ConceptScheme(id=6, uri='https://id.erfgoed.net/thesauri/erfgoedtypes', labels=[Label('Erfgoedtypes', 'prefLabel', 'nl'), Label('Heritage types', 'prefLabel', 'en')]), db_session)
import_provider(periods, ConceptScheme(id=7, uri='https://id.erfgoed.net/thesauri/dateringen', labels=[Label('Dateringen', 'prefLabel', 'nl'), Label('Periods', 'prefLabel', 'en')]), db_session)
import_provider(species, ConceptScheme(id=8, uri='https://id.erfgoed.net/thesauri/soorten', labels=[Label('Soorten', 'prefLabel', 'nl'), Label('Species', 'prefLabel', 'en')]), db_session)
import_provider(bluebirds, ConceptScheme(id=9, uri='https://id.bluebirds.org', labels=[Label('Blauwe vogels', 'prefLabel', 'nl'), Label('Blue birds', 'prefLabel', 'en')]), db_session)
db_session.commit()
db_session.close()
print('--atramhasis-db-initialized--')
|
atramhasis
|
positive
|
def _travis_specific_setup(jinja_env, forge_config, forge_dir, platform):
<DeepExtract>
if platform == 'linux':
cfbs_fpath = os.path.join(forge_dir, forge_config['recipe_dir'], 'run_conda_forge_build_setup_linux')
elif platform == 'win':
cfbs_fpath = os.path.join(forge_dir, forge_config['recipe_dir'], 'run_conda_forge_build_setup_win.bat')
else:
cfbs_fpath = os.path.join(forge_dir, forge_config['recipe_dir'], 'run_conda_forge_build_setup_osx')
build_setup = ''
if os.path.exists(cfbs_fpath):
if platform == 'linux':
build_setup += textwrap.dedent(' # Overriding global run_conda_forge_build_setup_linux with local copy.\n source ${RECIPE_ROOT}/run_conda_forge_build_setup_linux\n\n ')
elif platform == 'win':
build_setup += textwrap.dedent(' :: Overriding global run_conda_forge_build_setup_win with local copy.\n {recipe_dir}\\run_conda_forge_build_setup_win\n '.format(recipe_dir=forge_config['recipe_dir']))
else:
build_setup += textwrap.dedent(' # Overriding global run_conda_forge_build_setup_osx with local copy.\n source {recipe_dir}/run_conda_forge_build_setup_osx\n '.format(recipe_dir=forge_config['recipe_dir']))
elif platform == 'win':
build_setup += textwrap.dedent(' run_conda_forge_build_setup\n\n ')
else:
build_setup += textwrap.dedent(' source run_conda_forge_build_setup\n\n ')
build_setup = build_setup
</DeepExtract>
platform_templates = {'linux': ['.scripts/run_docker_build.sh', '.scripts/build_steps.sh'], 'osx': ['.scripts/run_osx_build.sh'], 'win': []}
template_files = platform_templates.get(platform, [])
if platform == 'linux':
<DeepExtract>
yum_requirements_fpath = os.path.join(forge_dir, forge_config['recipe_dir'], 'yum_requirements.txt')
yum_build_setup = ''
if os.path.exists(yum_requirements_fpath):
with open(yum_requirements_fpath) as fh:
requirements = [line.strip() for line in fh if line.strip() and (not line.strip().startswith('#'))]
if not requirements:
raise ValueError('No yum requirements enabled in the yum_requirements.txt, please remove the file or add some.')
yum_build_setup = textwrap.dedent('\n # Install the yum requirements defined canonically in the\n # "recipe/yum_requirements.txt" file. After updating that file,\n # run "conda smithy rerender" and this line will be updated\n # automatically.\n /usr/bin/sudo -n yum install -y {}\n\n\n '.format(' '.join(requirements)))
yum_build_setup = yum_build_setup
</DeepExtract>
if yum_build_setup:
forge_config['yum_build_setup'] = yum_build_setup
forge_config['build_setup'] = build_setup
<DeepExtract>
for template_file in template_files:
template = jinja_env.get_template(os.path.basename(template_file) + '.tmpl')
target_fname = os.path.join(forge_dir, template_file)
new_file_contents = template.render(**forge_config)
if target_fname in get_common_scripts(forge_dir) and os.path.exists(target_fname):
with open(target_fname, 'r') as fh:
old_file_contents = fh.read()
if old_file_contents != new_file_contents:
import difflib
logger.debug('diff:\n%s' % '\n'.join(difflib.unified_diff(old_file_contents.splitlines(), new_file_contents.splitlines(), fromfile=target_fname, tofile=target_fname)))
raise RuntimeError('Same file {} is rendered twice with different contents'.format(target_fname))
with write_file(target_fname) as fh:
fh.write(new_file_contents)
set_exe_file(target_fname, True)
</DeepExtract>
|
def _travis_specific_setup(jinja_env, forge_config, forge_dir, platform):
if platform == 'linux':
cfbs_fpath = os.path.join(forge_dir, forge_config['recipe_dir'], 'run_conda_forge_build_setup_linux')
elif platform == 'win':
cfbs_fpath = os.path.join(forge_dir, forge_config['recipe_dir'], 'run_conda_forge_build_setup_win.bat')
else:
cfbs_fpath = os.path.join(forge_dir, forge_config['recipe_dir'], 'run_conda_forge_build_setup_osx')
build_setup = ''
if os.path.exists(cfbs_fpath):
if platform == 'linux':
build_setup += textwrap.dedent(' # Overriding global run_conda_forge_build_setup_linux with local copy.\n source ${RECIPE_ROOT}/run_conda_forge_build_setup_linux\n\n ')
elif platform == 'win':
build_setup += textwrap.dedent(' :: Overriding global run_conda_forge_build_setup_win with local copy.\n {recipe_dir}\\run_conda_forge_build_setup_win\n '.format(recipe_dir=forge_config['recipe_dir']))
else:
build_setup += textwrap.dedent(' # Overriding global run_conda_forge_build_setup_osx with local copy.\n source {recipe_dir}/run_conda_forge_build_setup_osx\n '.format(recipe_dir=forge_config['recipe_dir']))
elif platform == 'win':
build_setup += textwrap.dedent(' run_conda_forge_build_setup\n\n ')
else:
build_setup += textwrap.dedent(' source run_conda_forge_build_setup\n\n ')
build_setup = build_setup
platform_templates = {'linux': ['.scripts/run_docker_build.sh', '.scripts/build_steps.sh'], 'osx': ['.scripts/run_osx_build.sh'], 'win': []}
template_files = platform_templates.get(platform, [])
if platform == 'linux':
yum_requirements_fpath = os.path.join(forge_dir, forge_config['recipe_dir'], 'yum_requirements.txt')
yum_build_setup = ''
if os.path.exists(yum_requirements_fpath):
with open(yum_requirements_fpath) as fh:
requirements = [line.strip() for line in fh if line.strip() and (not line.strip().startswith('#'))]
if not requirements:
raise ValueError('No yum requirements enabled in the yum_requirements.txt, please remove the file or add some.')
yum_build_setup = textwrap.dedent('\n # Install the yum requirements defined canonically in the\n # "recipe/yum_requirements.txt" file. After updating that file,\n # run "conda smithy rerender" and this line will be updated\n # automatically.\n /usr/bin/sudo -n yum install -y {}\n\n\n '.format(' '.join(requirements)))
yum_build_setup = yum_build_setup
if yum_build_setup:
forge_config['yum_build_setup'] = yum_build_setup
forge_config['build_setup'] = build_setup
for template_file in template_files:
template = jinja_env.get_template(os.path.basename(template_file) + '.tmpl')
target_fname = os.path.join(forge_dir, template_file)
new_file_contents = template.render(**forge_config)
if target_fname in get_common_scripts(forge_dir) and os.path.exists(target_fname):
with open(target_fname, 'r') as fh:
old_file_contents = fh.read()
if old_file_contents != new_file_contents:
import difflib
logger.debug('diff:\n%s' % '\n'.join(difflib.unified_diff(old_file_contents.splitlines(), new_file_contents.splitlines(), fromfile=target_fname, tofile=target_fname)))
raise RuntimeError('Same file {} is rendered twice with different contents'.format(target_fname))
with write_file(target_fname) as fh:
fh.write(new_file_contents)
set_exe_file(target_fname, True)
|
conda-smithy
|
positive
|
def get_similarity_transform(src_pts, dst_pts, reflective=True):
"""
Function:
----------
Find Similarity Transform Matrix 'trans':
u = src_pts[:, 0]
v = src_pts[:, 1]
x = dst_pts[:, 0]
y = dst_pts[:, 1]
[x, y, 1] = [u, v, 1] * trans
Parameters:
----------
@src_pts: Kx2 np.array
source points, each row is a pair of coordinates (x, y)
@dst_pts: Kx2 np.array
destination points, each row is a pair of transformed
coordinates (x, y)
@reflective: True or False
if True:
use reflective similarity transform
else:
use non-reflective similarity transform
Returns:
----------
@trans: 3x3 np.array
transform matrix from uv to xy
trans_inv: 3x3 np.array
inverse of trans, transform matrix from xy to uv
"""
if reflective:
<DeepExtract>
options = {'K': 2}
(trans1, trans1_inv) = findNonreflectiveSimilarity(src_pts, dst_pts, options)
xyR = dst_pts
xyR[:, 0] = -1 * xyR[:, 0]
(trans2r, trans2r_inv) = findNonreflectiveSimilarity(src_pts, xyR, options)
TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
trans2 = np.dot(trans2r, TreflectY)
xy1 = tformfwd(trans1, src_pts)
norm1 = norm(xy1 - dst_pts)
xy2 = tformfwd(trans2, src_pts)
norm2 = norm(xy2 - dst_pts)
if norm1 <= norm2:
(trans, trans_inv) = (trans1, trans1_inv)
else:
trans2_inv = inv(trans2)
(trans, trans_inv) = (trans2, trans2_inv)
</DeepExtract>
else:
<DeepExtract>
options = {'K': 2}
K = options['K']
M = dst_pts.shape[0]
x = dst_pts[:, 0].reshape((-1, 1))
y = dst_pts[:, 1].reshape((-1, 1))
tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1))))
tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1))))
X = np.vstack((tmp1, tmp2))
u = src_pts[:, 0].reshape((-1, 1))
v = src_pts[:, 1].reshape((-1, 1))
U = np.vstack((u, v))
if rank(X) >= 2 * K:
(r, _, _, _) = lstsq(X, U)
r = np.squeeze(r)
else:
raise Exception('cp2tform: two Unique Points Req')
sc = r[0]
ss = r[1]
tx = r[2]
ty = r[3]
Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]])
T = inv(Tinv)
T[:, 2] = np.array([0, 0, 1])
(trans, trans_inv) = (T, Tinv)
</DeepExtract>
return (trans, trans_inv)
|
def get_similarity_transform(src_pts, dst_pts, reflective=True):
"""
Function:
----------
Find Similarity Transform Matrix 'trans':
u = src_pts[:, 0]
v = src_pts[:, 1]
x = dst_pts[:, 0]
y = dst_pts[:, 1]
[x, y, 1] = [u, v, 1] * trans
Parameters:
----------
@src_pts: Kx2 np.array
source points, each row is a pair of coordinates (x, y)
@dst_pts: Kx2 np.array
destination points, each row is a pair of transformed
coordinates (x, y)
@reflective: True or False
if True:
use reflective similarity transform
else:
use non-reflective similarity transform
Returns:
----------
@trans: 3x3 np.array
transform matrix from uv to xy
trans_inv: 3x3 np.array
inverse of trans, transform matrix from xy to uv
"""
if reflective:
options = {'K': 2}
(trans1, trans1_inv) = findNonreflectiveSimilarity(src_pts, dst_pts, options)
xyR = dst_pts
xyR[:, 0] = -1 * xyR[:, 0]
(trans2r, trans2r_inv) = findNonreflectiveSimilarity(src_pts, xyR, options)
TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
trans2 = np.dot(trans2r, TreflectY)
xy1 = tformfwd(trans1, src_pts)
norm1 = norm(xy1 - dst_pts)
xy2 = tformfwd(trans2, src_pts)
norm2 = norm(xy2 - dst_pts)
if norm1 <= norm2:
(trans, trans_inv) = (trans1, trans1_inv)
else:
trans2_inv = inv(trans2)
(trans, trans_inv) = (trans2, trans2_inv)
else:
options = {'K': 2}
K = options['K']
M = dst_pts.shape[0]
x = dst_pts[:, 0].reshape((-1, 1))
y = dst_pts[:, 1].reshape((-1, 1))
tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1))))
tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1))))
X = np.vstack((tmp1, tmp2))
u = src_pts[:, 0].reshape((-1, 1))
v = src_pts[:, 1].reshape((-1, 1))
U = np.vstack((u, v))
if rank(X) >= 2 * K:
(r, _, _, _) = lstsq(X, U)
r = np.squeeze(r)
else:
raise Exception('cp2tform: two Unique Points Req')
sc = r[0]
ss = r[1]
tx = r[2]
ty = r[3]
Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]])
T = inv(Tinv)
T[:, 2] = np.array([0, 0, 1])
(trans, trans_inv) = (T, Tinv)
return (trans, trans_inv)
|
Cross-Resolution-Face-Recognition
|
positive
|
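The record above solves for a similarity transform by stacking two linear equations per point pair and calling lstsq. A minimal self-contained sketch of that least-squares setup, assuming only numpy (the function name and the toy points are mine, not the repository's):

import numpy as np

def nonreflective_similarity(uv, xy):
    """Fit [x, y, 1] = [u, v, 1] @ T for a 4-parameter similarity (scale, rotation, translation)."""
    M = xy.shape[0]
    x, y = xy[:, 0:1], xy[:, 1:2]
    # Each point pair contributes two rows of the linear system X @ r = U.
    X = np.vstack([
        np.hstack([x, y, np.ones((M, 1)), np.zeros((M, 1))]),
        np.hstack([y, -x, np.zeros((M, 1)), np.ones((M, 1))]),
    ])
    U = np.vstack([uv[:, 0:1], uv[:, 1:2]])
    r, _, rank, _ = np.linalg.lstsq(X, U, rcond=None)
    if rank < 4:
        raise ValueError("need at least two distinct point pairs")
    sc, ss, tx, ty = np.squeeze(r)
    Tinv = np.array([[sc, -ss, 0.0], [ss, sc, 0.0], [tx, ty, 1.0]])  # maps xy back to uv
    T = np.linalg.inv(Tinv)
    T[:, 2] = [0.0, 0.0, 1.0]
    return T, Tinv

# Example: a pure translation of (2, 3) recovered from three point pairs.
src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
dst = src + np.array([2.0, 3.0])
T, Tinv = nonreflective_similarity(src, dst)
print(np.hstack([src, np.ones((3, 1))]) @ T)  # ~ [[2, 3, 1], [3, 3, 1], [2, 4, 1]]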
def _access_rights(self, subject, predicate):
"""
Returns the rights statement or an empty string if no one is found.
"""
result = ''
<DeepExtract>
    obj = None
    for _object in self.g.objects(subject, predicate):
        obj = _object
        break
</DeepExtract>
if obj:
if isinstance(obj, BNode) and self._object(obj, RDF.type) == DCT.RightsStatement:
<DeepExtract>
            default_lang = config.get('ckan.locale_default', 'en')
            fallback = ''
            for o in self.g.objects(obj, RDFS.label):
                if isinstance(o, Literal):
                    if o.language and o.language == default_lang:
                        result = str(o)
                        break
                    elif fallback == '':
                        fallback = str(o)
                else:
                    result = str(o)
                    break
            if result == '':
                result = fallback
</DeepExtract>
elif isinstance(obj, Literal) or isinstance(obj, URIRef):
result = six.text_type(obj)
return result
|
def _access_rights(self, subject, predicate):
"""
Returns the rights statement or an empty string if no one is found.
"""
result = ''
    obj = None
    for _object in self.g.objects(subject, predicate):
        obj = _object
        break
if obj:
if isinstance(obj, BNode) and self._object(obj, RDF.type) == DCT.RightsStatement:
            default_lang = config.get('ckan.locale_default', 'en')
            fallback = ''
            for o in self.g.objects(obj, RDFS.label):
                if isinstance(o, Literal):
                    if o.language and o.language == default_lang:
                        result = str(o)
                        break
                    elif fallback == '':
                        fallback = str(o)
                else:
                    result = str(o)
                    break
            if result == '':
                result = fallback
elif isinstance(obj, Literal) or isinstance(obj, URIRef):
result = six.text_type(obj)
return result
|
ckanext-dcat
|
positive
|
def __init__(self, eventrouter, **kwargs):
self.active = False
for (key, value) in kwargs.items():
setattr(self, key, value)
self.eventrouter = eventrouter
self.slack_name = kwargs['name']
self.slack_purpose = kwargs.get('purpose', {'value': ''})
self.topic = kwargs.get('topic', {'value': ''})
self.identifier = kwargs['id']
self.last_read = SlackTS(kwargs.get('last_read', SlackTS()))
self.channel_buffer = None
self.team = kwargs.get('team', None)
self.got_history = False
self.messages = OrderedDict()
self.hashed_messages = {}
self.new_messages = False
self.typing = {}
self.type = 'channel'
<DeepExtract>
self.name = '#' + self.slack_name
</DeepExtract>
self.current_short_name = self.name
<DeepExtract>
self.members = set(kwargs.get('members', []))
self.update_nicklist()
</DeepExtract>
self.unread_count_display = 0
self.last_line_from = None
|
def __init__(self, eventrouter, **kwargs):
self.active = False
for (key, value) in kwargs.items():
setattr(self, key, value)
self.eventrouter = eventrouter
self.slack_name = kwargs['name']
self.slack_purpose = kwargs.get('purpose', {'value': ''})
self.topic = kwargs.get('topic', {'value': ''})
self.identifier = kwargs['id']
self.last_read = SlackTS(kwargs.get('last_read', SlackTS()))
self.channel_buffer = None
self.team = kwargs.get('team', None)
self.got_history = False
self.messages = OrderedDict()
self.hashed_messages = {}
self.new_messages = False
self.typing = {}
self.type = 'channel'
self.name = '#' + self.slack_name
self.current_short_name = self.name
self.members = set(kwargs.get('members', []))
self.update_nicklist()
self.unread_count_display = 0
self.last_line_from = None
|
awesome-dots
|
positive
|
def call(self, y_true, y_pred):
<DeepExtract>
y_true = tf.cast(y_true, tf.int32)
true_convert = tf.gather(self._lookup, y_true, axis=None)
nodata_value = config.dataset.classes.class_id('nodata')
nodata = y_true == nodata_value
for c in self._nodata_classes:
nodata = tf.logical_or(nodata, y_true == c)
while len(nodata.shape) < len(y_pred.shape):
nodata = tf.expand_dims(nodata, -1)
y_pred = tf.cast(y_pred, tf.float32) * tf.cast(tf.logical_not(nodata), tf.float32)
true_convert = tf.cast(tf.logical_not(nodata), tf.float32) * true_convert
(y_true, y_pred) = (true_convert, y_pred)
</DeepExtract>
return tensorflow.keras.losses.binary_crossentropy(y_true, y_pred)
|
def call(self, y_true, y_pred):
y_true = tf.cast(y_true, tf.int32)
true_convert = tf.gather(self._lookup, y_true, axis=None)
nodata_value = config.dataset.classes.class_id('nodata')
nodata = y_true == nodata_value
for c in self._nodata_classes:
nodata = tf.logical_or(nodata, y_true == c)
while len(nodata.shape) < len(y_pred.shape):
nodata = tf.expand_dims(nodata, -1)
y_pred = tf.cast(y_pred, tf.float32) * tf.cast(tf.logical_not(nodata), tf.float32)
true_convert = tf.cast(tf.logical_not(nodata), tf.float32) * true_convert
(y_true, y_pred) = (true_convert, y_pred)
return tensorflow.keras.losses.binary_crossentropy(y_true, y_pred)
|
delta
|
positive
|
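The record above zeroes out 'nodata' pixels in both labels and predictions before handing them to binary cross-entropy. A hedged sketch of the same masking idea in plain TensorFlow; the nodata id, shapes, and values below are invented for illustration:

import tensorflow as tf

def masked_binary_crossentropy(y_true, y_pred, nodata_value=255):
    """Zero out pixels whose label equals nodata_value, then apply BCE."""
    nodata = tf.equal(tf.cast(y_true, tf.int32), nodata_value)
    keep = tf.cast(tf.logical_not(nodata), tf.float32)
    y_true = tf.cast(y_true, tf.float32) * keep
    y_pred = tf.cast(y_pred, tf.float32) * keep
    return tf.keras.losses.binary_crossentropy(y_true, y_pred)

labels = tf.constant([[1, 0, 255, 1]])           # 255 marks a nodata pixel
preds = tf.constant([[0.9, 0.2, 0.7, 0.8]])
print(masked_binary_crossentropy(labels, preds))  # masked pixel contributes ~0 loss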
def DFA(feature, source_info):
"""
Paper Link: https://ieeexplore.ieee.org/document/7550085
"""
def k(x, y, sigma=1):
distance = torch.squeeze(torch.nn.PairwiseDistance(p=2.0, eps=1e-06, keepdim=False)(x.unsqueeze(0), y.unsqueeze(0)))
return torch.exp(-distance / (2 * sigma * sigma))
(N_s, N_t, numOfPerClass) = (feature.size(0) // 2, feature.size(0) // 2, feature.size(0) // 14)
<DeepExtract>
distance = torch.squeeze(torch.nn.PairwiseDistance(p=2.0, eps=1e-06, keepdim=False)(torch.mean(feature[:N_s], 0).unsqueeze(0), torch.mean(feature[N_s:], 0).unsqueeze(0)))
    gist_loss = torch.exp(-distance / 2.0)  # sigma = 1 (k()'s default), so 2 * sigma * sigma == 2
</DeepExtract>
intra_loss = 0
for Class in range(7):
for i in range(Class * numOfPerClass, (Class + 1) * numOfPerClass - 1):
for j in range(i + 1, (Class + 1) * numOfPerClass):
intra_loss += k(feature[i], feature[j]) / (numOfPerClass * (numOfPerClass - 1) * 7)
inter_loss = 0
for Class in range(7):
for i in range(Class * numOfPerClass, (Class + 1) * numOfPerClass - 1):
for j in range((Class + 1) * numOfPerClass, N_s):
inter_loss += k(feature[i], feature[j]) / (numOfPerClass * (N_s - numOfPerClass) * 7)
return (gist_loss, intra_loss, inter_loss)
|
def DFA(feature, source_info):
"""
Paper Link: https://ieeexplore.ieee.org/document/7550085
"""
def k(x, y, sigma=1):
distance = torch.squeeze(torch.nn.PairwiseDistance(p=2.0, eps=1e-06, keepdim=False)(x.unsqueeze(0), y.unsqueeze(0)))
return torch.exp(-distance / (2 * sigma * sigma))
(N_s, N_t, numOfPerClass) = (feature.size(0) // 2, feature.size(0) // 2, feature.size(0) // 14)
distance = torch.squeeze(torch.nn.PairwiseDistance(p=2.0, eps=1e-06, keepdim=False)(torch.mean(feature[:N_s], 0).unsqueeze(0), torch.mean(feature[N_s:], 0).unsqueeze(0)))
    gist_loss = torch.exp(-distance / 2.0)  # sigma = 1 (k()'s default), so 2 * sigma * sigma == 2
intra_loss = 0
for Class in range(7):
for i in range(Class * numOfPerClass, (Class + 1) * numOfPerClass - 1):
for j in range(i + 1, (Class + 1) * numOfPerClass):
intra_loss += k(feature[i], feature[j]) / (numOfPerClass * (numOfPerClass - 1) * 7)
inter_loss = 0
for Class in range(7):
for i in range(Class * numOfPerClass, (Class + 1) * numOfPerClass - 1):
for j in range((Class + 1) * numOfPerClass, N_s):
inter_loss += k(feature[i], feature[j]) / (numOfPerClass * (N_s - numOfPerClass) * 7)
return (gist_loss, intra_loss, inter_loss)
|
CD-FER-Benchmark
|
positive
|
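Every term in the DFA record is built from the same Gaussian kernel k(x, y) = exp(-||x - y|| / (2 * sigma^2)). A small standalone sketch of that kernel and the "gist" term between the source and target feature means (batch sizes and dimensions are illustrative):

import torch

def gaussian_kernel(x, y, sigma=1.0):
    """k(x, y) = exp(-||x - y||_2 / (2 * sigma^2)) for two 1-D feature vectors."""
    distance = torch.squeeze(torch.nn.PairwiseDistance(p=2.0)(x.unsqueeze(0), y.unsqueeze(0)))
    return torch.exp(-distance / (2 * sigma * sigma))

torch.manual_seed(0)
source = torch.randn(8, 16)  # 8 source-domain features of dimension 16
target = torch.randn(8, 16)  # 8 target-domain features of dimension 16
# The "gist" term is this kernel evaluated between the two domain means.
gist = gaussian_kernel(source.mean(dim=0), target.mean(dim=0))
print(gist.item())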
def expand(bbox, geoh, depth):
neighbors = geohash.neighbors(geoh)
for neighbor in neighbors:
other = geohash.bbox(neighbor)
if with_neighbors > depth:
<DeepExtract>
neighbors = geohash.neighbors(neighbor)
for neighbor in neighbors:
other = geohash.bbox(neighbor)
if with_neighbors > depth + 1:
expand(bbox, neighbor, depth + 1 + 1)
else:
if other['n'] > bbox['n']:
bbox['n'] = other['n']
if other['s'] < bbox['s']:
bbox['s'] = other['s']
if other['e'] > bbox['e']:
bbox['e'] = other['e']
if other['w'] < bbox['w']:
bbox['w'] = other['w']
</DeepExtract>
else:
if other['n'] > bbox['n']:
bbox['n'] = other['n']
if other['s'] < bbox['s']:
bbox['s'] = other['s']
if other['e'] > bbox['e']:
bbox['e'] = other['e']
if other['w'] < bbox['w']:
bbox['w'] = other['w']
|
def expand(bbox, geoh, depth):
neighbors = geohash.neighbors(geoh)
for neighbor in neighbors:
other = geohash.bbox(neighbor)
if with_neighbors > depth:
neighbors = geohash.neighbors(neighbor)
for neighbor in neighbors:
other = geohash.bbox(neighbor)
if with_neighbors > depth + 1:
expand(bbox, neighbor, depth + 1 + 1)
else:
if other['n'] > bbox['n']:
bbox['n'] = other['n']
if other['s'] < bbox['s']:
bbox['s'] = other['s']
if other['e'] > bbox['e']:
bbox['e'] = other['e']
if other['w'] < bbox['w']:
bbox['w'] = other['w']
else:
if other['n'] > bbox['n']:
bbox['n'] = other['n']
if other['s'] < bbox['s']:
bbox['s'] = other['s']
if other['e'] > bbox['e']:
bbox['e'] = other['e']
if other['w'] < bbox['w']:
bbox['w'] = other['w']
|
addok
|
positive
|
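The addok record grows a bounding box by folding in the boxes of neighboring geohash cells. A non-recursive sketch of one expansion ring, assuming the same geohash module the record uses (neighbors() returning hashes and bbox() returning a dict with 'n', 's', 'e', 'w'):

import geohash  # assumed: the python-geohash style API used in the record

def expand_once(bbox, geoh):
    """Union bbox with the bounding boxes of the 8 neighbors of geoh (one ring only)."""
    for neighbor in geohash.neighbors(geoh):
        other = geohash.bbox(neighbor)
        bbox['n'] = max(bbox['n'], other['n'])
        bbox['s'] = min(bbox['s'], other['s'])
        bbox['e'] = max(bbox['e'], other['e'])
        bbox['w'] = min(bbox['w'], other['w'])
    return bbox

cell = 'u09tvw'             # an arbitrary geohash
box = geohash.bbox(cell)    # dict with 'n', 's', 'e', 'w'
print(expand_once(box, cell))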
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
<DeepExtract>
grads = [param.grad.data for param in runner.model.parameters() if param.requires_grad and param.grad is not None]
world_size = dist.get_world_size()
if self.coalesce:
_allreduce_coalesced(grads, world_size, self.bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
</DeepExtract>
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
|
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
grads = [param.grad.data for param in runner.model.parameters() if param.requires_grad and param.grad is not None]
world_size = dist.get_world_size()
if self.coalesce:
_allreduce_coalesced(grads, world_size, self.bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
|
AE_TextSpotter
|
positive
|
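The hook above averages gradients across ranks with torch.distributed before the optimizer step. A stripped-down sketch of that averaging without coalescing; it assumes an already initialized process group:

import torch
import torch.distributed as dist

def allreduce_grads(model):
    """Average .grad of every trainable parameter across all ranks, in place.

    Requires dist.init_process_group(...) to have been called beforehand.
    """
    world_size = dist.get_world_size()
    for param in model.parameters():
        if param.requires_grad and param.grad is not None:
            dist.all_reduce(param.grad.data.div_(world_size))

# Typical use inside a training step, after loss.backward():
#   allreduce_grads(model)
#   optimizer.step()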
def UseManualIndexing(client, database_id):
"""The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added.
There may be cases where you can want to turn-off automatic indexing and only selectively add only specific documents to the index.
This method demonstrates how to control this by setting the value of automatic within indexingPolicy to False
"""
try:
<DeepExtract>
try:
collection_link = GetContainerLink(database_id, COLLECTION_ID)
client.DeleteContainer(collection_link)
print("Collection with id '{0}' was deleted".format(COLLECTION_ID))
except errors.HTTPFailure as e:
if e.status_code == 404:
pass
elif e.status_code == 400:
print('Bad request for collection link', collection_link)
raise
else:
raise
</DeepExtract>
<DeepExtract>
database_link = 'dbs' + '/' + database_id
</DeepExtract>
created_Container = client.CreateContainer(database_link, {'id': COLLECTION_ID, 'indexingPolicy': {'automatic': False}})
print(created_Container)
print('\n' + '-' * 25 + '\n2. Collection created with index policy')
<DeepExtract>
for (k, v) in created_Container['indexingPolicy'].items():
print('{:<15}'.format(k), v)
print()
</DeepExtract>
<DeepExtract>
collection_link = GetDatabaseLink(database_id) + '/' + 'colls' + '/' + COLLECTION_ID
</DeepExtract>
doc = client.CreateItem(collection_link, {'id': 'doc1', 'orderId': 'order1'})
print('\n' + '-' * 25 + 'Document doc1 created with order1' + '-' * 25)
print(doc)
query = {'query': 'SELECT * FROM r WHERE r.orderId=@orderNo', 'parameters': [{'name': '@orderNo', 'value': 'order1'}]}
<DeepExtract>
try:
results = list(client.QueryItems(collection_link, query))
print(message)
for doc in results:
print(doc)
return results
except errors.HTTPFailure as e:
if e.status_code == 404:
print("Document doesn't exist")
elif e.status_code == 400:
print('Bad Request exception occured: ', e)
pass
else:
raise
finally:
print()
</DeepExtract>
docRead = client.ReadItem(GetDocumentLink(database_id, COLLECTION_ID, 'doc1'))
print('Document read by ID: \n', docRead['id'])
doc2 = client.CreateItem(collection_link, {'id': 'doc2', 'orderId': 'order2'}, {'indexingDirective': documents.IndexingDirective.Include})
print('\n' + '-' * 25 + 'Document doc2 created with order2' + '-' * 25)
print(doc2)
query = {'query': 'SELECT * FROM r WHERE r.orderId=@orderNo', 'parameters': [{'name': '@orderNo', 'value': 'order2'}]}
<DeepExtract>
try:
results = list(client.QueryItems(collection_link, query))
print(message)
for doc in results:
print(doc)
return results
except errors.HTTPFailure as e:
if e.status_code == 404:
print("Document doesn't exist")
elif e.status_code == 400:
print('Bad Request exception occured: ', e)
pass
else:
raise
finally:
print()
</DeepExtract>
client.DeleteContainer(collection_link)
print('\n')
except errors.HTTPFailure as e:
if e.status_code == 409:
print('Entity already exists')
elif e.status_code == 404:
print("Entity doesn't exist")
else:
raise
|
def UseManualIndexing(client, database_id):
"""The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added.
There may be cases where you can want to turn-off automatic indexing and only selectively add only specific documents to the index.
This method demonstrates how to control this by setting the value of automatic within indexingPolicy to False
"""
try:
try:
collection_link = GetContainerLink(database_id, COLLECTION_ID)
client.DeleteContainer(collection_link)
print("Collection with id '{0}' was deleted".format(COLLECTION_ID))
except errors.HTTPFailure as e:
if e.status_code == 404:
pass
elif e.status_code == 400:
print('Bad request for collection link', collection_link)
raise
else:
raise
database_link = 'dbs' + '/' + database_id
created_Container = client.CreateContainer(database_link, {'id': COLLECTION_ID, 'indexingPolicy': {'automatic': False}})
print(created_Container)
print('\n' + '-' * 25 + '\n2. Collection created with index policy')
for (k, v) in created_Container['indexingPolicy'].items():
print('{:<15}'.format(k), v)
print()
collection_link = GetDatabaseLink(database_id) + '/' + 'colls' + '/' + COLLECTION_ID
doc = client.CreateItem(collection_link, {'id': 'doc1', 'orderId': 'order1'})
print('\n' + '-' * 25 + 'Document doc1 created with order1' + '-' * 25)
print(doc)
query = {'query': 'SELECT * FROM r WHERE r.orderId=@orderNo', 'parameters': [{'name': '@orderNo', 'value': 'order1'}]}
try:
results = list(client.QueryItems(collection_link, query))
print(message)
for doc in results:
print(doc)
return results
except errors.HTTPFailure as e:
if e.status_code == 404:
print("Document doesn't exist")
elif e.status_code == 400:
print('Bad Request exception occured: ', e)
pass
else:
raise
finally:
print()
docRead = client.ReadItem(GetDocumentLink(database_id, COLLECTION_ID, 'doc1'))
print('Document read by ID: \n', docRead['id'])
doc2 = client.CreateItem(collection_link, {'id': 'doc2', 'orderId': 'order2'}, {'indexingDirective': documents.IndexingDirective.Include})
print('\n' + '-' * 25 + 'Document doc2 created with order2' + '-' * 25)
print(doc2)
query = {'query': 'SELECT * FROM r WHERE r.orderId=@orderNo', 'parameters': [{'name': '@orderNo', 'value': 'order2'}]}
try:
results = list(client.QueryItems(collection_link, query))
print(message)
for doc in results:
print(doc)
return results
except errors.HTTPFailure as e:
if e.status_code == 404:
print("Document doesn't exist")
elif e.status_code == 400:
print('Bad Request exception occured: ', e)
pass
else:
raise
finally:
print()
client.DeleteContainer(collection_link)
print('\n')
except errors.HTTPFailure as e:
if e.status_code == 409:
print('Entity already exists')
elif e.status_code == 404:
print("Entity doesn't exist")
else:
raise
|
azure-cosmos-python
|
positive
|
def prune(self, password):
if self.backup_start_time is None:
self.backup_start_time = time.time()
self.logger.info('Prune started')
if self.cancel:
self.logger.info('Stopping')
return None
self.callback('Fetching metadata')
<DeepExtract>
self.logger.debug('Repo.get_snapshot_ids()')
snapshot_paths = self.backend.ls('snapshots')
snapshot_ids = []
for snapshot_path in snapshot_paths:
snapshot_id = os.path.basename(snapshot_path)
snapshot_ids.append(snapshot_id)
self.snapshot_ids = snapshot_ids
</DeepExtract>
blob_hashes = {os.path.basename(path) for path in self.backend.ls('chunks')}
self.logger.info(f'Found {len(blob_hashes):,} blobs')
self.logger.info('Downloaded metadata')
ref_count = {}
for blob_hash in blob_hashes:
ref_count[blob_hash] = 0
<DeepExtract>
self.logger.debug('Repo._get_master_key()')
salt = self.backend.read('keys/key-salt')
master = decrypt(self.backend.read('keys/master-key'), generate_key(salt, password))
</DeepExtract>
for snapshot_id in self.snapshot_ids:
if self.cancel:
self.logger.info('Stopping')
return None
self.callback(f'Checking: {snapshot_id}')
self.logger.debug(f'Pruning {snapshot_id}')
<DeepExtract>
snapshot_obj = json.loads(decrypt_decompress(self.backend.read(f'snapshots/{snapshot_id}'), master))
</DeepExtract>
try:
<DeepExtract>
self.logger.debug('Repo._ref_count_snapshot_nodes()')
for node in snapshot_obj['snapshot'].values():
if node['type'] == 'file':
(start, ostart, end, oend) = node['range']
for i in range(start, end + 1):
blob_hash = snapshot_obj['chunks'][i]
ref_count[blob_hash] += 1
</DeepExtract>
except KeyError:
self.logger.error(f'Corrupted snapshot {snapshot_id}')
raise CorruptSnapshot(snapshot_id)
<DeepExtract>
self.logger.debug('Repo._prune_using_ref_count()')
pool = BoundedThreadPoolExecutor(self.max_thread_queue_size, self.thread_count)
deleted = 0
for (blob_hash, count) in ref_count.items():
if self.cancel:
deleted = None
        if count == 0:
deleted += 1
blob_path = f'chunks/{blob_hash}'
pool.submit(self._rm_blob, blob_path, deleted)
pool.shutdown()
deleted = deleted
</DeepExtract>
self.logger.info(f'Pruned {deleted:,} blobs')
self.logger.info(f'Backup complete in {self._time_str_since_start()}')
return deleted
|
def prune(self, password):
if self.backup_start_time is None:
self.backup_start_time = time.time()
self.logger.info('Prune started')
if self.cancel:
self.logger.info('Stopping')
return None
self.callback('Fetching metadata')
self.logger.debug('Repo.get_snapshot_ids()')
snapshot_paths = self.backend.ls('snapshots')
snapshot_ids = []
for snapshot_path in snapshot_paths:
snapshot_id = os.path.basename(snapshot_path)
snapshot_ids.append(snapshot_id)
self.snapshot_ids = snapshot_ids
blob_hashes = {os.path.basename(path) for path in self.backend.ls('chunks')}
self.logger.info(f'Found {len(blob_hashes):,} blobs')
self.logger.info('Downloaded metadata')
ref_count = {}
for blob_hash in blob_hashes:
ref_count[blob_hash] = 0
self.logger.debug('Repo._get_master_key()')
salt = self.backend.read('keys/key-salt')
master = decrypt(self.backend.read('keys/master-key'), generate_key(salt, password))
for snapshot_id in self.snapshot_ids:
if self.cancel:
self.logger.info('Stopping')
return None
self.callback(f'Checking: {snapshot_id}')
self.logger.debug(f'Pruning {snapshot_id}')
snapshot_obj = json.loads(decrypt_decompress(self.backend.read(f'snapshots/{snapshot_id}'), master))
try:
self.logger.debug('Repo._ref_count_snapshot_nodes()')
for node in snapshot_obj['snapshot'].values():
if node['type'] == 'file':
(start, ostart, end, oend) = node['range']
for i in range(start, end + 1):
blob_hash = snapshot_obj['chunks'][i]
ref_count[blob_hash] += 1
except KeyError:
self.logger.error(f'Corrupted snapshot {snapshot_id}')
raise CorruptSnapshot(snapshot_id)
self.logger.debug('Repo._prune_using_ref_count()')
pool = BoundedThreadPoolExecutor(self.max_thread_queue_size, self.thread_count)
deleted = 0
for (blob_hash, count) in ref_count.items():
if self.cancel:
deleted = None
        if count == 0:
deleted += 1
blob_path = f'chunks/{blob_hash}'
pool.submit(self._rm_blob, blob_path, deleted)
pool.shutdown()
deleted = deleted
self.logger.info(f'Pruned {deleted:,} blobs')
self.logger.info(f'Backup complete in {self._time_str_since_start()}')
return deleted
|
BlobBackup
|
positive
|
def get_subnet_group(client, module):
params = dict()
params['DBSubnetGroupName'] = module.params.get('name').lower()
try:
<DeepExtract>
paginator = client.get_paginator('describe_db_subnet_groups')
        _result = paginator.paginate(**params).build_full_result()
</DeepExtract>
except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
return None
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't describe subnet groups.")
if _result:
result = camel_dict_to_snake_dict(_result['DBSubnetGroups'][0])
result['tags'] = get_tags(client, module, result['db_subnet_group_arn'])
return result
|
def get_subnet_group(client, module):
params = dict()
params['DBSubnetGroupName'] = module.params.get('name').lower()
try:
paginator = client.get_paginator('describe_db_subnet_groups')
        _result = paginator.paginate(**params).build_full_result()
except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
return None
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't describe subnet groups.")
if _result:
result = camel_dict_to_snake_dict(_result['DBSubnetGroups'][0])
result['tags'] = get_tags(client, module, result['db_subnet_group_arn'])
return result
|
amazon.aws
|
positive
|
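The amazon.aws record inlines a paginated describe_db_subnet_groups call. A hedged sketch of the same pattern against a bare boto3 client; the client construction, region, and the modeled exception attribute are assumptions for illustration, not taken from the record:

import boto3

def describe_db_subnet_group(name, region="us-east-1"):
    """Return the first matching DB subnet group, or None if it does not exist."""
    client = boto3.client("rds", region_name=region)
    paginator = client.get_paginator("describe_db_subnet_groups")
    try:
        # build_full_result() drives the paginated requests and merges the pages.
        result = paginator.paginate(DBSubnetGroupName=name.lower()).build_full_result()
    except client.exceptions.DBSubnetGroupNotFoundFault:
        return None
    groups = result.get("DBSubnetGroups", [])
    return groups[0] if groups else None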
def ga_loc_target(gt_bboxes_list, featmap_sizes, anchor_scale, anchor_strides, center_ratio=0.2, ignore_ratio=0.5):
"""Compute location targets for guided anchoring.
Each feature map is divided into positive, negative and ignore regions.
- positive regions: target 1, weight 1
- ignore regions: target 0, weight 0
- negative regions: target 0, weight 0.1
Args:
gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
featmap_sizes (list[tuple]): Multi level sizes of each feature maps.
anchor_scale (int): Anchor scale.
anchor_strides ([list[int]]): Multi level anchor strides.
center_ratio (float): Ratio of center region.
ignore_ratio (float): Ratio of ignore region.
Returns:
tuple
"""
img_per_gpu = len(gt_bboxes_list)
num_lvls = len(featmap_sizes)
r1 = (1 - center_ratio) / 2
r2 = (1 - ignore_ratio) / 2
all_loc_targets = []
all_loc_weights = []
all_ignore_map = []
for lvl_id in range(num_lvls):
(h, w) = featmap_sizes[lvl_id]
loc_targets = torch.zeros(img_per_gpu, 1, h, w, device=gt_bboxes_list[0].device, dtype=torch.float32)
loc_weights = torch.full_like(loc_targets, -1)
ignore_map = torch.zeros_like(loc_targets)
all_loc_targets.append(loc_targets)
all_loc_weights.append(loc_weights)
all_ignore_map.append(ignore_map)
for img_id in range(img_per_gpu):
gt_bboxes = gt_bboxes_list[img_id]
scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1))
min_anchor_size = scale.new_full((1,), float(anchor_scale * anchor_strides[0]))
target_lvls = torch.floor(torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
for gt_id in range(gt_bboxes.size(0)):
lvl = target_lvls[gt_id].item()
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]
<DeepExtract>
x1 = torch.round((1 - r2) * gt_[0] + r2 * gt_[2]).long()
y1 = torch.round((1 - r2) * gt_[1] + r2 * gt_[3]).long()
x2 = torch.round(r2 * gt_[0] + (1 - r2) * gt_[2]).long()
y2 = torch.round(r2 * gt_[1] + (1 - r2) * gt_[3]).long()
if featmap_sizes[lvl] is not None:
x1 = x1.clamp(min=0, max=featmap_sizes[lvl][1] - 1)
y1 = y1.clamp(min=0, max=featmap_sizes[lvl][0] - 1)
x2 = x2.clamp(min=0, max=featmap_sizes[lvl][1] - 1)
y2 = y2.clamp(min=0, max=featmap_sizes[lvl][0] - 1)
(ignore_x1, ignore_y1, ignore_x2, ignore_y2) = (x1, y1, x2, y2)
</DeepExtract>
<DeepExtract>
x1 = torch.round((1 - r1) * gt_[0] + r1 * gt_[2]).long()
y1 = torch.round((1 - r1) * gt_[1] + r1 * gt_[3]).long()
x2 = torch.round(r1 * gt_[0] + (1 - r1) * gt_[2]).long()
y2 = torch.round(r1 * gt_[1] + (1 - r1) * gt_[3]).long()
if featmap_sizes[lvl] is not None:
x1 = x1.clamp(min=0, max=featmap_sizes[lvl][1] - 1)
y1 = y1.clamp(min=0, max=featmap_sizes[lvl][0] - 1)
x2 = x2.clamp(min=0, max=featmap_sizes[lvl][1] - 1)
y2 = y2.clamp(min=0, max=featmap_sizes[lvl][0] - 1)
(ctr_x1, ctr_y1, ctr_x2, ctr_y2) = (x1, y1, x2, y2)
</DeepExtract>
all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1
all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0
all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1
if lvl > 0:
d_lvl = lvl - 1
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
<DeepExtract>
x1 = torch.round((1 - r2) * gt_[0] + r2 * gt_[2]).long()
y1 = torch.round((1 - r2) * gt_[1] + r2 * gt_[3]).long()
x2 = torch.round(r2 * gt_[0] + (1 - r2) * gt_[2]).long()
y2 = torch.round(r2 * gt_[1] + (1 - r2) * gt_[3]).long()
if featmap_sizes[d_lvl] is not None:
x1 = x1.clamp(min=0, max=featmap_sizes[d_lvl][1] - 1)
y1 = y1.clamp(min=0, max=featmap_sizes[d_lvl][0] - 1)
x2 = x2.clamp(min=0, max=featmap_sizes[d_lvl][1] - 1)
y2 = y2.clamp(min=0, max=featmap_sizes[d_lvl][0] - 1)
(ignore_x1, ignore_y1, ignore_x2, ignore_y2) = (x1, y1, x2, y2)
</DeepExtract>
all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1
if lvl < num_lvls - 1:
u_lvl = lvl + 1
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
<DeepExtract>
x1 = torch.round((1 - r2) * gt_[0] + r2 * gt_[2]).long()
y1 = torch.round((1 - r2) * gt_[1] + r2 * gt_[3]).long()
x2 = torch.round(r2 * gt_[0] + (1 - r2) * gt_[2]).long()
y2 = torch.round(r2 * gt_[1] + (1 - r2) * gt_[3]).long()
if featmap_sizes[u_lvl] is not None:
x1 = x1.clamp(min=0, max=featmap_sizes[u_lvl][1] - 1)
y1 = y1.clamp(min=0, max=featmap_sizes[u_lvl][0] - 1)
x2 = x2.clamp(min=0, max=featmap_sizes[u_lvl][1] - 1)
y2 = y2.clamp(min=0, max=featmap_sizes[u_lvl][0] - 1)
(ignore_x1, ignore_y1, ignore_x2, ignore_y2) = (x1, y1, x2, y2)
</DeepExtract>
all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1
for lvl_id in range(num_lvls):
all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0
all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1
loc_avg_factor = sum([t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200
return (all_loc_targets, all_loc_weights, loc_avg_factor)
|
def ga_loc_target(gt_bboxes_list, featmap_sizes, anchor_scale, anchor_strides, center_ratio=0.2, ignore_ratio=0.5):
"""Compute location targets for guided anchoring.
Each feature map is divided into positive, negative and ignore regions.
- positive regions: target 1, weight 1
- ignore regions: target 0, weight 0
- negative regions: target 0, weight 0.1
Args:
gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
featmap_sizes (list[tuple]): Multi level sizes of each feature maps.
anchor_scale (int): Anchor scale.
anchor_strides ([list[int]]): Multi level anchor strides.
center_ratio (float): Ratio of center region.
ignore_ratio (float): Ratio of ignore region.
Returns:
tuple
"""
img_per_gpu = len(gt_bboxes_list)
num_lvls = len(featmap_sizes)
r1 = (1 - center_ratio) / 2
r2 = (1 - ignore_ratio) / 2
all_loc_targets = []
all_loc_weights = []
all_ignore_map = []
for lvl_id in range(num_lvls):
(h, w) = featmap_sizes[lvl_id]
loc_targets = torch.zeros(img_per_gpu, 1, h, w, device=gt_bboxes_list[0].device, dtype=torch.float32)
loc_weights = torch.full_like(loc_targets, -1)
ignore_map = torch.zeros_like(loc_targets)
all_loc_targets.append(loc_targets)
all_loc_weights.append(loc_weights)
all_ignore_map.append(ignore_map)
for img_id in range(img_per_gpu):
gt_bboxes = gt_bboxes_list[img_id]
scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1))
min_anchor_size = scale.new_full((1,), float(anchor_scale * anchor_strides[0]))
target_lvls = torch.floor(torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
for gt_id in range(gt_bboxes.size(0)):
lvl = target_lvls[gt_id].item()
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]
x1 = torch.round((1 - r2) * gt_[0] + r2 * gt_[2]).long()
y1 = torch.round((1 - r2) * gt_[1] + r2 * gt_[3]).long()
x2 = torch.round(r2 * gt_[0] + (1 - r2) * gt_[2]).long()
y2 = torch.round(r2 * gt_[1] + (1 - r2) * gt_[3]).long()
if featmap_sizes[lvl] is not None:
x1 = x1.clamp(min=0, max=featmap_sizes[lvl][1] - 1)
y1 = y1.clamp(min=0, max=featmap_sizes[lvl][0] - 1)
x2 = x2.clamp(min=0, max=featmap_sizes[lvl][1] - 1)
y2 = y2.clamp(min=0, max=featmap_sizes[lvl][0] - 1)
(ignore_x1, ignore_y1, ignore_x2, ignore_y2) = (x1, y1, x2, y2)
x1 = torch.round((1 - r1) * gt_[0] + r1 * gt_[2]).long()
y1 = torch.round((1 - r1) * gt_[1] + r1 * gt_[3]).long()
x2 = torch.round(r1 * gt_[0] + (1 - r1) * gt_[2]).long()
y2 = torch.round(r1 * gt_[1] + (1 - r1) * gt_[3]).long()
if featmap_sizes[lvl] is not None:
x1 = x1.clamp(min=0, max=featmap_sizes[lvl][1] - 1)
y1 = y1.clamp(min=0, max=featmap_sizes[lvl][0] - 1)
x2 = x2.clamp(min=0, max=featmap_sizes[lvl][1] - 1)
y2 = y2.clamp(min=0, max=featmap_sizes[lvl][0] - 1)
(ctr_x1, ctr_y1, ctr_x2, ctr_y2) = (x1, y1, x2, y2)
all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1
all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0
all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1
if lvl > 0:
d_lvl = lvl - 1
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
x1 = torch.round((1 - r2) * gt_[0] + r2 * gt_[2]).long()
y1 = torch.round((1 - r2) * gt_[1] + r2 * gt_[3]).long()
x2 = torch.round(r2 * gt_[0] + (1 - r2) * gt_[2]).long()
y2 = torch.round(r2 * gt_[1] + (1 - r2) * gt_[3]).long()
if featmap_sizes[d_lvl] is not None:
x1 = x1.clamp(min=0, max=featmap_sizes[d_lvl][1] - 1)
y1 = y1.clamp(min=0, max=featmap_sizes[d_lvl][0] - 1)
x2 = x2.clamp(min=0, max=featmap_sizes[d_lvl][1] - 1)
y2 = y2.clamp(min=0, max=featmap_sizes[d_lvl][0] - 1)
(ignore_x1, ignore_y1, ignore_x2, ignore_y2) = (x1, y1, x2, y2)
all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1
if lvl < num_lvls - 1:
u_lvl = lvl + 1
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
x1 = torch.round((1 - r2) * gt_[0] + r2 * gt_[2]).long()
y1 = torch.round((1 - r2) * gt_[1] + r2 * gt_[3]).long()
x2 = torch.round(r2 * gt_[0] + (1 - r2) * gt_[2]).long()
y2 = torch.round(r2 * gt_[1] + (1 - r2) * gt_[3]).long()
if featmap_sizes[u_lvl] is not None:
x1 = x1.clamp(min=0, max=featmap_sizes[u_lvl][1] - 1)
y1 = y1.clamp(min=0, max=featmap_sizes[u_lvl][0] - 1)
x2 = x2.clamp(min=0, max=featmap_sizes[u_lvl][1] - 1)
y2 = y2.clamp(min=0, max=featmap_sizes[u_lvl][0] - 1)
(ignore_x1, ignore_y1, ignore_x2, ignore_y2) = (x1, y1, x2, y2)
all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1
for lvl_id in range(num_lvls):
all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0
all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1
loc_avg_factor = sum([t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200
return (all_loc_targets, all_loc_weights, loc_avg_factor)
|
C-HOI
|
positive
|
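Both inlined blocks in the guided-anchoring record compute a shrunken sub-box of a ground-truth box and clamp it to the feature map. A compact standalone reimplementation of that region arithmetic (names and the sample box are mine):

import torch

def calc_region(bbox, ratio, featmap_size=None):
    """Shrink bbox = (x1, y1, x2, y2) toward its center by `ratio` and clamp to the feature map."""
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        h, w = featmap_size
        x1, x2 = x1.clamp(0, w - 1), x2.clamp(0, w - 1)
        y1, y2 = y1.clamp(0, h - 1), y2.clamp(0, h - 1)
    return x1, y1, x2, y2

gt = torch.tensor([4.0, 4.0, 20.0, 12.0])
# center_ratio = 0.2 gives r1 = (1 - 0.2) / 2 = 0.4, the shrink factor toward the center.
print(calc_region(gt, 0.4, featmap_size=(16, 16)))  # x1=10, y1=7, x2=14, y2=9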
@property
def fileheader(self):
""" Return the Fileheader
Returns
-------
fileheader : Fileheader
"""
<DeepExtract>
if exact is None:
exact = exact
if not self.caching:
if not object_name:
attics = self.pool.get('FILE-HEADER', exact, self.logical_file.error_handler)
else:
attics = self.pool.get('FILE-HEADER', object_name, exact, self.logical_file.error_handler)
values = self.promote(attics)
if isinstance(exact, utils.exact_matcher) and object_name is not None:
values = self['FILE-HEADER'][object_name]
types = [x for x in self.types() if exact.match('FILE-HEADER', x)]
matches = []
for obj_type in types:
for (name, objs) in self[obj_type].items():
if object_name is None or exact.match(object_name, name):
matches.extend(objs)
values = matches
</DeepExtract>
if len(values) != 1:
msg = 'Expected exactly one fileheader. Was: {}'
log.warning(msg.format(values))
if len(values) == 0:
return None
else:
return values[0]
|
@property
def fileheader(self):
""" Return the Fileheader
Returns
-------
fileheader : Fileheader
"""
if exact is None:
exact = exact
if not self.caching:
if not object_name:
attics = self.pool.get('FILE-HEADER', exact, self.logical_file.error_handler)
else:
attics = self.pool.get('FILE-HEADER', object_name, exact, self.logical_file.error_handler)
values = self.promote(attics)
if isinstance(exact, utils.exact_matcher) and object_name is not None:
values = self['FILE-HEADER'][object_name]
types = [x for x in self.types() if exact.match('FILE-HEADER', x)]
matches = []
for obj_type in types:
for (name, objs) in self[obj_type].items():
if object_name is None or exact.match(object_name, name):
matches.extend(objs)
values = matches
if len(values) != 1:
msg = 'Expected exactly one fileheader. Was: {}'
log.warning(msg.format(values))
if len(values) == 0:
return None
else:
return values[0]
|
dlisio
|
positive
|
def store_file(file):
<DeepExtract>
if not os.path.exists(MAPPING_FILE):
fileinfo = None
asset_group_id_str = binascii.b2a_hex(asset_group_id).decode()
with open(MAPPING_FILE, 'r') as f:
mapping = json.load(f)
if mapping is None:
fileinfo = None
if asset_group_id_str in mapping and os.path.basename(file) in mapping[asset_group_id_str]:
result = dict()
if 'transaction_id' in mapping[asset_group_id_str][os.path.basename(file)]:
result['transaction_id'] = binascii.a2b_hex(mapping[asset_group_id_str][os.path.basename(file)]['transaction_id'])
if 'asset_id' in mapping[asset_group_id_str][os.path.basename(file)]:
if isinstance(mapping[asset_group_id_str][os.path.basename(file)]['asset_id'], list):
entry = []
for ast in mapping[asset_group_id_str][os.path.basename(file)]['asset_id']:
entry.append(binascii.a2b_hex(ast))
result['asset_id'] = entry
else:
result['asset_id'] = binascii.a2b_hex(mapping[asset_group_id_str][os.path.basename(file)]['asset_id'])
fileinfo = result
fileinfo = None
</DeepExtract>
if fileinfo is not None:
print('the file already stored : %s' % os.path.basename(file))
sys.exit(0)
<DeepExtract>
with open(file, 'rb') as fin:
data = fin.read()
bbc_app_client = setup_bbc_client()
store_transaction = bbclib.make_transaction(relation_num=1, witness=True)
user_info = 'Owner is %s' % user_name
bbclib.add_relation_asset(store_transaction, relation_idx=0, asset_group_id=asset_group_id, user_id=user_id, asset_body=user_info, asset_file=data)
store_transaction.witness.add_witness(user_id)
if None:
bbc_app_client.search_transaction(None)
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print('ERROR: ', response_data[KeyType.reason].decode())
sys.exit(0)
(prev_tx, fmt_type) = bbclib.deserialize(response_data[KeyType.transaction_data])
bbclib.add_relation_pointer(transaction=store_transaction, relation_idx=0, ref_transaction_id=prev_tx.transaction_id)
sig = store_transaction.sign(private_key=key_pair.private_key, public_key=key_pair.public_key)
store_transaction.get_sig_index(user_id)
store_transaction.add_signature_object(user_id=user_id, signature=sig)
store_transaction.digest()
print(store_transaction)
ret = bbc_app_client.insert_transaction(store_transaction)
assert ret
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print('ERROR: ', response_data[KeyType.reason].decode())
sys.exit(0)
store_id_mappings(os.path.basename(file), asset_group_id, transaction_id=response_data[KeyType.transaction_id], asset_ids=store_transaction.relations[0].asset.asset_id)
</DeepExtract>
print('file stored : %s' % os.path.basename(file))
print('done store %s' % file)
|
def store_file(file):
if not os.path.exists(MAPPING_FILE):
fileinfo = None
asset_group_id_str = binascii.b2a_hex(asset_group_id).decode()
with open(MAPPING_FILE, 'r') as f:
mapping = json.load(f)
if mapping is None:
fileinfo = None
if asset_group_id_str in mapping and os.path.basename(file) in mapping[asset_group_id_str]:
result = dict()
if 'transaction_id' in mapping[asset_group_id_str][os.path.basename(file)]:
result['transaction_id'] = binascii.a2b_hex(mapping[asset_group_id_str][os.path.basename(file)]['transaction_id'])
if 'asset_id' in mapping[asset_group_id_str][os.path.basename(file)]:
if isinstance(mapping[asset_group_id_str][os.path.basename(file)]['asset_id'], list):
entry = []
for ast in mapping[asset_group_id_str][os.path.basename(file)]['asset_id']:
entry.append(binascii.a2b_hex(ast))
result['asset_id'] = entry
else:
result['asset_id'] = binascii.a2b_hex(mapping[asset_group_id_str][os.path.basename(file)]['asset_id'])
fileinfo = result
fileinfo = None
if fileinfo is not None:
print('the file already stored : %s' % os.path.basename(file))
sys.exit(0)
with open(file, 'rb') as fin:
data = fin.read()
bbc_app_client = setup_bbc_client()
store_transaction = bbclib.make_transaction(relation_num=1, witness=True)
user_info = 'Owner is %s' % user_name
bbclib.add_relation_asset(store_transaction, relation_idx=0, asset_group_id=asset_group_id, user_id=user_id, asset_body=user_info, asset_file=data)
store_transaction.witness.add_witness(user_id)
if None:
bbc_app_client.search_transaction(None)
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print('ERROR: ', response_data[KeyType.reason].decode())
sys.exit(0)
(prev_tx, fmt_type) = bbclib.deserialize(response_data[KeyType.transaction_data])
bbclib.add_relation_pointer(transaction=store_transaction, relation_idx=0, ref_transaction_id=prev_tx.transaction_id)
sig = store_transaction.sign(private_key=key_pair.private_key, public_key=key_pair.public_key)
store_transaction.get_sig_index(user_id)
store_transaction.add_signature_object(user_id=user_id, signature=sig)
store_transaction.digest()
print(store_transaction)
ret = bbc_app_client.insert_transaction(store_transaction)
assert ret
response_data = bbc_app_client.callback.synchronize()
if response_data[KeyType.status] < ESUCCESS:
print('ERROR: ', response_data[KeyType.reason].decode())
sys.exit(0)
store_id_mappings(os.path.basename(file), asset_group_id, transaction_id=response_data[KeyType.transaction_id], asset_ids=store_transaction.relations[0].asset.asset_id)
print('file stored : %s' % os.path.basename(file))
print('done store %s' % file)
|
bbc1
|
positive
|
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces['closest-tag']:
if pieces['distance']:
<DeepExtract>
vc = str.split(pieces['closest-tag'], '.post')
(tag_version, post_version) = (vc[0], int(vc[1] or 0) if len(vc) == 2 else None)
</DeepExtract>
rendered = tag_version
if post_version is not None:
rendered += '.post%d.dev%d' % (post_version + 1, pieces['distance'])
else:
rendered += '.post0.dev%d' % pieces['distance']
else:
rendered = pieces['closest-tag']
else:
rendered = '0.post0.dev%d' % pieces['distance']
return rendered
|
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces['closest-tag']:
if pieces['distance']:
vc = str.split(pieces['closest-tag'], '.post')
(tag_version, post_version) = (vc[0], int(vc[1] or 0) if len(vc) == 2 else None)
rendered = tag_version
if post_version is not None:
rendered += '.post%d.dev%d' % (post_version + 1, pieces['distance'])
else:
rendered += '.post0.dev%d' % pieces['distance']
else:
rendered = pieces['closest-tag']
else:
rendered = '0.post0.dev%d' % pieces['distance']
return rendered
|
dask-sql
|
positive
|
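The dask-sql record renders a TAG[.postN.devDISTANCE] string from the pieces dict produced by versioneer. A self-contained sketch of the same rendering rule with made-up inputs:

def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE], bumping an existing .postN in the tag; no -dirty suffix."""
    tag = pieces.get("closest-tag")
    distance = pieces["distance"]
    if not tag:
        return "0.post0.dev%d" % distance
    if not distance:
        return tag
    base, _, post = tag.partition(".post")
    if post:
        return "%s.post%d.dev%d" % (base, int(post) + 1, distance)
    return "%s.post0.dev%d" % (base, distance)

print(render_pep440_pre({"closest-tag": "1.2.0", "distance": 0}))        # 1.2.0
print(render_pep440_pre({"closest-tag": "1.2.0", "distance": 5}))        # 1.2.0.post0.dev5
print(render_pep440_pre({"closest-tag": "1.2.0.post3", "distance": 2}))  # 1.2.0.post4.dev2
print(render_pep440_pre({"closest-tag": None, "distance": 7}))           # 0.post0.dev7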
def create_dataloader(stacked_image, train_stats, batch_size):
<DeepExtract>
stacked_image = np.reshape(stacked_image, [stacked_image.shape[0] * stacked_image.shape[1], stacked_image.shape[2]])
stacked_image = stacked_image.astype(np.float64)
stacked_image -= train_stats['mean']
stacked_image = stacked_image / train_stats['std']
normalized_data = stacked_image
</DeepExtract>
dataset = Dataset_Test(normalized_data)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4, drop_last=False)
return loader
|
def create_dataloader(stacked_image, train_stats, batch_size):
stacked_image = np.reshape(stacked_image, [stacked_image.shape[0] * stacked_image.shape[1], stacked_image.shape[2]])
stacked_image = stacked_image.astype(np.float64)
stacked_image -= train_stats['mean']
stacked_image = stacked_image / train_stats['std']
normalized_data = stacked_image
dataset = Dataset_Test(normalized_data)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4, drop_last=False)
return loader
|
Danesfield
|
positive
|
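The Danesfield record flattens an (H, W, C) image stack and standardizes it with stored training statistics before building a DataLoader. The normalization step alone as a small numpy sketch; shapes and statistics are invented:

import numpy as np

def normalize_pixels(stacked_image, train_stats):
    """Flatten (H, W, C) to (H*W, C) and apply z-score normalization with stored stats."""
    h, w, c = stacked_image.shape
    flat = stacked_image.reshape(h * w, c).astype(np.float64)
    return (flat - train_stats["mean"]) / train_stats["std"]

image = np.random.rand(4, 4, 3)
stats = {"mean": np.array([0.5, 0.5, 0.5]), "std": np.array([0.25, 0.25, 0.25])}
normalized = normalize_pixels(image, stats)
print(normalized.shape, normalized.mean(axis=0))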
def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True)):
super(HRModule, self).__init__()
<DeepExtract>
if num_branches != len(num_blocks):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS({len(num_blocks)})'
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS({len(num_channels)})'
raise ValueError(error_msg)
if num_branches != len(in_channels):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS({len(in_channels)})'
raise ValueError(error_msg)
</DeepExtract>
self.in_channels = in_channels
self.num_branches = num_branches
self.multiscale_output = multiscale_output
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.with_cp = with_cp
<DeepExtract>
branches = []
for i in range(num_branches):
branches.append(self._make_one_branch(i, blocks, num_blocks, num_channels))
self.branches = nn.ModuleList(branches)
</DeepExtract>
<DeepExtract>
if self.num_branches == 1:
self.fuse_layers = None
num_branches = self.num_branches
in_channels = self.in_channels
fuse_layers = []
num_out_branches = num_branches if self.multiscale_output else 1
for i in range(num_out_branches):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], Upsample(scale_factor=2 ** (j - i), mode='bilinear', align_corners=False)))
elif j == i:
fuse_layer.append(None)
else:
conv_downsamples = []
for k in range(i - j):
if k == i - j - 1:
conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1]))
else:
conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
self.fuse_layers = nn.ModuleList(fuse_layers)
</DeepExtract>
self.relu = nn.ReLU(inplace=False)
|
def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True)):
super(HRModule, self).__init__()
if num_branches != len(num_blocks):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS({len(num_blocks)})'
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS({len(num_channels)})'
raise ValueError(error_msg)
if num_branches != len(in_channels):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS({len(in_channels)})'
raise ValueError(error_msg)
self.in_channels = in_channels
self.num_branches = num_branches
self.multiscale_output = multiscale_output
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.with_cp = with_cp
branches = []
for i in range(num_branches):
branches.append(self._make_one_branch(i, blocks, num_blocks, num_channels))
self.branches = nn.ModuleList(branches)
if self.num_branches == 1:
self.fuse_layers = None
num_branches = self.num_branches
in_channels = self.in_channels
fuse_layers = []
num_out_branches = num_branches if self.multiscale_output else 1
for i in range(num_out_branches):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], Upsample(scale_factor=2 ** (j - i), mode='bilinear', align_corners=False)))
elif j == i:
fuse_layer.append(None)
else:
conv_downsamples = []
for k in range(i - j):
if k == i - j - 1:
conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1]))
else:
conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
self.fuse_layers = nn.ModuleList(fuse_layers)
self.relu = nn.ReLU(inplace=False)
|
Auto-Seg-Loss
|
positive
|
def __init__(self, dataset, *args, **kwargs):
super().__init__()
assert isinstance(dataset, str)
if len(args):
assert len(args) == 1
assert isinstance(args[0], dict)
assert not kwargs
kwargs = args[0]
<DeepExtract>
if 'year' in kwargs:
if 'month' not in kwargs:
kwargs['month'] = [f'{i + 1:02}' for i in range(0, 12)]
if 'day' not in kwargs:
kwargs['day'] = [f'{i + 1:02}' for i in range(0, 31)]
split_on = kwargs.pop('split_on', None)
if split_on is None or not isinstance(kwargs.get(split_on), (list, tuple)):
requests = [kwargs]
result = []
for v in kwargs[split_on]:
r = dict(**kwargs)
r[split_on] = v
result.append(r)
requests = result
</DeepExtract>
<DeepExtract>
prompt = CDSAPIKeyPrompt()
prompt.check()
try:
return cdsapi.Client()
except Exception as e:
if '.cdsapirc' in str(e):
prompt.ask_user_and_save()
return cdsapi.Client()
raise
</DeepExtract>
nthreads = min(self.settings('number-of-download-threads'), len(requests))
if nthreads < 2:
self.path = [self._retrieve(dataset, r) for r in requests]
else:
with SoftThreadPool(nthreads=nthreads) as pool:
futures = [pool.submit(self._retrieve, dataset, r) for r in requests]
iterator = (f.result() for f in futures)
self.path = list(tqdm(iterator, leave=True, total=len(requests)))
|
def __init__(self, dataset, *args, **kwargs):
super().__init__()
assert isinstance(dataset, str)
if len(args):
assert len(args) == 1
assert isinstance(args[0], dict)
assert not kwargs
kwargs = args[0]
if 'year' in kwargs:
if 'month' not in kwargs:
kwargs['month'] = [f'{i + 1:02}' for i in range(0, 12)]
if 'day' not in kwargs:
kwargs['day'] = [f'{i + 1:02}' for i in range(0, 31)]
split_on = kwargs.pop('split_on', None)
if split_on is None or not isinstance(kwargs.get(split_on), (list, tuple)):
requests = [kwargs]
result = []
for v in kwargs[split_on]:
r = dict(**kwargs)
r[split_on] = v
result.append(r)
requests = result
prompt = CDSAPIKeyPrompt()
prompt.check()
try:
return cdsapi.Client()
except Exception as e:
if '.cdsapirc' in str(e):
prompt.ask_user_and_save()
return cdsapi.Client()
raise
nthreads = min(self.settings('number-of-download-threads'), len(requests))
if nthreads < 2:
self.path = [self._retrieve(dataset, r) for r in requests]
else:
with SoftThreadPool(nthreads=nthreads) as pool:
futures = [pool.submit(self._retrieve, dataset, r) for r in requests]
iterator = (f.result() for f in futures)
self.path = list(tqdm(iterator, leave=True, total=len(requests)))
|
climetlab
|
positive
|
def _get_nonce(self, url, new_nonce_url):
if not self._nonces:
logger.debug('Requesting fresh nonce')
if new_nonce_url is None:
<DeepExtract>
response = self._send_request('HEAD', *args, **kwargs)
</DeepExtract>
else:
<DeepExtract>
response_ct = response.headers.get('Content-Type')
try:
jobj = response.json()
except ValueError:
jobj = None
if response.status_code == 409:
raise errors.ConflictError(response.headers.get('Location'))
if not response.ok:
if jobj is not None:
if response_ct != self.head(new_nonce_url).JSON_ERROR_CONTENT_TYPE:
logger.debug('Ignoring wrong Content-Type (%r) for JSON Error', response_ct)
try:
raise messages.Error.from_json(jobj)
except jose.DeserializationError as error:
raise errors.ClientError((response, error))
else:
raise errors.ClientError(response)
else:
if jobj is not None and response_ct != self.head(new_nonce_url).JSON_CONTENT_TYPE:
logger.debug('Ignoring wrong Content-Type (%r) for JSON decodable response', response_ct)
if None == self.head(new_nonce_url).JSON_CONTENT_TYPE and jobj is None:
raise errors.ClientError('Unexpected response Content-Type: {0}'.format(response_ct))
response = response
</DeepExtract>
<DeepExtract>
if self.REPLAY_NONCE_HEADER in response.headers:
nonce = response.headers[self.REPLAY_NONCE_HEADER]
try:
decoded_nonce = jws.Header._fields['nonce'].decode(nonce)
except jose.DeserializationError as error:
raise errors.BadNonce(nonce, error)
logger.debug('Storing nonce: %s', nonce)
self._nonces.add(decoded_nonce)
else:
raise errors.MissingNonce(response)
</DeepExtract>
return self._nonces.pop()
|
def _get_nonce(self, url, new_nonce_url):
if not self._nonces:
logger.debug('Requesting fresh nonce')
if new_nonce_url is None:
response = self._send_request('HEAD', *args, **kwargs)
else:
response_ct = response.headers.get('Content-Type')
try:
jobj = response.json()
except ValueError:
jobj = None
if response.status_code == 409:
raise errors.ConflictError(response.headers.get('Location'))
if not response.ok:
if jobj is not None:
if response_ct != self.head(new_nonce_url).JSON_ERROR_CONTENT_TYPE:
logger.debug('Ignoring wrong Content-Type (%r) for JSON Error', response_ct)
try:
raise messages.Error.from_json(jobj)
except jose.DeserializationError as error:
raise errors.ClientError((response, error))
else:
raise errors.ClientError(response)
else:
if jobj is not None and response_ct != self.head(new_nonce_url).JSON_CONTENT_TYPE:
logger.debug('Ignoring wrong Content-Type (%r) for JSON decodable response', response_ct)
if None == self.head(new_nonce_url).JSON_CONTENT_TYPE and jobj is None:
raise errors.ClientError('Unexpected response Content-Type: {0}'.format(response_ct))
response = response
if self.REPLAY_NONCE_HEADER in response.headers:
nonce = response.headers[self.REPLAY_NONCE_HEADER]
try:
decoded_nonce = jws.Header._fields['nonce'].decode(nonce)
except jose.DeserializationError as error:
raise errors.BadNonce(nonce, error)
logger.debug('Storing nonce: %s', nonce)
self._nonces.add(decoded_nonce)
else:
raise errors.MissingNonce(response)
return self._nonces.pop()
|
acme-debian
|
positive
|
def cb_ctrl(self, client_socket, client_address, data):
logger.log(logging.NOTSET, 'Control packet: %s' % binary_to_hex(data))
if len(data) < 5:
return 0
else:
(pkt_type, payload_len) = struct.unpack('>BI', data[0:5])
payload = data[5:]
if len(payload) < payload_len:
return 0
else:
<DeepExtract>
if pkt_type == CtrlPacketType.SIGNAL_START:
logger.debug('Starting for payload: %s' % binary_to_hex(payload))
self.parse_ies(payload)
self.sdr.start()
timeout = 3
current_time = 0.0
while len(self.stored_data) <= self.wait_num_chunks:
sleep(0.0001)
current_time += 0.0001
if current_time >= timeout:
logger.warning('Timeout while waiting for data. Did the SDR crash? Reinstantiating...')
del self.sdr
self.data_socket.socket.close()
self.data_socket = SocketWrapper(socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM), ('127.0.0.1', 3884), self.cb_data)
self.data_socket.start()
self.sdr = SDR(**self.sdr_args)
self.process_ctrl_packet(pkt_type, payload)
elif pkt_type == CtrlPacketType.SIGNAL_END:
self.sdr.stop()
self.sdr.wait()
logger.debug('Stopped after receiving %d chunks' % len(self.stored_data))
if len(self.stored_data) > 0:
np_data = np.fromstring(b''.join(self.stored_data), dtype=np.complex64)
self.trace_set.append(np_data)
self.plaintexts.append(self.stored_plaintext)
self.keys.append(self.stored_key)
if len(self.trace_set) >= self.args.traces_per_set:
assert len(self.trace_set) == len(self.plaintexts)
assert len(self.trace_set) == len(self.keys)
np_trace_set = np.array(self.trace_set)
np_plaintexts = np.array(self.plaintexts, dtype=np.uint8)
np_keys = np.array(self.keys, dtype=np.uint8)
if self.online is not None:
self.emma_client.send(np_trace_set, np_plaintexts, None, np_keys, None)
else:
if not self.args.dry:
self.save(np_trace_set, np_plaintexts, np_keys)
else:
print('Dry run! Not saving.')
self.limit_counter += len(self.trace_set)
if self.limit_counter >= self.limit:
print('Done')
exit(0)
self.trace_set = []
self.plaintexts = []
self.keys = []
self.stored_data = []
self.stored_plaintext = []
</DeepExtract>
if self.ctrl_socket_type == CtrlType.SERIAL:
client_socket.write(b'k')
else:
client_socket.sendall('k')
return payload_len + 5
|
def cb_ctrl(self, client_socket, client_address, data):
logger.log(logging.NOTSET, 'Control packet: %s' % binary_to_hex(data))
if len(data) < 5:
return 0
else:
(pkt_type, payload_len) = struct.unpack('>BI', data[0:5])
payload = data[5:]
if len(payload) < payload_len:
return 0
else:
if pkt_type == CtrlPacketType.SIGNAL_START:
logger.debug('Starting for payload: %s' % binary_to_hex(payload))
self.parse_ies(payload)
self.sdr.start()
timeout = 3
current_time = 0.0
while len(self.stored_data) <= self.wait_num_chunks:
sleep(0.0001)
current_time += 0.0001
if current_time >= timeout:
logger.warning('Timeout while waiting for data. Did the SDR crash? Reinstantiating...')
del self.sdr
self.data_socket.socket.close()
self.data_socket = SocketWrapper(socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM), ('127.0.0.1', 3884), self.cb_data)
self.data_socket.start()
self.sdr = SDR(**self.sdr_args)
self.process_ctrl_packet(pkt_type, payload)
elif pkt_type == CtrlPacketType.SIGNAL_END:
self.sdr.stop()
self.sdr.wait()
logger.debug('Stopped after receiving %d chunks' % len(self.stored_data))
if len(self.stored_data) > 0:
np_data = np.fromstring(b''.join(self.stored_data), dtype=np.complex64)
self.trace_set.append(np_data)
self.plaintexts.append(self.stored_plaintext)
self.keys.append(self.stored_key)
if len(self.trace_set) >= self.args.traces_per_set:
assert len(self.trace_set) == len(self.plaintexts)
assert len(self.trace_set) == len(self.keys)
np_trace_set = np.array(self.trace_set)
np_plaintexts = np.array(self.plaintexts, dtype=np.uint8)
np_keys = np.array(self.keys, dtype=np.uint8)
if self.online is not None:
self.emma_client.send(np_trace_set, np_plaintexts, None, np_keys, None)
else:
if not self.args.dry:
self.save(np_trace_set, np_plaintexts, np_keys)
else:
print('Dry run! Not saving.')
self.limit_counter += len(self.trace_set)
if self.limit_counter >= self.limit:
print('Done')
exit(0)
self.trace_set = []
self.plaintexts = []
self.keys = []
self.stored_data = []
self.stored_plaintext = []
if self.ctrl_socket_type == CtrlType.SERIAL:
client_socket.write(b'k')
else:
client_socket.sendall('k')
return payload_len + 5
|
emma
|
positive
|
def get_random_datetime(start_date=EWSDate(1996, 1, 1), end_date=EWSDate(2030, 1, 1), tz=UTC):
while True:
try:
<DeepExtract>
random_date = EWSDate.fromordinal(random.randint(start_date.toordinal(), end_date.toordinal()))
</DeepExtract>
random_datetime = datetime.datetime.combine(random_date, datetime.time.min) + datetime.timedelta(minutes=random.randint(0, 60 * 24))
return tz.localize(EWSDateTime.from_datetime(random_datetime), is_dst=None)
except (AmbiguousTimeError, NonExistentTimeError):
pass
|
def get_random_datetime(start_date=EWSDate(1996, 1, 1), end_date=EWSDate(2030, 1, 1), tz=UTC):
while True:
try:
random_date = EWSDate.fromordinal(random.randint(start_date.toordinal(), end_date.toordinal()))
random_datetime = datetime.datetime.combine(random_date, datetime.time.min) + datetime.timedelta(minutes=random.randint(0, 60 * 24))
return tz.localize(EWSDateTime.from_datetime(random_datetime), is_dst=None)
except (AmbiguousTimeError, NonExistentTimeError):
pass
|
exchangelib
|
positive
|
def Product(item):
if isinstance(item, Expression):
item = item.run()
if isinstance(item, int):
item = abs(item)
result = 1
while item:
result *= item % 10
item //= 10
return result
if isinstance(item, float):
item = format(abs(item), '.15e').split('e')[0].strip('0')
if not hasattr(item, '__iter__'):
item = str(item)
if isinstance(item, str):
if all((c in '0123456789.-' for c in item)) and item.count('.') < 2 and (not '-' in item[1:]):
return product([int(c) for c in item if c >= '0'])
return product(map(literal_eval, re.findall('-?\\d*\\.?\\d+|-?\\d+', item)))
if item and isinstance(item[0], Expression):
<DeepExtract>
clone = item[:]
clone[:] = [lambda o: o.run()(item) for item in clone]
item = clone
</DeepExtract>
return product(item)
|
def Product(item):
if isinstance(item, Expression):
item = item.run()
if isinstance(item, int):
item = abs(item)
result = 1
while item:
result *= item % 10
item //= 10
return result
if isinstance(item, float):
item = format(abs(item), '.15e').split('e')[0].strip('0')
if not hasattr(item, '__iter__'):
item = str(item)
if isinstance(item, str):
if all((c in '0123456789.-' for c in item)) and item.count('.') < 2 and (not '-' in item[1:]):
return product([int(c) for c in item if c >= '0'])
return product(map(literal_eval, re.findall('-?\\d*\\.?\\d+|-?\\d+', item)))
if item and isinstance(item[0], Expression):
clone = item[:]
clone[:] = [lambda o: o.run()(item) for item in clone]
item = clone
return product(item)
|
Charcoal
|
positive
|
def log_app_apk_install_start_time_on_device(device_name):
<DeepExtract>
session_device_summary = session_log.device_summaries.get(device_name)
if session_device_summary is None:
session_device_summary = SessionDeviceSummary()
session_device_summary.device_name = device_name
session_log.device_summaries.update({device_name: session_device_summary})
</DeepExtract>
session_device_summary = session_log.device_summaries.get(device_name)
if session_device_summary is None:
session_device_summary = SessionDeviceSummary()
if session_device_summary.apk_install_start_time is None:
session_device_summary.apk_install_start_time = time.time()
session_log.device_summaries.update({device_name: session_device_summary})
|
def log_app_apk_install_start_time_on_device(device_name):
session_device_summary = session_log.device_summaries.get(device_name)
if session_device_summary is None:
session_device_summary = SessionDeviceSummary()
session_device_summary.device_name = device_name
session_log.device_summaries.update({device_name: session_device_summary})
session_device_summary = session_log.device_summaries.get(device_name)
if session_device_summary is None:
session_device_summary = SessionDeviceSummary()
if session_device_summary.apk_install_start_time is None:
session_device_summary.apk_install_start_time = time.time()
session_log.device_summaries.update({device_name: session_device_summary})
|
AutomationTestSupervisor
|
positive
|
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, groundtruth_weights=None, **params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:], shape_utils.combined_static_and_dynamic_shape(self._unmatched_cls_target))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[:1], shape_utils.combined_static_and_dynamic_shape(groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies([unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix, **params)
<DeepExtract>
matched_gt_boxes = match.gather_based_on_match(groundtruth_boxes.get(), unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(groundtruth_keypoints, unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]), ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(match.match_results)
unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask, matched_reg_targets, unmatched_ignored_reg_targets)
reg_targets = reg_targets
</DeepExtract>
<DeepExtract>
cls_targets = match.gather_based_on_match(groundtruth_labels, unmatched_value=self._unmatched_cls_target, ignored_value=self._unmatched_cls_target)
</DeepExtract>
<DeepExtract>
reg_weights = match.gather_based_on_match(groundtruth_weights, ignored_value=0.0, unmatched_value=0.0)
</DeepExtract>
<DeepExtract>
cls_weights = match.gather_based_on_match(groundtruth_weights, ignored_value=0.0, unmatched_value=self._negative_class_weight)
</DeepExtract>
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
<DeepExtract>
target_shape = reg_targets.get_shape().as_list()
target_shape[0] = num_anchors
reg_targets.set_shape(target_shape)
reg_targets = reg_targets
</DeepExtract>
<DeepExtract>
target_shape = cls_targets.get_shape().as_list()
target_shape[0] = num_anchors
cls_targets.set_shape(target_shape)
cls_targets = cls_targets
</DeepExtract>
<DeepExtract>
target_shape = reg_weights.get_shape().as_list()
target_shape[0] = num_anchors
reg_weights.set_shape(target_shape)
reg_weights = reg_weights
</DeepExtract>
<DeepExtract>
target_shape = cls_weights.get_shape().as_list()
target_shape[0] = num_anchors
cls_weights.set_shape(target_shape)
cls_weights = cls_weights
</DeepExtract>
return (cls_targets, cls_weights, reg_targets, reg_weights, match)
|
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, groundtruth_weights=None, **params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:], shape_utils.combined_static_and_dynamic_shape(self._unmatched_cls_target))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[:1], shape_utils.combined_static_and_dynamic_shape(groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies([unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix, **params)
matched_gt_boxes = match.gather_based_on_match(groundtruth_boxes.get(), unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(groundtruth_keypoints, unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]), ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(match.match_results)
unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask, matched_reg_targets, unmatched_ignored_reg_targets)
reg_targets = reg_targets
cls_targets = match.gather_based_on_match(groundtruth_labels, unmatched_value=self._unmatched_cls_target, ignored_value=self._unmatched_cls_target)
reg_weights = match.gather_based_on_match(groundtruth_weights, ignored_value=0.0, unmatched_value=0.0)
cls_weights = match.gather_based_on_match(groundtruth_weights, ignored_value=0.0, unmatched_value=self._negative_class_weight)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
target_shape = reg_targets.get_shape().as_list()
target_shape[0] = num_anchors
reg_targets.set_shape(target_shape)
reg_targets = reg_targets
target_shape = cls_targets.get_shape().as_list()
target_shape[0] = num_anchors
cls_targets.set_shape(target_shape)
cls_targets = cls_targets
target_shape = reg_weights.get_shape().as_list()
target_shape[0] = num_anchors
reg_weights.set_shape(target_shape)
reg_weights = reg_weights
target_shape = cls_weights.get_shape().as_list()
target_shape[0] = num_anchors
cls_weights.set_shape(target_shape)
cls_weights = cls_weights
return (cls_targets, cls_weights, reg_targets, reg_weights, match)
|
class-balanced-loss
|
positive
|
def run_simple(hostname, port, application, use_reloader=False, use_debugger=False, use_evalex=True, extra_files=None, reloader_interval=1, passthrough_errors=False, processes=None, threaded=False, ssl_context=None):
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
processes = 1 if processes is None else int(processes)
def serve_forever():
make_server(hostname, port, application, processes=processes, threaded=threaded, passthrough_errors=passthrough_errors, ssl_context=ssl_context).serve_forever()
def serve_error_app(tb_str, monitored_files):
from clastic import flaw
err_app = flaw.create_app(tb_str, monitored_files)
err_server = make_server(hostname, port, err_app)
thread.start_new_thread(err_server.serve_forever, ())
return err_server
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
print(' * Running on %s://%s:%d/' % (ssl_context is None and 'http' or 'https', display_hostname, port))
if use_reloader:
<DeepExtract>
fam = socket.AF_INET
if ':' in hostname:
fam = getattr(socket, 'AF_INET6', socket.AF_INET)
try:
test_socket = socket.socket(fam, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
return True
except socket.error:
if raise_exc:
raise
return False
</DeepExtract>
<DeepExtract>
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
enable_tty_echo()
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(serve_forever, ())
try:
reloader_loop(extra_files, reloader_interval)
except KeyboardInterrupt:
return
except SystemExit:
mon_list = list(chain(iter_monitor_files(), extra_files or ()))
sys.stderr.write('%s%r\n' % (_MON_PREFIX, mon_list))
raise
try:
sys.exit(restart_with_reloader(error_func=serve_error_app))
except KeyboardInterrupt:
pass
</DeepExtract>
else:
<DeepExtract>
make_server(hostname, port, application, processes=processes, threaded=threaded, passthrough_errors=passthrough_errors, ssl_context=ssl_context).serve_forever()
</DeepExtract>
|
def run_simple(hostname, port, application, use_reloader=False, use_debugger=False, use_evalex=True, extra_files=None, reloader_interval=1, passthrough_errors=False, processes=None, threaded=False, ssl_context=None):
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
processes = 1 if processes is None else int(processes)
def serve_forever():
make_server(hostname, port, application, processes=processes, threaded=threaded, passthrough_errors=passthrough_errors, ssl_context=ssl_context).serve_forever()
def serve_error_app(tb_str, monitored_files):
from clastic import flaw
err_app = flaw.create_app(tb_str, monitored_files)
err_server = make_server(hostname, port, err_app)
thread.start_new_thread(err_server.serve_forever, ())
return err_server
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
print(' * Running on %s://%s:%d/' % (ssl_context is None and 'http' or 'https', display_hostname, port))
if use_reloader:
fam = socket.AF_INET
if ':' in hostname:
fam = getattr(socket, 'AF_INET6', socket.AF_INET)
try:
test_socket = socket.socket(fam, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
return True
except socket.error:
if raise_exc:
raise
return False
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
enable_tty_echo()
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(serve_forever, ())
try:
reloader_loop(extra_files, reloader_interval)
except KeyboardInterrupt:
return
except SystemExit:
mon_list = list(chain(iter_monitor_files(), extra_files or ()))
sys.stderr.write('%s%r\n' % (_MON_PREFIX, mon_list))
raise
try:
sys.exit(restart_with_reloader(error_func=serve_error_app))
except KeyboardInterrupt:
pass
else:
make_server(hostname, port, application, processes=processes, threaded=threaded, passthrough_errors=passthrough_errors, ssl_context=ssl_context).serve_forever()
|
clastic
|
positive
|
@pytest.mark.parametrize('short', [True, False])
@mock.patch('cachito.web.status.get_worker_config')
@mock.patch('cachito.web.status.nexus_ok')
@mock.patch('cachito.web.status.athens_ok')
@mock.patch('cachito.web.status.database_ok')
@mock.patch('cachito.web.status.rabbitmq_ok')
@mock.patch('cachito.web.status.workers_status')
@mock.patch('cachito.web.status._can_process')
def test_status_service_not_ok(mock_can_process, mock_workers_status, mock_rabbitmq_ok, mock_database_ok, mock_athens_ok, mock_nexus_ok, mock_get_worker_config, short, test_app):
<DeepExtract>
config = mock.Mock()
config.cachito_nexus_url = 'http://nexus:8081'
config.cachito_nexus_hoster_url = 'http://nexus.example.org' if nexus_hoster else None
config.cachito_athens_url = 'http://athens:3000'
config.broker_url = 'amqp://test@rabbitmq:5672//'
config = config
</DeepExtract>
mock_get_worker_config.return_value = config
mock_nexus_ok.return_value = (True, None)
mock_athens_ok.return_value = (False, 'Athens is currently at war with Sparta')
mock_database_ok.return_value = (True, None)
mock_rabbitmq_ok.return_value = (True, None)
mock_workers_status.return_value = [{'name': 'celery@123456', 'ok': True}]
if short:
err_msg = 'athens unavailable: Athens is currently at war with Sparta'
with pytest.raises(WorkerConfigError, match=err_msg):
status.status(short=True)
return
result = status.status(short=False)
expect_services = [{'name': 'nexus', 'ok': True}, {'name': 'athens', 'ok': False, 'reason': 'Athens is currently at war with Sparta'}, {'name': 'database', 'ok': True}, {'name': 'rabbitmq', 'ok': True}]
assert result == {'can_process': mock_can_process.return_value, 'services': expect_services, 'workers': mock_workers_status.return_value}
mock_get_worker_config.assert_called_once()
mock_nexus_ok.assert_called_once_with(config.cachito_nexus_url)
mock_athens_ok.assert_called_once_with(config.cachito_athens_url)
assert mock_database_ok.call_count == (0 if short else 1)
assert mock_rabbitmq_ok.call_count == (0 if short else 1)
assert mock_workers_status.call_count == (0 if short else 1)
mock_can_process.assert_called_once_with(TEST_PACKAGE_MANAGERS, expect_services, True)
|
@pytest.mark.parametrize('short', [True, False])
@mock.patch('cachito.web.status.get_worker_config')
@mock.patch('cachito.web.status.nexus_ok')
@mock.patch('cachito.web.status.athens_ok')
@mock.patch('cachito.web.status.database_ok')
@mock.patch('cachito.web.status.rabbitmq_ok')
@mock.patch('cachito.web.status.workers_status')
@mock.patch('cachito.web.status._can_process')
def test_status_service_not_ok(mock_can_process, mock_workers_status, mock_rabbitmq_ok, mock_database_ok, mock_athens_ok, mock_nexus_ok, mock_get_worker_config, short, test_app):
config = mock.Mock()
config.cachito_nexus_url = 'http://nexus:8081'
config.cachito_nexus_hoster_url = 'http://nexus.example.org' if nexus_hoster else None
config.cachito_athens_url = 'http://athens:3000'
config.broker_url = 'amqp://test@rabbitmq:5672//'
config = config
mock_get_worker_config.return_value = config
mock_nexus_ok.return_value = (True, None)
mock_athens_ok.return_value = (False, 'Athens is currently at war with Sparta')
mock_database_ok.return_value = (True, None)
mock_rabbitmq_ok.return_value = (True, None)
mock_workers_status.return_value = [{'name': 'celery@123456', 'ok': True}]
if short:
err_msg = 'athens unavailable: Athens is currently at war with Sparta'
with pytest.raises(WorkerConfigError, match=err_msg):
status.status(short=True)
return
result = status.status(short=False)
expect_services = [{'name': 'nexus', 'ok': True}, {'name': 'athens', 'ok': False, 'reason': 'Athens is currently at war with Sparta'}, {'name': 'database', 'ok': True}, {'name': 'rabbitmq', 'ok': True}]
assert result == {'can_process': mock_can_process.return_value, 'services': expect_services, 'workers': mock_workers_status.return_value}
mock_get_worker_config.assert_called_once()
mock_nexus_ok.assert_called_once_with(config.cachito_nexus_url)
mock_athens_ok.assert_called_once_with(config.cachito_athens_url)
assert mock_database_ok.call_count == (0 if short else 1)
assert mock_rabbitmq_ok.call_count == (0 if short else 1)
assert mock_workers_status.call_count == (0 if short else 1)
mock_can_process.assert_called_once_with(TEST_PACKAGE_MANAGERS, expect_services, True)
|
cachito
|
positive
|
def testCustomLoss(self):
"""
d0,d1 -- s0 --loss-- d2
"""
<DeepExtract>
self.net = Containernet(controller=Controller)
self.net.addController('c0')
for i in range(0, 1):
self.s.append(self.net.addSwitch('s%d' % i))
if autolinkswitches:
for i in range(0, len(self.s) - 1):
self.net.addLink(self.s[i], self.s[i + 1])
for i in range(0, 0):
self.h.append(self.net.addHost('h%d' % i))
for i in range(0, 3):
self.d.append(self.net.addDocker('d%d' % i, dimage='ubuntu:trusty'))
</DeepExtract>
self.net.addLink(self.s[0], self.d[0])
self.net.addLink(self.s[0], self.d[1])
self.net.addLink(self.s[0], self.d[2], cls=TCLink, loss=100)
<DeepExtract>
self.net.start()
</DeepExtract>
self.assertTrue(len(self.getContainernetContainers()) == 3)
self.assertTrue(len(self.net.hosts) == 3)
self.assertTrue(self.net.ping([self.d[0]], manualdestip='10.0.0.2', timeout=1) <= 0.0)
self.assertTrue(self.net.ping([self.d[0]], manualdestip='10.0.0.3', timeout=1) >= 100.0)
<DeepExtract>
self.net.stop()
self.s = []
self.h = []
self.d = []
</DeepExtract>
|
def testCustomLoss(self):
"""
d0,d1 -- s0 --loss-- d2
"""
self.net = Containernet(controller=Controller)
self.net.addController('c0')
for i in range(0, 1):
self.s.append(self.net.addSwitch('s%d' % i))
if autolinkswitches:
for i in range(0, len(self.s) - 1):
self.net.addLink(self.s[i], self.s[i + 1])
for i in range(0, 0):
self.h.append(self.net.addHost('h%d' % i))
for i in range(0, 3):
self.d.append(self.net.addDocker('d%d' % i, dimage='ubuntu:trusty'))
self.net.addLink(self.s[0], self.d[0])
self.net.addLink(self.s[0], self.d[1])
self.net.addLink(self.s[0], self.d[2], cls=TCLink, loss=100)
self.net.start()
self.assertTrue(len(self.getContainernetContainers()) == 3)
self.assertTrue(len(self.net.hosts) == 3)
self.assertTrue(self.net.ping([self.d[0]], manualdestip='10.0.0.2', timeout=1) <= 0.0)
self.assertTrue(self.net.ping([self.d[0]], manualdestip='10.0.0.3', timeout=1) >= 100.0)
self.net.stop()
self.s = []
self.h = []
self.d = []
|
containernet
|
positive
|
def __init__(self) -> None:
"""Initialize CrowdStrike connector."""
<DeepExtract>
config_file_path = os.path.dirname(os.path.abspath(__file__)) + '/../config.yml'
if not os.path.isfile(config_file_path):
config = {}
config = yaml.load(open(config_file_path), Loader=yaml.FullLoader)
</DeepExtract>
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_BASE_URL, isNumber=is_number)
base_url = config_value
</DeepExtract>
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_CLIENT_ID, isNumber=is_number)
client_id = config_value
</DeepExtract>
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_CLIENT_SECRET, isNumber=is_number)
client_secret = config_value
</DeepExtract>
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INTERVAL_SEC, isNumber=True)
self.interval_sec = config_value
</DeepExtract>
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_SCOPES, isNumber=is_number)
scopes_str = config_value
</DeepExtract>
scopes = set()
if scopes_str is not None:
scopes = set(convert_comma_separated_str_to_list(scopes_str))
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_TLP, isNumber=is_number)
tlp = config_value
</DeepExtract>
<DeepExtract>
if tlp_value is None:
tlp_marking = DEFAULT_TLP_MARKING_DEFINITION
tlp_marking = get_tlp_string_marking_definition(tlp_value)
</DeepExtract>
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_CREATE_OBSERVABLES, isNumber=is_number)
create_observables = config_value
</DeepExtract>
if create_observables is None:
create_observables = self._DEFAULT_CREATE_OBSERVABLES
else:
create_observables = bool(create_observables)
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_CREATE_INDICATORS, isNumber=is_number)
create_indicators = config_value
</DeepExtract>
if create_indicators is None:
create_indicators = self._DEFAULT_CREATE_INDICATORS
else:
create_indicators = bool(create_indicators)
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_ACTOR_START_TIMESTAMP, isNumber=True)
actor_start_timestamp = config_value
</DeepExtract>
if is_timestamp_in_future(actor_start_timestamp):
raise ValueError('Actor start timestamp is in the future')
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_REPORT_START_TIMESTAMP, isNumber=True)
report_start_timestamp = config_value
</DeepExtract>
if is_timestamp_in_future(report_start_timestamp):
raise ValueError('Report start timestamp is in the future')
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_REPORT_STATUS, isNumber=is_number)
report_status_str = config_value
</DeepExtract>
<DeepExtract>
report_status = report_status_str._CONFIG_REPORT_STATUS_MAPPING[report_status.lower()]
</DeepExtract>
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_REPORT_TYPE, isNumber=is_number)
report_type = config_value
</DeepExtract>
if not report_type:
report_type = self._DEFAULT_REPORT_TYPE
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_REPORT_INCLUDE_TYPES, isNumber=is_number)
report_include_types_str = config_value
</DeepExtract>
report_include_types = []
if report_include_types_str is not None:
report_include_types = convert_comma_separated_str_to_list(report_include_types_str)
report_guess_malware = bool(self._get_configuration(config, self._CONFIG_REPORT_GUESS_MALWARE))
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INDICATOR_START_TIMESTAMP, isNumber=True)
indicator_start_timestamp = config_value
</DeepExtract>
if is_timestamp_in_future(indicator_start_timestamp):
raise ValueError('Indicator start timestamp is in the future')
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INDICATOR_EXCLUDE_TYPES, isNumber=is_number)
indicator_exclude_types_str = config_value
</DeepExtract>
indicator_exclude_types = []
if indicator_exclude_types_str is not None:
indicator_exclude_types = convert_comma_separated_str_to_list(indicator_exclude_types_str)
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INDICATOR_LOW_SCORE, isNumber=True)
indicator_low_score = config_value
</DeepExtract>
if indicator_low_score is None:
indicator_low_score = self._DEFAULT_INDICATOR_LOW_SCORE
<DeepExtract>
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INDICATOR_LOW_SCORE_LABELS, isNumber=is_number)
indicator_low_score_labels_str = config_value
</DeepExtract>
indicator_low_score_labels = []
if indicator_low_score_labels_str is not None:
indicator_low_score_labels = convert_comma_separated_str_to_list(indicator_low_score_labels_str)
update_existing_data = bool(self._get_configuration(config, self._CONFIG_UPDATE_EXISTING_DATA))
<DeepExtract>
author = create_organization('CrowdStrike')
</DeepExtract>
self.helper = OpenCTIConnectorHelper(config)
client = CrowdStrikeClient(base_url, client_id, client_secret)
importers: List[BaseImporter] = []
if self._CONFIG_SCOPE_ACTOR in scopes:
actor_importer = ActorImporter(self.helper, client.intel_api.actors, update_existing_data, author, actor_start_timestamp, tlp_marking)
importers.append(actor_importer)
if self._CONFIG_SCOPE_REPORT in scopes:
report_importer = ReportImporter(self.helper, client.intel_api.reports, update_existing_data, author, report_start_timestamp, tlp_marking, report_include_types, report_status, report_type, report_guess_malware)
importers.append(report_importer)
if self._CONFIG_SCOPE_INDICATOR in scopes:
indicator_importer_config = IndicatorImporterConfig(helper=self.helper, indicators_api=client.intel_api.indicators, reports_api=client.intel_api.reports, update_existing_data=update_existing_data, author=author, default_latest_timestamp=indicator_start_timestamp, tlp_marking=tlp_marking, create_observables=create_observables, create_indicators=create_indicators, exclude_types=indicator_exclude_types, report_status=report_status, report_type=report_type, indicator_low_score=indicator_low_score, indicator_low_score_labels=set(indicator_low_score_labels))
indicator_importer = IndicatorImporter(indicator_importer_config)
importers.append(indicator_importer)
if self._CONFIG_SCOPE_YARA_MASTER in scopes:
yara_master_importer = YaraMasterImporter(self.helper, client.intel_api.rules, client.intel_api.reports, author, tlp_marking, update_existing_data, report_status, report_type)
importers.append(yara_master_importer)
if self._CONFIG_SCOPE_SNORT_SURICATA_MASTER in scopes:
snort_master_importer = SnortMasterImporter(self.helper, client.intel_api.rules, client.intel_api.reports, author, tlp_marking, update_existing_data, report_status, report_type)
importers.append(snort_master_importer)
self.importers = importers
|
def __init__(self) -> None:
"""Initialize CrowdStrike connector."""
config_file_path = os.path.dirname(os.path.abspath(__file__)) + '/../config.yml'
if not os.path.isfile(config_file_path):
config = {}
config = yaml.load(open(config_file_path), Loader=yaml.FullLoader)
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_BASE_URL, isNumber=is_number)
base_url = config_value
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_CLIENT_ID, isNumber=is_number)
client_id = config_value
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_CLIENT_SECRET, isNumber=is_number)
client_secret = config_value
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INTERVAL_SEC, isNumber=True)
self.interval_sec = config_value
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_SCOPES, isNumber=is_number)
scopes_str = config_value
scopes = set()
if scopes_str is not None:
scopes = set(convert_comma_separated_str_to_list(scopes_str))
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_TLP, isNumber=is_number)
tlp = config_value
if tlp_value is None:
tlp_marking = DEFAULT_TLP_MARKING_DEFINITION
tlp_marking = get_tlp_string_marking_definition(tlp_value)
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_CREATE_OBSERVABLES, isNumber=is_number)
create_observables = config_value
if create_observables is None:
create_observables = self._DEFAULT_CREATE_OBSERVABLES
else:
create_observables = bool(create_observables)
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_CREATE_INDICATORS, isNumber=is_number)
create_indicators = config_value
if create_indicators is None:
create_indicators = self._DEFAULT_CREATE_INDICATORS
else:
create_indicators = bool(create_indicators)
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_ACTOR_START_TIMESTAMP, isNumber=True)
actor_start_timestamp = config_value
if is_timestamp_in_future(actor_start_timestamp):
raise ValueError('Actor start timestamp is in the future')
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_REPORT_START_TIMESTAMP, isNumber=True)
report_start_timestamp = config_value
if is_timestamp_in_future(report_start_timestamp):
raise ValueError('Report start timestamp is in the future')
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_REPORT_STATUS, isNumber=is_number)
report_status_str = config_value
report_status = report_status_str._CONFIG_REPORT_STATUS_MAPPING[report_status.lower()]
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_REPORT_TYPE, isNumber=is_number)
report_type = config_value
if not report_type:
report_type = self._DEFAULT_REPORT_TYPE
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_REPORT_INCLUDE_TYPES, isNumber=is_number)
report_include_types_str = config_value
report_include_types = []
if report_include_types_str is not None:
report_include_types = convert_comma_separated_str_to_list(report_include_types_str)
report_guess_malware = bool(self._get_configuration(config, self._CONFIG_REPORT_GUESS_MALWARE))
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INDICATOR_START_TIMESTAMP, isNumber=True)
indicator_start_timestamp = config_value
if is_timestamp_in_future(indicator_start_timestamp):
raise ValueError('Indicator start timestamp is in the future')
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INDICATOR_EXCLUDE_TYPES, isNumber=is_number)
indicator_exclude_types_str = config_value
indicator_exclude_types = []
if indicator_exclude_types_str is not None:
indicator_exclude_types = convert_comma_separated_str_to_list(indicator_exclude_types_str)
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INDICATOR_LOW_SCORE, isNumber=True)
indicator_low_score = config_value
if indicator_low_score is None:
indicator_low_score = self._DEFAULT_INDICATOR_LOW_SCORE
yaml_path = config._get_yaml_path(config_name)
env_var_name = config._get_environment_variable_name(yaml_path)
config_value = get_config_variable(env_var_name, yaml_path, self._CONFIG_INDICATOR_LOW_SCORE_LABELS, isNumber=is_number)
indicator_low_score_labels_str = config_value
indicator_low_score_labels = []
if indicator_low_score_labels_str is not None:
indicator_low_score_labels = convert_comma_separated_str_to_list(indicator_low_score_labels_str)
update_existing_data = bool(self._get_configuration(config, self._CONFIG_UPDATE_EXISTING_DATA))
author = create_organization('CrowdStrike')
self.helper = OpenCTIConnectorHelper(config)
client = CrowdStrikeClient(base_url, client_id, client_secret)
importers: List[BaseImporter] = []
if self._CONFIG_SCOPE_ACTOR in scopes:
actor_importer = ActorImporter(self.helper, client.intel_api.actors, update_existing_data, author, actor_start_timestamp, tlp_marking)
importers.append(actor_importer)
if self._CONFIG_SCOPE_REPORT in scopes:
report_importer = ReportImporter(self.helper, client.intel_api.reports, update_existing_data, author, report_start_timestamp, tlp_marking, report_include_types, report_status, report_type, report_guess_malware)
importers.append(report_importer)
if self._CONFIG_SCOPE_INDICATOR in scopes:
indicator_importer_config = IndicatorImporterConfig(helper=self.helper, indicators_api=client.intel_api.indicators, reports_api=client.intel_api.reports, update_existing_data=update_existing_data, author=author, default_latest_timestamp=indicator_start_timestamp, tlp_marking=tlp_marking, create_observables=create_observables, create_indicators=create_indicators, exclude_types=indicator_exclude_types, report_status=report_status, report_type=report_type, indicator_low_score=indicator_low_score, indicator_low_score_labels=set(indicator_low_score_labels))
indicator_importer = IndicatorImporter(indicator_importer_config)
importers.append(indicator_importer)
if self._CONFIG_SCOPE_YARA_MASTER in scopes:
yara_master_importer = YaraMasterImporter(self.helper, client.intel_api.rules, client.intel_api.reports, author, tlp_marking, update_existing_data, report_status, report_type)
importers.append(yara_master_importer)
if self._CONFIG_SCOPE_SNORT_SURICATA_MASTER in scopes:
snort_master_importer = SnortMasterImporter(self.helper, client.intel_api.rules, client.intel_api.reports, author, tlp_marking, update_existing_data, report_status, report_type)
importers.append(snort_master_importer)
self.importers = importers
|
connectors
|
positive
|
def union(self, x, y):
<DeepExtract>
root = x
while root != roots[root]:
root = roots[root]
while x != roots[x]:
(x, roots[x]) = (roots[x], root)
x_root = root
</DeepExtract>
<DeepExtract>
root = y
while root != roots[root]:
root = roots[root]
while y != roots[y]:
(y, roots[y]) = (roots[y], root)
y_root = root
</DeepExtract>
if x_root == y_root:
return
self.count -= 1
if x_root.rank < y_root.rank:
x_root.root = y_root
elif x_root.rank > y_root.rank:
y_root.root = x_root
else:
x_root.root = y_root
y_root.rank += 1
|
def union(self, x, y):
root = x
while root != roots[root]:
root = roots[root]
while x != roots[x]:
(x, roots[x]) = (roots[x], root)
x_root = root
root = y
while root != roots[root]:
root = roots[root]
while y != roots[y]:
(y, roots[y]) = (roots[y], root)
y_root = root
if x_root == y_root:
return
self.count -= 1
if x_root.rank < y_root.rank:
x_root.root = y_root
elif x_root.rank > y_root.rank:
y_root.root = x_root
else:
x_root.root = y_root
y_root.rank += 1
|
Algorithm_Templates
|
positive
|
def validate_post_login():
"""Function validating parameters passed in uri query after redirection from login form.
Should return True, if everything is ok, or False, if something went wrong.
"""
if request.args.get('error'):
return False
if _fetch_data('csrf') != request.args.get('state'):
return False
code = request.args.get('code')
if not code:
return False
<DeepExtract>
if _session_key not in session:
session[_session_key] = dict()
session[_session_key].update(**kwargs)
session.modified = True
</DeepExtract>
return True
|
def validate_post_login():
"""Function validating parameters passed in uri query after redirection from login form.
Should return True, if everything is ok, or False, if something went wrong.
"""
if request.args.get('error'):
return False
if _fetch_data('csrf') != request.args.get('state'):
return False
code = request.args.get('code')
if not code:
return False
if _session_key not in session:
session[_session_key] = dict()
session[_session_key].update(**kwargs)
session.modified = True
return True
|
acousticbrainz-server
|
positive
|
def prepare_params(kwargs):
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env():
return gym.make(env_name)
kwargs['make_env'] = make_env
<DeepExtract>
if kwargs['make_env'] not in CACHED_ENVS:
env = kwargs['make_env']()
CACHED_ENVS[kwargs['make_env']] = env
tmp_env = CACHED_ENVS[kwargs['make_env']]
</DeepExtract>
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
tmp_env.reset()
kwargs['max_u'] = np.array(kwargs['max_u']) if type(kwargs['max_u']) == list else kwargs['max_u']
kwargs['gamma'] = 1.0 - 1.0 / kwargs['T']
if 'lr' in kwargs:
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers', 'network_class', 'polyak', 'batch_size', 'Q_lr', 'pi_lr', 'norm_eps', 'norm_clip', 'max_u', 'action_l2', 'clip_obs', 'scope', 'relative_goals']:
ddpg_params[name] = kwargs[name]
kwargs['_' + name] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
return kwargs
|
def prepare_params(kwargs):
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env():
return gym.make(env_name)
kwargs['make_env'] = make_env
if kwargs['make_env'] not in CACHED_ENVS:
env = kwargs['make_env']()
CACHED_ENVS[kwargs['make_env']] = env
tmp_env = CACHED_ENVS[kwargs['make_env']]
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
tmp_env.reset()
kwargs['max_u'] = np.array(kwargs['max_u']) if type(kwargs['max_u']) == list else kwargs['max_u']
kwargs['gamma'] = 1.0 - 1.0 / kwargs['T']
if 'lr' in kwargs:
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers', 'network_class', 'polyak', 'batch_size', 'Q_lr', 'pi_lr', 'norm_eps', 'norm_clip', 'max_u', 'action_l2', 'clip_obs', 'scope', 'relative_goals']:
ddpg_params[name] = kwargs[name]
kwargs['_' + name] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
return kwargs
|
CHER
|
positive
|
def sanitize(self, raise_exception: bool=True) -> None:
"""
Sanitizes the molecule if it has not been done before.
:param raise_exception: if True will raise exception on failed sanitation
:raises MoleculeException: if the molecule could not be sanitized
"""
if self._is_sanitized:
return
try:
AllChem.SanitizeMol(self.rd_mol)
except:
if raise_exception:
raise MoleculeException(f'Unable to sanitize molecule ({self.smiles})')
self.rd_mol = Chem.MolFromSmiles(self.smiles, sanitize=False)
return
self.smiles = Chem.MolToSmiles(self.rd_mol)
<DeepExtract>
self._inchi = None
self._inchi_key = None
self._fingerprints = {}
self._atom_mappings = {}
self._reverse_atom_mappings = {}
</DeepExtract>
self._is_sanitized = True
|
def sanitize(self, raise_exception: bool=True) -> None:
"""
Sanitizes the molecule if it has not been done before.
:param raise_exception: if True will raise exception on failed sanitation
:raises MoleculeException: if the molecule could not be sanitized
"""
if self._is_sanitized:
return
try:
AllChem.SanitizeMol(self.rd_mol)
except:
if raise_exception:
raise MoleculeException(f'Unable to sanitize molecule ({self.smiles})')
self.rd_mol = Chem.MolFromSmiles(self.smiles, sanitize=False)
return
self.smiles = Chem.MolToSmiles(self.rd_mol)
self._inchi = None
self._inchi_key = None
self._fingerprints = {}
self._atom_mappings = {}
self._reverse_atom_mappings = {}
self._is_sanitized = True
|
aizynthfinder
|
positive
|
def same_ivals(f, a, b, tol):
(igral, err, n, ivals) = algorithm_4(f, a, b, tol)
<DeepExtract>
learner = IntegratorLearner(f, bounds=(a, b), tol=tol)
for _ in range(n):
(points, _) = learner.ask(1)
learner.tell_many(points, map(learner.function, points))
learner = learner
</DeepExtract>
print('igral difference', learner.igral - igral, 'err difference', learner.err - err)
return equal_ivals(learner.ivals, ivals, verbose=True)
|
def same_ivals(f, a, b, tol):
(igral, err, n, ivals) = algorithm_4(f, a, b, tol)
learner = IntegratorLearner(f, bounds=(a, b), tol=tol)
for _ in range(n):
(points, _) = learner.ask(1)
learner.tell_many(points, map(learner.function, points))
learner = learner
print('igral difference', learner.igral - igral, 'err difference', learner.err - err)
return equal_ivals(learner.ivals, ivals, verbose=True)
|
adaptive
|
positive
|
def _send(cmd, name):
<DeepExtract>
with self._lock:
if self._fd is not None:
return
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from billiard.semaphore_tracker import main;main(%d)'
(r, w) = os.pipe()
try:
fds_to_pass.append(r)
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(w)
raise
else:
self._fd = w
finally:
os.close(r)
</DeepExtract>
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(name) > 512:
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg)
|
def _send(cmd, name):
with self._lock:
if self._fd is not None:
return
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from billiard.semaphore_tracker import main;main(%d)'
(r, w) = os.pipe()
try:
fds_to_pass.append(r)
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(w)
raise
else:
self._fd = w
finally:
os.close(r)
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(name) > 512:
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg)
|
billiard
|
positive
|
def step(self):
def flatten(positions, velocities):
system_state = np.concatenate((positions, velocities), axis=1)
system_state_flat = system_state.flatten()
return system_state_flat
def unflatten(system_state_flat):
system_state = system_state_flat.reshape(self.num_bodies, 2 * self.num_dimensions)
positions = system_state[:, :self.num_dimensions]
velocities = system_state[:, self.num_dimensions:]
return (positions, velocities)
def system_first_order_ode(system_state_flat, _):
<DeepExtract>
system_state = system_state_flat.reshape(self.num_bodies, 2 * self.num_dimensions)
positions = system_state[:, :self.num_dimensions]
velocities = system_state[:, self.num_dimensions:]
(positions, velocities) = (positions, velocities)
</DeepExtract>
accelerations = np.zeros_like(velocities)
for i in range(self.num_bodies):
relative_positions = positions - positions[i]
distances = np.linalg.norm(relative_positions, axis=1, keepdims=True)
distances[i] = 1.0
force_vectors = self.GRAVITATIONAL_CONSTANT * relative_positions / distances ** self.num_dimensions
force_vector = np.sum(force_vectors, axis=0)
accelerations[i] = force_vector
<DeepExtract>
system_state = np.concatenate((velocities, accelerations), axis=1)
system_state_flat = system_state.flatten()
d_system_state_flat = system_state_flat
</DeepExtract>
return d_system_state_flat
<DeepExtract>
system_state = np.concatenate((self.body_positions, self.body_velocities), axis=1)
system_state_flat = system_state.flatten()
current_system_state_flat = system_state_flat
</DeepExtract>
(_, next_system_state_flat) = odeint(system_first_order_ode, current_system_state_flat, [0.0, self.dt])
<DeepExtract>
system_state = next_system_state_flat.reshape(self.num_bodies, 2 * self.num_dimensions)
positions = system_state[:, :self.num_dimensions]
velocities = system_state[:, self.num_dimensions:]
(self.body_positions, self.body_velocities) = (positions, velocities)
</DeepExtract>
if self.contained_in_a_box:
ind_below_min = self.body_positions < self.MIN_POS
ind_above_max = self.body_positions > self.MAX_POS
self.body_positions[ind_below_min] += 2.0 * (self.MIN_POS - self.body_positions[ind_below_min])
self.body_positions[ind_above_max] += 2.0 * (self.MAX_POS - self.body_positions[ind_above_max])
self.body_velocities[ind_below_min] *= -1.0
self.body_velocities[ind_above_max] *= -1.0
<DeepExtract>
assert np.all(self.body_positions >= self.MIN_POS) and np.all(self.body_positions <= self.MAX_POS)
</DeepExtract>
|
def step(self):
def flatten(positions, velocities):
system_state = np.concatenate((positions, velocities), axis=1)
system_state_flat = system_state.flatten()
return system_state_flat
def unflatten(system_state_flat):
system_state = system_state_flat.reshape(self.num_bodies, 2 * self.num_dimensions)
positions = system_state[:, :self.num_dimensions]
velocities = system_state[:, self.num_dimensions:]
return (positions, velocities)
    def system_first_order_ode(system_state_flat, _):
        (positions, velocities) = unflatten(system_state_flat)
        accelerations = np.zeros_like(velocities)
        for i in range(self.num_bodies):
            relative_positions = positions - positions[i]
            distances = np.linalg.norm(relative_positions, axis=1, keepdims=True)
            distances[i] = 1.0
            force_vectors = self.GRAVITATIONAL_CONSTANT * relative_positions / distances ** self.num_dimensions
            accelerations[i] = np.sum(force_vectors, axis=0)
        return flatten(velocities, accelerations)
    current_system_state_flat = flatten(self.body_positions, self.body_velocities)
    (_, next_system_state_flat) = odeint(system_first_order_ode, current_system_state_flat, [0.0, self.dt])
    (self.body_positions, self.body_velocities) = unflatten(next_system_state_flat)
if self.contained_in_a_box:
ind_below_min = self.body_positions < self.MIN_POS
ind_above_max = self.body_positions > self.MAX_POS
self.body_positions[ind_below_min] += 2.0 * (self.MIN_POS - self.body_positions[ind_below_min])
self.body_positions[ind_above_max] += 2.0 * (self.MAX_POS - self.body_positions[ind_above_max])
self.body_velocities[ind_below_min] *= -1.0
self.body_velocities[ind_above_max] *= -1.0
assert np.all(self.body_positions >= self.MIN_POS) and np.all(self.body_positions <= self.MAX_POS)
|
deep_bisim4control
|
positive
|
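A minimal, self-contained sketch of the integration pattern in the row above: pack (positions, velocities) into one flat state vector, let scipy's odeint advance it, then unpack the result. The two-body setup, constants, and time step below are illustrative, not taken from the repository.
import numpy as np
from scipy.integrate import odeint

NUM_BODIES, NUM_DIMS, G, DT = 2, 2, 1.0, 0.01

def flatten(positions, velocities):
    # pack the per-body (position, velocity) rows into one 1-D state vector
    return np.concatenate((positions, velocities), axis=1).flatten()

def unflatten(state_flat):
    state = state_flat.reshape(NUM_BODIES, 2 * NUM_DIMS)
    return state[:, :NUM_DIMS], state[:, NUM_DIMS:]

def first_order_ode(state_flat, _t):
    positions, velocities = unflatten(state_flat)
    accelerations = np.zeros_like(velocities)
    for i in range(NUM_BODIES):
        rel = positions - positions[i]
        dist = np.linalg.norm(rel, axis=1, keepdims=True)
        dist[i] = 1.0  # body i exerts no force on itself; avoid division by zero
        accelerations[i] = np.sum(G * rel / dist ** NUM_DIMS, axis=0)
    # d(position)/dt = velocity, d(velocity)/dt = acceleration
    return flatten(velocities, accelerations)

positions = np.array([[0.0, 0.0], [1.0, 0.0]])
velocities = np.array([[0.0, -0.5], [0.0, 0.5]])
_, final_state = odeint(first_order_ode, flatten(positions, velocities), [0.0, DT])
positions, velocities = unflatten(final_state)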
def weights_normal_init(model, dev=0.01):
if isinstance(model, list):
for m in model:
<DeepExtract>
if isinstance(m, list):
for m in m:
weights_normal_init(m, dev)
else:
for m in m.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
</DeepExtract>
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
|
def weights_normal_init(model, dev=0.01):
if isinstance(model, list):
for m in model:
if isinstance(m, list):
for m in m:
weights_normal_init(m, dev)
else:
for m in m.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
|
Detection-PyTorch-Notebook
|
positive
|
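A hedged sketch of the same selective initialization using torch.nn.init and Module.apply; the tiny model is only a placeholder, and unlike the original this version also zeroes biases.
import torch.nn as nn

def init_normal(module, std=0.01):
    # initialize only the layer types the original function targets
    if isinstance(module, (nn.Conv2d, nn.Linear)):
        nn.init.normal_(module.weight, mean=0.0, std=std)
        if module.bias is not None:
            nn.init.zeros_(module.bias)  # extra step, not in the original

# architecture is arbitrary; only the initialization pass is demonstrated
model = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3), nn.ReLU(), nn.Conv2d(16, 16, kernel_size=3), nn.Linear(16, 10))
model.apply(lambda m: init_normal(m, std=0.01))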
def _at_epoch_end(self, metrics_val, val_score_key, epoch, is_best, **kwargs):
"""
    Defines behaviour at the end of each epoch: executes every callback's
    `at_epoch_end` method and saves the current state if necessary
Parameters
----------
metrics_val : dict
validation metrics
val_score_key : str
validation score key
epoch : int
current epoch
num_epochs : int
total number of epochs
is_best : bool
whether current model is best one so far
**kwargs :
keyword arguments
"""
for cb in self._callbacks:
<DeepExtract>
            new_state = cb.at_epoch_end(self, val_metrics=metrics_val, val_score_key=val_score_key, curr_epoch=epoch)
            if 'model' in new_state:
                self.module = new_state.pop('model')
            if 'epoch' in new_state:
                self.start_epoch = new_state.pop('epoch')
            super()._update_state(new_state)
</DeepExtract>
if epoch % self.save_freq == 0:
<DeepExtract>
            checkpoint_path = os.path.join(self.save_path, 'checkpoint_epoch_%d.pkl' % epoch)
            if not checkpoint_path.endswith('.pkl'):
                checkpoint_path = checkpoint_path + '.pkl'
            save_checkpoint(checkpoint_path, self.module, epoch, **kwargs)
</DeepExtract>
if is_best:
<DeepExtract>
            checkpoint_path = os.path.join(self.save_path, 'checkpoint_best.pkl')
            if not checkpoint_path.endswith('.pkl'):
                checkpoint_path = checkpoint_path + '.pkl'
            save_checkpoint(checkpoint_path, self.module, epoch, **kwargs)
</DeepExtract>
|
def _at_epoch_end(self, metrics_val, val_score_key, epoch, is_best, **kwargs):
"""
    Defines behaviour at the end of each epoch: executes every callback's
    `at_epoch_end` method and saves the current state if necessary
Parameters
----------
metrics_val : dict
validation metrics
val_score_key : str
validation score key
epoch : int
current epoch
num_epochs : int
total number of epochs
is_best : bool
whether current model is best one so far
**kwargs :
keyword arguments
"""
for cb in self._callbacks:
            new_state = cb.at_epoch_end(self, val_metrics=metrics_val, val_score_key=val_score_key, curr_epoch=epoch)
            if 'model' in new_state:
                self.module = new_state.pop('model')
            if 'epoch' in new_state:
                self.start_epoch = new_state.pop('epoch')
            super()._update_state(new_state)
if epoch % self.save_freq == 0:
            checkpoint_path = os.path.join(self.save_path, 'checkpoint_epoch_%d.pkl' % epoch)
            if not checkpoint_path.endswith('.pkl'):
                checkpoint_path = checkpoint_path + '.pkl'
            save_checkpoint(checkpoint_path, self.module, epoch, **kwargs)
if is_best:
            checkpoint_path = os.path.join(self.save_path, 'checkpoint_best.pkl')
            if not checkpoint_path.endswith('.pkl'):
                checkpoint_path = checkpoint_path + '.pkl'
            save_checkpoint(checkpoint_path, self.module, epoch, **kwargs)
|
delira
|
positive
|
def delete_index(connection, index_name, logger, err_if_does_not_exist=True, **kwargs):
"""
Deletes the index named index_name
Args:
connection: Elasticsearch client object
index_name: The name of the index
logger: logging object to log at debug and exception level
        err_if_does_not_exist: whether to raise an error if the index does not already exist; defaults to True
kwargs:
body: The configuration for the index (settings and mappings)
master_timeout: Specify timeout for connection to master
timeout: Explicit operation timeout
update_all_types: Whether to update the mapping for all fields with the same name across all types or not
wait_for_active_shards: Set the number of active shards to wait for before the operation returns.
Refer https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
"""
if not exists(connection, index_name):
if err_if_does_not_exist:
raise Exception('Failed to delete index {}. It does not exist!'.format(index_name))
else:
return
try:
<DeepExtract>
logger.debug('Removing alias %s from indices: %s', '_all', str([index_name]))
connection.indices.delete_alias(index=[index_name], name='_all', **kwargs)
logger.debug('Alias %s removed from indices %s', '_all', str([index_name]))
</DeepExtract>
except NotFoundError:
        logger.warning('No aliases found on index %s', index_name)
connection.indices.delete(index=index_name, **kwargs)
logger.debug('%s: Delete Index %s: Operation successfully completed', log_prefix, index_name)
|
def delete_index(connection, index_name, logger, err_if_does_not_exist=True, **kwargs):
"""
Deletes the index named index_name
Args:
connection: Elasticsearch client object
index_name: The name of the index
logger: logging object to log at debug and exception level
        err_if_does_not_exist: whether to raise an error if the index does not already exist; defaults to True
kwargs:
body: The configuration for the index (settings and mappings)
master_timeout: Specify timeout for connection to master
timeout: Explicit operation timeout
update_all_types: Whether to update the mapping for all fields with the same name across all types or not
wait_for_active_shards: Set the number of active shards to wait for before the operation returns.
Refer https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
"""
if not exists(connection, index_name):
if err_if_does_not_exist:
raise Exception('Failed to delete index {}. It does not exist!'.format(index_name))
else:
return
try:
logger.debug('Removing alias %s from indices: %s', '_all', str([index_name]))
connection.indices.delete_alias(index=[index_name], name='_all', **kwargs)
logger.debug('Alias %s removed from indices %s', '_all', str([index_name]))
except NotFoundError:
        logger.warning('No aliases found on index %s', index_name)
connection.indices.delete(index=index_name, **kwargs)
logger.debug('%s: Delete Index %s: Operation successfully completed', log_prefix, index_name)
|
chatbot_ner
|
positive
|
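A short usage sketch of the delete flow above with elasticsearch-py; the host, index name, logger setup, and drop_index wrapper are placeholders rather than part of the original module.
import logging
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError

logger = logging.getLogger(__name__)
es = Elasticsearch('http://localhost:9200')

def drop_index(index_name):
    if not es.indices.exists(index=index_name):
        logger.warning('Index %s does not exist, nothing to delete', index_name)
        return
    try:
        # detach all aliases first so nothing keeps resolving to the index
        es.indices.delete_alias(index=index_name, name='_all')
    except NotFoundError:
        logger.warning('No aliases found on index %s', index_name)
    es.indices.delete(index=index_name)

drop_index('entity_data')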
def make_banner(job, code=None):
"""Return banner text to add based on a *job* and a page's *code*."""
banner = job.banner
if code is not None and job.autoassess is not False:
<DeepExtract>
if job.autoassess is None or job.autoassess is True:
classnames = ['a', 'b', 'book', 'c', 'dab', 'fa', 'fl', 'ga', 'list', 'redirect', 'start', 'stub']
else:
classnames = [klass.strip().lower() for klass in job.autoassess.split(',')]
classes = {klass: 0 for klass in classnames}
for template in code.ifilter_templates(recursive=True):
if template.has('class'):
value = str(template.get('class').value).lower()
if value in classes:
classes[value] += 1
values = tuple(classes.values())
            best = max(values)
            (assess, reason) = (None, None)
            if best:
                confidence = float(best) / sum(values)
                if confidence > 0.75:
                    rank = tuple(classes.keys())[values.index(best)]
                    if rank in ('fa', 'fl', 'ga'):
                        (assess, reason) = (rank.upper(), 'inherit')
                    else:
                        (assess, reason) = (self._upperfirst(rank), 'inherit')
</DeepExtract>
if assess:
banner += '|class=' + assess
if reason:
banner += '|auto=' + reason
if job.append:
banner += '|' + '|'.join(job.append.split(','))
return '{{' + banner + '}}'
|
def make_banner(job, code=None):
"""Return banner text to add based on a *job* and a page's *code*."""
banner = job.banner
if code is not None and job.autoassess is not False:
if job.autoassess is None or job.autoassess is True:
classnames = ['a', 'b', 'book', 'c', 'dab', 'fa', 'fl', 'ga', 'list', 'redirect', 'start', 'stub']
else:
classnames = [klass.strip().lower() for klass in job.autoassess.split(',')]
classes = {klass: 0 for klass in classnames}
for template in code.ifilter_templates(recursive=True):
if template.has('class'):
value = str(template.get('class').value).lower()
if value in classes:
classes[value] += 1
values = tuple(classes.values())
        best = max(values)
        (assess, reason) = (None, None)
        if best:
            confidence = float(best) / sum(values)
            if confidence > 0.75:
                rank = tuple(classes.keys())[values.index(best)]
                if rank in ('fa', 'fl', 'ga'):
                    (assess, reason) = (rank.upper(), 'inherit')
                else:
                    (assess, reason) = (self._upperfirst(rank), 'inherit')
if assess:
banner += '|class=' + assess
if reason:
banner += '|auto=' + reason
if job.append:
banner += '|' + '|'.join(job.append.split(','))
return '{{' + banner + '}}'
|
earwigbot
|
positive
|
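A hedged sketch of the vote-counting step above using mwparserfromhell, the wikitext parser earwigbot builds on; the sample wikitext and the shortened class list are made up.
import mwparserfromhell

wikitext = '{{WikiProject Example|class=B}}\n{{WikiProject Other|class=b}}\n{{WikiProject Third|class=start}}'
classes = {'a': 0, 'b': 0, 'c': 0, 'start': 0, 'stub': 0}
for template in mwparserfromhell.parse(wikitext).ifilter_templates(recursive=True):
    if template.has('class'):
        value = str(template.get('class').value).strip().lower()
        if value in classes:
            classes[value] += 1
values = tuple(classes.values())
best = max(values)  # here: two votes for 'b', one for 'start'
if best and best / sum(values) > 0.75:
    print('confident assessment:', tuple(classes.keys())[values.index(best)])
else:
    print('no confident assessment')  # 2/3 of the votes is below the 0.75 threshold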
@APP.route('/')
@APP.route('/counts')
@APP.route('/trees')
@APP.route('/sents')
@APP.route('/brackets')
@APP.route('/fragments')
@requires_auth
def main():
"""Main search form & results page."""
output = None
if request.path != '/':
output = request.path.lstrip('/')
elif 'output' in request.args:
output = request.args['output']
<DeepExtract>
selected = set()
if 'texts' in request.args:
for a in filter(None, request.args['texts'].replace('.', ',').split(',')):
if '-' in a:
(b, c) = a.split('-')
selected.update((n for n in range(int(b), int(c))))
else:
selected.add(int(a))
else:
selected.update(range(len(TEXTS)))
if METADATA is not None and request.args.get('subset'):
(key, val) = request.args['subset'].split('=')
selected &= set((METADATA[key] == val).nonzero()[0])
selected = selected
</DeepExtract>
args = dict(form=request.args, texts=TEXTS, selectedtexts=selected, output='counts', havetgrep='tgrep2' in CORPORA, havefrag='frag' in CORPORA, default=[a for a in ['tgrep2', 'frag', 'regex'] if a in CORPORA][0], metadata=METADATA, categoricalcolumns=None if METADATA is None else [col for col in METADATA.columns if iscategorical(METADATA[col])])
if output:
if output not in DISPATCH:
return ('Invalid argument', 404)
elif request.args.get('export'):
return export(request.args, output)
args['output'] = output
args['results'] = DISPATCH[output](request.args)
if DEBUG:
return render_template('searchresults.html', **args)
else:
return Response(stream_template('searchresults.html', **args))
return render_template('search.html', **args)
|
@APP.route('/')
@APP.route('/counts')
@APP.route('/trees')
@APP.route('/sents')
@APP.route('/brackets')
@APP.route('/fragments')
@requires_auth
def main():
"""Main search form & results page."""
output = None
if request.path != '/':
output = request.path.lstrip('/')
elif 'output' in request.args:
output = request.args['output']
selected = set()
if 'texts' in request.args:
for a in filter(None, request.args['texts'].replace('.', ',').split(',')):
if '-' in a:
(b, c) = a.split('-')
selected.update((n for n in range(int(b), int(c))))
else:
selected.add(int(a))
else:
selected.update(range(len(TEXTS)))
if METADATA is not None and request.args.get('subset'):
(key, val) = request.args['subset'].split('=')
selected &= set((METADATA[key] == val).nonzero()[0])
selected = selected
args = dict(form=request.args, texts=TEXTS, selectedtexts=selected, output='counts', havetgrep='tgrep2' in CORPORA, havefrag='frag' in CORPORA, default=[a for a in ['tgrep2', 'frag', 'regex'] if a in CORPORA][0], metadata=METADATA, categoricalcolumns=None if METADATA is None else [col for col in METADATA.columns if iscategorical(METADATA[col])])
if output:
if output not in DISPATCH:
return ('Invalid argument', 404)
elif request.args.get('export'):
return export(request.args, output)
args['output'] = output
args['results'] = DISPATCH[output](request.args)
if DEBUG:
return render_template('searchresults.html', **args)
else:
return Response(stream_template('searchresults.html', **args))
return render_template('search.html', **args)
|
disco-dop
|
positive
|
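A standalone sketch of just the 'texts' range parsing used above; note the original treats 'b-c' as range(b, c), so the upper bound is exclusive, and the function name here is illustrative.
def parse_selection(spec, total):
    # an empty spec means "select every text"
    if not spec:
        return set(range(total))
    selected = set()
    for part in filter(None, spec.replace('.', ',').split(',')):
        if '-' in part:
            start, end = part.split('-')
            selected.update(range(int(start), int(end)))
        else:
            selected.add(int(part))
    return selected

assert parse_selection('0-3,5', 10) == {0, 1, 2, 5}
assert parse_selection('', 4) == {0, 1, 2, 3}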
def prompt(self, console: io.IO, step: str, args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
    Returns: A copy of args plus the newly collected parameter.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step, args.get(self.PARAMETER), self._validate):
return new_args
<DeepExtract>
if 'django_superuser_login' in args:
msg = 'Enter a password for the Django superuser "{}"'.format(args['django_superuser_login'])
else:
msg = 'Enter a password for the Django superuser'
</DeepExtract>
question = '{} {}'.format(step, msg)
<DeepExtract>
console.tell(question)
while True:
password1 = console.getpass('Password: ')
try:
_password_validate(password1)
except ValueError as e:
console.error(e)
continue
password2 = console.getpass('Password (again): ')
if password1 != password2:
console.error('Passwords do not match, please try again')
continue
            answer = password1
            break
</DeepExtract>
new_args[self.PARAMETER] = answer
return new_args
|
def prompt(self, console: io.IO, step: str, args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
    Returns: A copy of args plus the newly collected parameter.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step, args.get(self.PARAMETER), self._validate):
return new_args
if 'django_superuser_login' in args:
msg = 'Enter a password for the Django superuser "{}"'.format(args['django_superuser_login'])
else:
msg = 'Enter a password for the Django superuser'
question = '{} {}'.format(step, msg)
console.tell(question)
while True:
password1 = console.getpass('Password: ')
try:
_password_validate(password1)
except ValueError as e:
console.error(e)
continue
password2 = console.getpass('Password (again): ')
if password1 != password2:
console.error('Passwords do not match, please try again')
continue
        answer = password1
        break
new_args[self.PARAMETER] = answer
return new_args
|
django-cloud-deploy
|
positive
|
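A standard-library sketch of the ask-twice password loop above; a simple length check stands in for the project's _password_validate, and plain print/getpass stand in for the console object.
import getpass

def prompt_password(min_length=8):
    while True:
        first = getpass.getpass('Password: ')
        if len(first) < min_length:
            print('Password must be at least %d characters long' % min_length)
            continue
        second = getpass.getpass('Password (again): ')
        if first != second:
            print('Passwords do not match, please try again')
            continue
        return first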
def main(argv):
env_name = FLAGS.env_name
data_name = FLAGS.data_name
seed = FLAGS.seed
policy_load_dir = FLAGS.policy_load_dir
data_load_dir = FLAGS.data_load_dir
gamma = FLAGS.gamma
assert 0 <= gamma < 1.0
learning_rate = FLAGS.learning_rate
nstep_returns = FLAGS.nstep_returns
num_steps = FLAGS.num_steps
batch_size = FLAGS.batch_size
<DeepExtract>
env = tf_py_environment.TFPyEnvironment(suites.load_mujoco(env_name))
actor_net = actor_distribution_network.ActorDistributionNetwork(env.observation_spec(), env.action_spec(), fc_layer_params=(256, 256), continuous_projection_net=tanh_normal_projection_network.TanhNormalProjectionNetwork)
policy = actor_policy.ActorPolicy(time_step_spec=env.time_step_spec(), action_spec=env.action_spec(), actor_network=actor_net, training=False)
policy = greedy_policy.GreedyPolicy(policy)
checkpoint = tf.train.Checkpoint(policy=policy)
directory = os.path.join(policy_load_dir, env_name, 'train/policy')
checkpoint_filename = tf.train.latest_checkpoint(directory)
print('Loading policy from %s' % checkpoint_filename)
checkpoint.restore(checkpoint_filename).assert_existing_objects_matched()
policy = policy.wrapped_policy
(target_policy, env) = (policy, env)
</DeepExtract>
directory = os.path.join(data_load_dir, 'yifan_%s_%s' % (env_name, data_name))
print('Loading dataset.')
onpolicy_dataset = TFAgentsOnpolicyDataset(env, target_policy, 1000)
write_dataset = TFOffpolicyDataset(onpolicy_dataset.spec)
batch_size = 20
num_trajectory = 10
for batch_num in range(1 + (num_trajectory - 1) // batch_size):
print(batch_num)
num_trajectory_after_batch = min(num_trajectory, batch_size * (batch_num + 1))
num_trajectory_to_get = num_trajectory_after_batch - batch_num * batch_size
(episodes, valid_steps) = onpolicy_dataset.get_episode(batch_size=num_trajectory_to_get)
<DeepExtract>
num_episodes = 1 if tf.rank(valid_steps) == 1 else tf.shape(valid_steps)[0]
for ep_id in range(num_episodes):
if tf.rank(valid_steps) == 1:
this_valid_ids = valid_steps
this_episode = episodes
else:
this_valid_ids = valid_steps[ep_id, ...]
this_episode = tf.nest.map_structure(lambda t: t[ep_id, ...], episodes)
episode_length = tf.shape(this_valid_ids)[0]
for step_id in range(episode_length):
this_valid_id = this_valid_ids[step_id]
this_step = tf.nest.map_structure(lambda t: t[step_id, ...], this_episode)
if this_valid_id:
write_dataset.add_step(this_step)
</DeepExtract>
dataset = write_dataset
'\n dataset = Dataset.load(directory)\n '
all_steps = dataset.get_all_steps()
max_reward = tf.reduce_max(all_steps.reward)
min_reward = tf.reduce_min(all_steps.reward)
print('num loaded steps', dataset.num_steps)
print('num loaded total steps', dataset.num_total_steps)
print('num loaded episodes', dataset.num_episodes)
print('num loaded total episodes', dataset.num_total_episodes)
print('min reward', min_reward, 'max reward', max_reward)
estimate = estimator_lib.get_fullbatch_average(dataset, gamma=gamma)
print('data per step avg', estimate)
dataset = PerturbedDataset(dataset, num_perturbations=None, perturbation_scale=1.0)
value_network = ValueNetwork((dataset.spec.observation, dataset.spec.action), fc_layer_params=(64, 64), output_dim=None)
optimizer = tf.keras.optimizers.Adam(learning_rate)
estimator = NeuralQLearning(dataset.spec, value_network, optimizer, gamma, num_qvalues=None, num_samples=1)
for step in range(num_steps):
batch = dataset.get_step(batch_size, num_steps=nstep_returns + 1)
(loss, _) = estimator.train_step(batch, target_policy)
if step % 100 == 0 or step == num_steps - 1:
print('step', step, 'loss', loss)
estimate = estimator.estimate_average_reward(dataset, target_policy)
print('estimated per step avg', estimate)
print('Done!')
|
def main(argv):
env_name = FLAGS.env_name
data_name = FLAGS.data_name
seed = FLAGS.seed
policy_load_dir = FLAGS.policy_load_dir
data_load_dir = FLAGS.data_load_dir
gamma = FLAGS.gamma
assert 0 <= gamma < 1.0
learning_rate = FLAGS.learning_rate
nstep_returns = FLAGS.nstep_returns
num_steps = FLAGS.num_steps
batch_size = FLAGS.batch_size
env = tf_py_environment.TFPyEnvironment(suites.load_mujoco(env_name))
actor_net = actor_distribution_network.ActorDistributionNetwork(env.observation_spec(), env.action_spec(), fc_layer_params=(256, 256), continuous_projection_net=tanh_normal_projection_network.TanhNormalProjectionNetwork)
policy = actor_policy.ActorPolicy(time_step_spec=env.time_step_spec(), action_spec=env.action_spec(), actor_network=actor_net, training=False)
policy = greedy_policy.GreedyPolicy(policy)
checkpoint = tf.train.Checkpoint(policy=policy)
directory = os.path.join(policy_load_dir, env_name, 'train/policy')
checkpoint_filename = tf.train.latest_checkpoint(directory)
print('Loading policy from %s' % checkpoint_filename)
checkpoint.restore(checkpoint_filename).assert_existing_objects_matched()
policy = policy.wrapped_policy
(target_policy, env) = (policy, env)
directory = os.path.join(data_load_dir, 'yifan_%s_%s' % (env_name, data_name))
print('Loading dataset.')
onpolicy_dataset = TFAgentsOnpolicyDataset(env, target_policy, 1000)
write_dataset = TFOffpolicyDataset(onpolicy_dataset.spec)
batch_size = 20
num_trajectory = 10
for batch_num in range(1 + (num_trajectory - 1) // batch_size):
print(batch_num)
num_trajectory_after_batch = min(num_trajectory, batch_size * (batch_num + 1))
num_trajectory_to_get = num_trajectory_after_batch - batch_num * batch_size
(episodes, valid_steps) = onpolicy_dataset.get_episode(batch_size=num_trajectory_to_get)
num_episodes = 1 if tf.rank(valid_steps) == 1 else tf.shape(valid_steps)[0]
for ep_id in range(num_episodes):
if tf.rank(valid_steps) == 1:
this_valid_ids = valid_steps
this_episode = episodes
else:
this_valid_ids = valid_steps[ep_id, ...]
this_episode = tf.nest.map_structure(lambda t: t[ep_id, ...], episodes)
episode_length = tf.shape(this_valid_ids)[0]
for step_id in range(episode_length):
this_valid_id = this_valid_ids[step_id]
this_step = tf.nest.map_structure(lambda t: t[step_id, ...], this_episode)
if this_valid_id:
write_dataset.add_step(this_step)
dataset = write_dataset
'\n dataset = Dataset.load(directory)\n '
all_steps = dataset.get_all_steps()
max_reward = tf.reduce_max(all_steps.reward)
min_reward = tf.reduce_min(all_steps.reward)
print('num loaded steps', dataset.num_steps)
print('num loaded total steps', dataset.num_total_steps)
print('num loaded episodes', dataset.num_episodes)
print('num loaded total episodes', dataset.num_total_episodes)
print('min reward', min_reward, 'max reward', max_reward)
estimate = estimator_lib.get_fullbatch_average(dataset, gamma=gamma)
print('data per step avg', estimate)
dataset = PerturbedDataset(dataset, num_perturbations=None, perturbation_scale=1.0)
value_network = ValueNetwork((dataset.spec.observation, dataset.spec.action), fc_layer_params=(64, 64), output_dim=None)
optimizer = tf.keras.optimizers.Adam(learning_rate)
estimator = NeuralQLearning(dataset.spec, value_network, optimizer, gamma, num_qvalues=None, num_samples=1)
for step in range(num_steps):
batch = dataset.get_step(batch_size, num_steps=nstep_returns + 1)
(loss, _) = estimator.train_step(batch, target_policy)
if step % 100 == 0 or step == num_steps - 1:
print('step', step, 'loss', loss)
estimate = estimator.estimate_average_reward(dataset, target_policy)
print('estimated per step avg', estimate)
print('Done!')
|
dice_rl
|
positive
|
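A minimal sketch of the tf.train.Checkpoint save/restore pattern the script above relies on, using a plain tf.Variable instead of a TF-Agents policy; the checkpoint directory is a placeholder.
import tensorflow as tf

step = tf.Variable(0, dtype=tf.int64)
checkpoint = tf.train.Checkpoint(step=step)
manager = tf.train.CheckpointManager(checkpoint, '/tmp/demo_checkpoint', max_to_keep=1)
step.assign(42)
manager.save()

# restore into a fresh object graph, mirroring the policy-loading code above
restored_step = tf.Variable(0, dtype=tf.int64)
restore_target = tf.train.Checkpoint(step=restored_step)
latest = tf.train.latest_checkpoint('/tmp/demo_checkpoint')
restore_target.restore(latest).assert_existing_objects_matched()
assert int(restored_step.numpy()) == 42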