| before (stringlengths 0–955k) | after (stringlengths 0–877k) | repo (stringlengths 1–74) | type (stringclasses 1 value) |
|---|---|---|---|
def __init__(self, image, mount_point=None):
"""
Raises CommandDoesNotExistException if the command is not present on the system.
:param image: instance of NspawnImage
:param mount_point: str, directory where the filesystem will be mounted
"""
<DeepExtract>
command_exists('losetup', ['losetup', '-V'], 'losetup is not present on your system')
command_exists('partprobe', ['partprobe', '-v'], 'partprobe is not present on your system')
command_exists('mount', ['mount', '-V'], 'mount is not present on your system')
</DeepExtract>
self.mount_point_exists = False
super(NspawnImageFS, self).__init__(image, mount_point=mount_point)
self.image = image
|
def __init__(self, image, mount_point=None):
"""
Raises CommandDoesNotExistException if the command is not present on the system.
:param image: instance of NspawnImage
:param mount_point: str, directory where the filesystem will be mounted
"""
command_exists('losetup', ['losetup', '-V'], 'losetup is not present on your system')
command_exists('partprobe', ['partprobe', '-v'], 'partprobe is not present on your system')
command_exists('mount', ['mount', '-V'], 'mount is not present on your system')
self.mount_point_exists = False
super(NspawnImageFS, self).__init__(image, mount_point=mount_point)
self.image = image
|
conu
|
positive
|
def __init__(self, actor, critic, a_lr, c_lr, discount=0.99, tau=0.001, center_reward=False, normalize=False):
if actor.is_recurrent or critic.is_recurrent:
self.recurrent = True
else:
self.recurrent = False
self.behavioral_actor = actor
self.behavioral_critic = critic
self.target_actor = copy.deepcopy(actor)
self.target_critic = copy.deepcopy(critic)
<DeepExtract>
for (param, target_param) in zip(self.behavioral_critic.parameters(), self.target_critic.parameters()):
target_param.data.copy_(1.0 * param.data + (1 - 1.0) * target_param.data)
for (param, target_param) in zip(self.behavioral_actor.parameters(), self.target_actor.parameters()):
target_param.data.copy_(1.0 * param.data + (1 - 1.0) * target_param.data)
</DeepExtract>
self.actor_optimizer = torch.optim.Adam(self.behavioral_actor.parameters(), lr=a_lr)
self.critic_optimizer = torch.optim.Adam(self.behavioral_critic.parameters(), lr=c_lr, weight_decay=0.01)
self.discount = discount
self.tau = tau
self.center_reward = center_reward
self.normalize = normalize
|
def __init__(self, actor, critic, a_lr, c_lr, discount=0.99, tau=0.001, center_reward=False, normalize=False):
if actor.is_recurrent or critic.is_recurrent:
self.recurrent = True
else:
self.recurrent = False
self.behavioral_actor = actor
self.behavioral_critic = critic
self.target_actor = copy.deepcopy(actor)
self.target_critic = copy.deepcopy(critic)
for (param, target_param) in zip(self.behavioral_critic.parameters(), self.target_critic.parameters()):
target_param.data.copy_(1.0 * param.data + (1 - 1.0) * target_param.data)
for (param, target_param) in zip(self.behavioral_actor.parameters(), self.target_actor.parameters()):
target_param.data.copy_(1.0 * param.data + (1 - 1.0) * target_param.data)
self.actor_optimizer = torch.optim.Adam(self.behavioral_actor.parameters(), lr=a_lr)
self.critic_optimizer = torch.optim.Adam(self.behavioral_critic.parameters(), lr=c_lr, weight_decay=0.01)
self.discount = discount
self.tau = tau
self.center_reward = center_reward
self.normalize = normalize
|
apex
|
positive
|
def test(self, batch_size, criterion_nlu, criterion_nlg, test_nlu=True, test_nlg=True, sample_size=20, epoch=-1):
nlu_loss = nlg_loss = None
nlu_scorer = nlg_scorer = None
batch_amount = 0
if test_nlu:
nlu_scorer = IntentPredSlotFillScorer(intent_acc=self.with_intent)
nlu_loss = 0
for (b_idx, batch) in enumerate(tqdm(self.test_nlu_data_loader)):
with torch.no_grad():
<DeepExtract>
self.nlu.eval()
inputs = batch['inputs'].to(self.device)
targets = batch['labels'].clone().detach().to(self.device)
(slot_logits, outputs, decisions, intent_logits) = self.nlu(inputs, _BOS, labels=targets, beam_size=sample_size, tf_ratio=teacher_forcing_ratio if not True else 0.0)
intent_prediction = intent_logits.argmax(-1)
if self.dual_inference is not None:
(best, best_intent) = self.dual_inference.forward_nlu(inputs, slot_logits, decisions, intent_logits, intent_prediction, self.nlg)
else:
best = decisions[:, 0].detach()
best_intent = intent_logits.detach()
outputs_indices = best.cpu().clone().numpy()
slot_prediction = outputs_indices
if self.with_intent:
intent_prediction = best_intent.cpu()
intent_prediction = intent_prediction.clone().numpy()
if nlu_scorer:
targets_clone = targets.detach().cpu().long().numpy()
targets_clone = [self.train_data_engine.untokenize_nlu_slot_seq(target) for target in targets_clone]
slot_prediction = [self.train_data_engine.untokenize_nlu_slot_seq(prediction) for prediction in slot_prediction]
nlu_scorer.update(targets_clone, slot_prediction, intent_labels=batch['intent'].clone().cpu().numpy() if self.with_intent else None, intent_prediction=intent_prediction if self.with_intent else None)
if True and result_path:
self._record_nlu_test_result(result_path, inputs, targets_clone, slot_prediction)
(batch_loss, batch_logits, slot_prediction, intent_logits, intent_prediction, _, _, _) = (torch.tensor([0]), slot_logits, outputs_indices, intent_logits, intent_prediction, None, 0, 0)
</DeepExtract>
nlu_loss += batch_loss.item()
batch_amount += 1
nlu_loss /= batch_amount
nlu_scorer.print_avg_scores()
batch_amount = 0
if test_nlg:
nlg_scorer = SequenceScorer()
nlg_loss = 0
for (b_idx, batch) in enumerate(tqdm(self.test_nlg_data_loader)):
with torch.no_grad():
<DeepExtract>
self.nlg.eval()
attrs = (batch['slot_key'].clone().detach().to(self.device), torch.tensor(batch['slot_key_lens']).to(self.device), batch['slot_value'].clone().detach().to(self.device), torch.tensor(batch['slot_value_lens']).to(self.device), batch['intent'].clone().detach().to(self.device))
labels = batch['target'].clone().detach().to(self.device)
refs = batch['multi_refs']
nlu_slot_seqs = batch['dual_nlu_labels'].clone().detach().to(self.device)
(logits, outputs, decisions, semantic_embs) = self.nlg(attrs, _BOS, labels, beam_size=sample_size, tf_ratio=0.0, st=nlg_st)
(batch_size, _, seq_length, vocab_size) = logits.size()
if self.dual_inference is not None:
best = self.dual_inference.forward_nlg(attrs, logits, decisions, nlu_slot_seqs, sample_size, self.nlu)
else:
best = decisions[:, 0].detach()
outputs_indices = best.cpu().clone().numpy()
outputs_indices = np.argmax(outputs_indices, axis=-1)
if nlg_scorer:
labels_clone = labels.detach().cpu().numpy()
nlg_scorer.update(labels_clone, refs, outputs_indices)
(sup_loss, rl_loss, nlg_joint_prob, reward) = criterion_nlg(logits.cpu(), labels.cpu(), decisions=decisions.cpu(), n_supervise=1, calculate_reward=reinforce)
has_rl = isinstance(rl_loss, torch.Tensor)
if not True:
if supervised and isinstance(sup_loss, torch.Tensor):
sup_loss.backward(retain_graph=retain_graph or has_rl or self.dim_loss)
if reinforce and has_rl:
rl_loss.backward(retain_graph=retain_graph)
if optimize and (not self.dim_loss):
if max_norm:
clip_grad_norm_(self.nlg_parameters, max_norm)
self.nlg_optimizer.step()
self.nlg_optimizer.zero_grad()
(batch_loss, batch_logits, batch_decode_result, _, _, _, _) = (sup_loss + rl_loss, logits, outputs, nlg_joint_prob, reward, attrs, semantic_embs)
</DeepExtract>
nlg_loss += batch_loss.item()
batch_amount += 1
nlg_loss /= batch_amount
nlg_scorer.print_avg_scores()
<DeepExtract>
filename = self.valid_log_path if True else self.train_log_path
nlu_loss = 'None' if nlu_loss is None else '{:.4f}'.format(nlu_loss)
nlg_loss = 'None' if nlg_loss is None else '{:.3f}'.format(nlg_loss)
if nlu_scorer is not None:
(intent_acc, slot_f1_p_r) = nlu_scorer.get_avg_scores()
intent_acc = '{:.4f}'.format(intent_acc)
slot_f1 = '{:.4f}'.format(slot_f1_p_r[0])
else:
slot_f1 = '-1.0'
if nlg_scorer is not None:
(_, bleu, _, rouge, _, _) = nlg_scorer.get_avg_scores()
bleu = '{:.4f}'.format(bleu)
rouge = ' '.join(['{:.4f}'.format(s) for s in rouge])
else:
(bleu, rouge) = ('-1.0', '-1.0 -1.0 -1.0')
with open(filename, 'a') as file:
file.write(f'{epoch},{nlu_loss},{nlg_loss},{intent_acc},{slot_f1},{bleu},{rouge}\n')
</DeepExtract>
if test_nlg:
nlg_scorer.write_avg_scores_to_file(self.test_result_path)
if test_nlu:
nlu_scorer.write_avg_scores_to_file(self.test_result_path)
|
def test(self, batch_size, criterion_nlu, criterion_nlg, test_nlu=True, test_nlg=True, sample_size=20, epoch=-1):
nlu_loss = nlg_loss = None
nlu_scorer = nlg_scorer = None
batch_amount = 0
if test_nlu:
nlu_scorer = IntentPredSlotFillScorer(intent_acc=self.with_intent)
nlu_loss = 0
for (b_idx, batch) in enumerate(tqdm(self.test_nlu_data_loader)):
with torch.no_grad():
self.nlu.eval()
inputs = batch['inputs'].to(self.device)
targets = batch['labels'].clone().detach().to(self.device)
(slot_logits, outputs, decisions, intent_logits) = self.nlu(inputs, _BOS, labels=targets, beam_size=sample_size, tf_ratio=teacher_forcing_ratio if not True else 0.0)
intent_prediction = intent_logits.argmax(-1)
if self.dual_inference is not None:
(best, best_intent) = self.dual_inference.forward_nlu(inputs, slot_logits, decisions, intent_logits, intent_prediction, self.nlg)
else:
best = decisions[:, 0].detach()
best_intent = intent_logits.detach()
outputs_indices = best.cpu().clone().numpy()
slot_prediction = outputs_indices
if self.with_intent:
intent_prediction = best_intent.cpu()
intent_prediction = intent_prediction.clone().numpy()
if nlu_scorer:
targets_clone = targets.detach().cpu().long().numpy()
targets_clone = [self.train_data_engine.untokenize_nlu_slot_seq(target) for target in targets_clone]
slot_prediction = [self.train_data_engine.untokenize_nlu_slot_seq(prediction) for prediction in slot_prediction]
nlu_scorer.update(targets_clone, slot_prediction, intent_labels=batch['intent'].clone().cpu().numpy() if self.with_intent else None, intent_prediction=intent_prediction if self.with_intent else None)
if True and result_path:
self._record_nlu_test_result(result_path, inputs, targets_clone, slot_prediction)
(batch_loss, batch_logits, slot_prediction, intent_logits, intent_prediction, _, _, _) = (torch.tensor([0]), slot_logits, outputs_indices, intent_logits, intent_prediction, None, 0, 0)
nlu_loss += batch_loss.item()
batch_amount += 1
nlu_loss /= batch_amount
nlu_scorer.print_avg_scores()
batch_amount = 0
if test_nlg:
nlg_scorer = SequenceScorer()
nlg_loss = 0
for (b_idx, batch) in enumerate(tqdm(self.test_nlg_data_loader)):
with torch.no_grad():
self.nlg.eval()
attrs = (batch['slot_key'].clone().detach().to(self.device), torch.tensor(batch['slot_key_lens']).to(self.device), batch['slot_value'].clone().detach().to(self.device), torch.tensor(batch['slot_value_lens']).to(self.device), batch['intent'].clone().detach().to(self.device))
labels = batch['target'].clone().detach().to(self.device)
refs = batch['multi_refs']
nlu_slot_seqs = batch['dual_nlu_labels'].clone().detach().to(self.device)
(logits, outputs, decisions, semantic_embs) = self.nlg(attrs, _BOS, labels, beam_size=sample_size, tf_ratio=0.0, st=nlg_st)
(batch_size, _, seq_length, vocab_size) = logits.size()
if self.dual_inference is not None:
best = self.dual_inference.forward_nlg(attrs, logits, decisions, nlu_slot_seqs, sample_size, self.nlu)
else:
best = decisions[:, 0].detach()
outputs_indices = best.cpu().clone().numpy()
outputs_indices = np.argmax(outputs_indices, axis=-1)
if nlg_scorer:
labels_clone = labels.detach().cpu().numpy()
nlg_scorer.update(labels_clone, refs, outputs_indices)
(sup_loss, rl_loss, nlg_joint_prob, reward) = criterion_nlg(logits.cpu(), labels.cpu(), decisions=decisions.cpu(), n_supervise=1, calculate_reward=reinforce)
has_rl = isinstance(rl_loss, torch.Tensor)
if not True:
if supervised and isinstance(sup_loss, torch.Tensor):
sup_loss.backward(retain_graph=retain_graph or has_rl or self.dim_loss)
if reinforce and has_rl:
rl_loss.backward(retain_graph=retain_graph)
if optimize and (not self.dim_loss):
if max_norm:
clip_grad_norm_(self.nlg_parameters, max_norm)
self.nlg_optimizer.step()
self.nlg_optimizer.zero_grad()
(batch_loss, batch_logits, batch_decode_result, _, _, _, _) = (sup_loss + rl_loss, logits, outputs, nlg_joint_prob, reward, attrs, semantic_embs)
nlg_loss += batch_loss.item()
batch_amount += 1
nlg_loss /= batch_amount
nlg_scorer.print_avg_scores()
filename = self.valid_log_path if True else self.train_log_path
nlu_loss = 'None' if nlu_loss is None else '{:.4f}'.format(nlu_loss)
nlg_loss = 'None' if nlg_loss is None else '{:.3f}'.format(nlg_loss)
if nlu_scorer is not None:
(intent_acc, slot_f1_p_r) = nlu_scorer.get_avg_scores()
intent_acc = '{:.4f}'.format(intent_acc)
slot_f1 = '{:.4f}'.format(slot_f1_p_r[0])
else:
slot_f1 = '-1.0'
if nlg_scorer is not None:
(_, bleu, _, rouge, _, _) = nlg_scorer.get_avg_scores()
bleu = '{:.4f}'.format(bleu)
rouge = ' '.join(['{:.4f}'.format(s) for s in rouge])
else:
(bleu, rouge) = ('-1.0', '-1.0 -1.0 -1.0')
with open(filename, 'a') as file:
file.write(f'{epoch},{nlu_loss},{nlg_loss},{intent_acc},{slot_f1},{bleu},{rouge}\n')
if test_nlg:
nlg_scorer.write_avg_scores_to_file(self.test_result_path)
if test_nlu:
nlu_scorer.write_avg_scores_to_file(self.test_result_path)
|
DuaLUG
|
positive
|
def test_removed_reduce(self):
"""Builtin reduce is removed - moved to functools."""
code = 'reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])'
new_code = 'from functools import reduce\n' + code
sugg = "'reduce' from functools (not imported)"
<DeepExtract>
(before, after) = (up_to_version((3, 0)), from_version((3, 0)))
</DeepExtract>
<DeepExtract>
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(before, interpreters).contains_current_env():
exc = get_exception(code)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
</DeepExtract>
<DeepExtract>
sugg = sorted(listify(sugg, [], str))
(error_type, error_msg) = NAMEERROR
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(code)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(code, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, sugg, details)
</DeepExtract>
<DeepExtract>
details = 'Running following code :\n---\n{0}\n---'.format(new_code)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(new_code)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
</DeepExtract>
|
def test_removed_reduce(self):
"""Builtin reduce is removed - moved to functools."""
code = 'reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])'
new_code = 'from functools import reduce\n' + code
sugg = "'reduce' from functools (not imported)"
(before, after) = (up_to_version((3, 0)), from_version((3, 0)))
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(before, interpreters).contains_current_env():
exc = get_exception(code)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
sugg = sorted(listify(sugg, [], str))
(error_type, error_msg) = NAMEERROR
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(code)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(code, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, sugg, details)
details = 'Running following code :\n---\n{0}\n---'.format(new_code)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(new_code)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
|
DidYouMean-Python
|
positive
|
def test_get_z(self):
<DeepExtract>
self = Mpfr_t()
mpfr_init2(self, 20)
x = self
</DeepExtract>
mpfr_const_pi(x, MPFR_RNDN)
z = Mpz_t()
rv = mpfr_get_z(z, x, MPFR_RNDD)
self.assertEqual(mpz_get_str(10, z), '3')
self.assertLess(rv, 0)
rv = mpfr_get_z(z, x, MPFR_RNDU)
self.assertEqual(mpz_get_str(10, z), '4')
self.assertGreater(rv, 0)
mpfr_set_d(x, 123.0, MPFR_RNDN)
rv = mpfr_get_z(z, x, MPFR_RNDU)
self.assertEqual(mpz_get_str(10, z), '123')
self.assertEqual(rv, 0)
for x in [posinf(), neginf(), nan()]:
mpfr_clear_flags()
rv = mpfr_get_z(z, x, MPFR_RNDN)
self.assertEqual(mpfr_flags_save(), MPFR_FLAGS_ERANGE)
self.assertEqual(rv, 0)
self.assertEqual(mpz_get_str(10, z), '0')
|
def test_get_z(self):
self = Mpfr_t()
mpfr_init2(self, 20)
x = self
mpfr_const_pi(x, MPFR_RNDN)
z = Mpz_t()
rv = mpfr_get_z(z, x, MPFR_RNDD)
self.assertEqual(mpz_get_str(10, z), '3')
self.assertLess(rv, 0)
rv = mpfr_get_z(z, x, MPFR_RNDU)
self.assertEqual(mpz_get_str(10, z), '4')
self.assertGreater(rv, 0)
mpfr_set_d(x, 123.0, MPFR_RNDN)
rv = mpfr_get_z(z, x, MPFR_RNDU)
self.assertEqual(mpz_get_str(10, z), '123')
self.assertEqual(rv, 0)
for x in [posinf(), neginf(), nan()]:
mpfr_clear_flags()
rv = mpfr_get_z(z, x, MPFR_RNDN)
self.assertEqual(mpfr_flags_save(), MPFR_FLAGS_ERANGE)
self.assertEqual(rv, 0)
self.assertEqual(mpz_get_str(10, z), '0')
|
bigfloat
|
positive
|
@patch('deepparse.parser.address_parser.DataLoader')
@patch('deepparse.parser.address_parser.Experiment')
@patch('deepparse.parser.address_parser.SGD')
@patch('deepparse.parser.address_parser.ModelFactory')
@patch('deepparse.parser.address_parser.EmbeddingsModelFactory')
@patch('deepparse.parser.address_parser.VectorizerFactory')
@patch('deepparse.parser.address_parser.DataProcessorFactory')
@patch('deepparse.parser.address_parser.DataPadder')
def test_givenABPEmbModel_whenTestCPU_thenInstantiateExperimentProperly(self, data_padder_mock, data_processor_factory_mock, vectorizer_factory_mock, embeddings_model_factory_mock, model_factory_mock, optimizer_mock, experiment_mock, data_loader_mock):
model_factory_mock().create.return_value = self.model_mock
self.address_parser = AddressParser(model_type=self.a_bpemb_model_type, device=self.a_device, verbose=self.verbose)
<DeepExtract>
if dataset_container is None:
dataset_container = self.mocked_data_container
if num_workers is None:
num_workers = self.a_number_of_workers
self.address_parser.test(dataset_container, self.a_batch_size, num_workers=num_workers, callbacks=self.a_callbacks_list, seed=self.a_seed)
</DeepExtract>
<DeepExtract>
experiment_mock.assert_called_with('./checkpoint', self.model_mock, device=self.a_device, loss_function=nll_loss, batch_metrics=[accuracy], logging=False)
</DeepExtract>
|
@patch('deepparse.parser.address_parser.DataLoader')
@patch('deepparse.parser.address_parser.Experiment')
@patch('deepparse.parser.address_parser.SGD')
@patch('deepparse.parser.address_parser.ModelFactory')
@patch('deepparse.parser.address_parser.EmbeddingsModelFactory')
@patch('deepparse.parser.address_parser.VectorizerFactory')
@patch('deepparse.parser.address_parser.DataProcessorFactory')
@patch('deepparse.parser.address_parser.DataPadder')
def test_givenABPEmbModel_whenTestCPU_thenInstantiateExperimentProperly(self, data_padder_mock, data_processor_factory_mock, vectorizer_factory_mock, embeddings_model_factory_mock, model_factory_mock, optimizer_mock, experiment_mock, data_loader_mock):
model_factory_mock().create.return_value = self.model_mock
self.address_parser = AddressParser(model_type=self.a_bpemb_model_type, device=self.a_device, verbose=self.verbose)
if dataset_container is None:
dataset_container = self.mocked_data_container
if num_workers is None:
num_workers = self.a_number_of_workers
self.address_parser.test(dataset_container, self.a_batch_size, num_workers=num_workers, callbacks=self.a_callbacks_list, seed=self.a_seed)
experiment_mock.assert_called_with('./checkpoint', self.model_mock, device=self.a_device, loss_function=nll_loss, batch_metrics=[accuracy], logging=False)
|
deepparse
|
positive
|
def simple_test(self, img, img_metas, rescale=False):
<DeepExtract>
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
x = x
</DeepExtract>
outs = self.bbox_head(x)
bbox_inputs = outs + (img_metas, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
bbox_results = [bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for (det_bboxes, det_labels) in bbox_list]
return bbox_results[0]
|
def simple_test(self, img, img_metas, rescale=False):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
x = x
outs = self.bbox_head(x)
bbox_inputs = outs + (img_metas, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
bbox_results = [bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for (det_bboxes, det_labels) in bbox_list]
return bbox_results[0]
|
ATSS-EfficientDet-PyTorch
|
positive
|
def assignments(self, assignment_type=None):
""" Returns the assignments (and problems) for the represented course. """
assignment_type_name = None if assignment_type is None else assignment_type['name']
<DeepExtract>
assignment_type_key = sanitize_cache_key(f"{self.course_id}_{f'assignments_{assignment_type_name}'}")
</DeepExtract>
assignments = cache.get(assignment_type_key)
if not assignments:
<DeepExtract>
all_assignments_key = sanitize_cache_key(f"{self.course_id}_{'assignments'}")
</DeepExtract>
assignments = cache.get(all_assignments_key)
if not assignments:
structure = self._get_structure()
assignments = CourseStructure.course_structure_to_assignments(structure, graded=True, assignment_type=None)
cache.set(all_assignments_key, assignments)
if assignment_type:
assignment_type['name'] = assignment_type['name'].lower()
assignments = [assignment for assignment in assignments if assignment['assignment_type'].lower() == assignment_type['name']]
self.add_child_data_to_parent_blocks(assignments, self._build_graded_answer_distribution_url)
self.attach_data_to_parents(assignments, self._build_assignment_url)
cache.set(assignment_type_key, assignments)
return assignments
|
def assignments(self, assignment_type=None):
""" Returns the assignments (and problems) for the represented course. """
assignment_type_name = None if assignment_type is None else assignment_type['name']
assignment_type_key = sanitize_cache_key(f"{self.course_id}_{f'assignments_{assignment_type_name}'}")
assignments = cache.get(assignment_type_key)
if not assignments:
all_assignments_key = sanitize_cache_key(f"{self.course_id}_{'assignments'}")
assignments = cache.get(all_assignments_key)
if not assignments:
structure = self._get_structure()
assignments = CourseStructure.course_structure_to_assignments(structure, graded=True, assignment_type=None)
cache.set(all_assignments_key, assignments)
if assignment_type:
assignment_type['name'] = assignment_type['name'].lower()
assignments = [assignment for assignment in assignments if assignment['assignment_type'].lower() == assignment_type['name']]
self.add_child_data_to_parent_blocks(assignments, self._build_graded_answer_distribution_url)
self.attach_data_to_parents(assignments, self._build_assignment_url)
cache.set(assignment_type_key, assignments)
return assignments
|
edx-analytics-dashboard
|
positive
|
def __init__(self):
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self.Worker_L2I = worker_L2I(args, train_data_iterator.get_num_labels(), train_data_iterator.get_observation_size())
self.Worker_I2L = worker_I2L(args)
self.image_LM = [tf.placeholder(tf.float32, shape=(args.batch_size,)) for _ in range(args.nr_gpu)]
self.trade_off_I2L = tf.placeholder(tf.float32, shape=())
self.trade_off_L2I = tf.placeholder(tf.float32, shape=())
self.I2L_grads = []
self.train_uidx = 0
<DeepExtract>
self.nlls_I2L = self.Worker_I2L.model.nlls
self.soft_labels = self.Worker_I2L.model.predictions
(nlls_L2I, loss_gen_test) = self.Worker_L2I.GetLoss()
(nlls_L2I_train_bpd_list, nlls_L2I_test_bpd_list, consistent_loss_list) = ([None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)])
(overall_cost_I2L_list, overall_cost_L2I_list, nlls_I2L_batchMean_list) = ([None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)])
(grads_I2L_list, grads_L2I_list) = ([None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)])
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
nlls_L2I_train_bpd = tf.reduce_mean(nlls_L2I[i]) / (np.log(2.0) * 32 * 32 * 3)
nlls_L2I_test_bpd = tf.reduce_mean(loss_gen_test[i]) / (np.log(2.0) * 32 * 32 * 3 * args.batch_size)
if args.L2I_normalization:
consistent_loss = tf.reduce_mean((self.image_LM[i] * np.log(2.0) + self.nlls_I2L[i] + tf.log(0.1) - nlls_L2I[i] / (32.0 * 32 * 3)) ** 2.0)
else:
consistent_loss = tf.reduce_mean((self.image_LM[i] * np.log(2.0) + (self.nlls_I2L[i] + tf.log(0.1) - nlls_L2I[i]) / 3072.0 + args.bias) ** 2.0)
nlls_L2I_train_bpd_list[i] = nlls_L2I_train_bpd
nlls_L2I_test_bpd_list[i] = nlls_L2I_test_bpd
consistent_loss_list[i] = consistent_loss
nlls_I2L_batchMean = tf.reduce_mean(self.nlls_I2L[i])
overall_cost_I2L = nlls_I2L_batchMean + self.trade_off_I2L ** 2.0 * consistent_loss
overall_cost_L2I = nlls_L2I_train_bpd + self.trade_off_L2I ** 2.0 * consistent_loss
nlls_I2L_batchMean_list[i] = nlls_I2L_batchMean
overall_cost_I2L_list[i] = overall_cost_I2L
overall_cost_L2I_list[i] = overall_cost_L2I
if args.oneside is None:
grads_I2L_list[i] = tf.gradients(overall_cost_I2L, self.Worker_I2L.model.trainable_variables)
grads_L2I_list[i] = tf.gradients(overall_cost_L2I, self.Worker_L2I.all_params)
elif args.oneside == 'I2L':
grads_I2L_list[i] = tf.gradients(overall_cost_I2L, self.Worker_I2L.model.trainable_variables)
elif args.oneside == 'L2I':
grads_L2I_list[i] = tf.gradients(overall_cost_L2I, self.Worker_L2I.all_params)
with tf.device('/gpu:0'):
for i in range(1, args.nr_gpu):
nlls_L2I_train_bpd_list[0] += nlls_L2I_train_bpd_list[i]
nlls_L2I_test_bpd_list[0] += nlls_L2I_test_bpd_list[i]
consistent_loss_list[0] += consistent_loss_list[i]
overall_cost_I2L_list[0] += overall_cost_I2L_list[i]
overall_cost_L2I_list[0] += overall_cost_L2I_list[i]
nlls_I2L_batchMean_list[0] += nlls_I2L_batchMean_list[i]
if args.oneside != 'L2I':
for j in range(len(grads_I2L_list[0])):
grads_I2L_list[0][j] += grads_I2L_list[i][j]
if args.oneside != 'I2L':
for j in range(len(grads_L2I_list[0])):
grads_L2I_list[0][j] += grads_L2I_list[i][j]
if args.oneside != 'L2I':
for j in range(len(grads_I2L_list[0])):
grads_I2L_list[0][j] /= args.nr_gpu * 1.0
if args.oneside != 'I2L':
for j in range(len(grads_L2I_list[0])):
grads_L2I_list[0][j] /= args.nr_gpu * 1.0
if args.oneside is None:
self.Worker_I2L.model.Update(grads_I2L_list[0])
self.Worker_L2I.Update(grads_L2I_list[0])
elif args.oneside == 'I2L':
self.Worker_I2L.model.Update(grads_I2L_list[0])
elif args.oneside == 'L2I':
self.Worker_L2I.Update(grads_L2I_list[0])
self.nlls_L2I_train_bpd = nlls_L2I_train_bpd_list[0] / args.nr_gpu
self.nlls_L2I_test_bpd = nlls_L2I_test_bpd_list[0] / args.nr_gpu
self.consistent_loss = consistent_loss_list[0] / args.nr_gpu
self.nlls_I2L_batchMean = nlls_I2L_batchMean_list[0] / args.nr_gpu
self.overall_cost_I2L = overall_cost_I2L_list[0] / args.nr_gpu
self.overall_cost_L2I = overall_cost_L2I_list[0] / args.nr_gpu
self.Worker_L2I.build_sample_from_model()
</DeepExtract>
self.lr_l2i = self.Worker_L2I.args.learning_rate
self.current_epoch = 0
self.assign_op = lambda ref_, val_: tf.assign(ref_, val_)
|
def __init__(self):
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self.Worker_L2I = worker_L2I(args, train_data_iterator.get_num_labels(), train_data_iterator.get_observation_size())
self.Worker_I2L = worker_I2L(args)
self.image_LM = [tf.placeholder(tf.float32, shape=(args.batch_size,)) for _ in range(args.nr_gpu)]
self.trade_off_I2L = tf.placeholder(tf.float32, shape=())
self.trade_off_L2I = tf.placeholder(tf.float32, shape=())
self.I2L_grads = []
self.train_uidx = 0
self.nlls_I2L = self.Worker_I2L.model.nlls
self.soft_labels = self.Worker_I2L.model.predictions
(nlls_L2I, loss_gen_test) = self.Worker_L2I.GetLoss()
(nlls_L2I_train_bpd_list, nlls_L2I_test_bpd_list, consistent_loss_list) = ([None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)])
(overall_cost_I2L_list, overall_cost_L2I_list, nlls_I2L_batchMean_list) = ([None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)])
(grads_I2L_list, grads_L2I_list) = ([None for _ in xrange(args.nr_gpu)], [None for _ in xrange(args.nr_gpu)])
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
nlls_L2I_train_bpd = tf.reduce_mean(nlls_L2I[i]) / (np.log(2.0) * 32 * 32 * 3)
nlls_L2I_test_bpd = tf.reduce_mean(loss_gen_test[i]) / (np.log(2.0) * 32 * 32 * 3 * args.batch_size)
if args.L2I_normalization:
consistent_loss = tf.reduce_mean((self.image_LM[i] * np.log(2.0) + self.nlls_I2L[i] + tf.log(0.1) - nlls_L2I[i] / (32.0 * 32 * 3)) ** 2.0)
else:
consistent_loss = tf.reduce_mean((self.image_LM[i] * np.log(2.0) + (self.nlls_I2L[i] + tf.log(0.1) - nlls_L2I[i]) / 3072.0 + args.bias) ** 2.0)
nlls_L2I_train_bpd_list[i] = nlls_L2I_train_bpd
nlls_L2I_test_bpd_list[i] = nlls_L2I_test_bpd
consistent_loss_list[i] = consistent_loss
nlls_I2L_batchMean = tf.reduce_mean(self.nlls_I2L[i])
overall_cost_I2L = nlls_I2L_batchMean + self.trade_off_I2L ** 2.0 * consistent_loss
overall_cost_L2I = nlls_L2I_train_bpd + self.trade_off_L2I ** 2.0 * consistent_loss
nlls_I2L_batchMean_list[i] = nlls_I2L_batchMean
overall_cost_I2L_list[i] = overall_cost_I2L
overall_cost_L2I_list[i] = overall_cost_L2I
if args.oneside is None:
grads_I2L_list[i] = tf.gradients(overall_cost_I2L, self.Worker_I2L.model.trainable_variables)
grads_L2I_list[i] = tf.gradients(overall_cost_L2I, self.Worker_L2I.all_params)
elif args.oneside == 'I2L':
grads_I2L_list[i] = tf.gradients(overall_cost_I2L, self.Worker_I2L.model.trainable_variables)
elif args.oneside == 'L2I':
grads_L2I_list[i] = tf.gradients(overall_cost_L2I, self.Worker_L2I.all_params)
with tf.device('/gpu:0'):
for i in range(1, args.nr_gpu):
nlls_L2I_train_bpd_list[0] += nlls_L2I_train_bpd_list[i]
nlls_L2I_test_bpd_list[0] += nlls_L2I_test_bpd_list[i]
consistent_loss_list[0] += consistent_loss_list[i]
overall_cost_I2L_list[0] += overall_cost_I2L_list[i]
overall_cost_L2I_list[0] += overall_cost_L2I_list[i]
nlls_I2L_batchMean_list[0] += nlls_I2L_batchMean_list[i]
if args.oneside != 'L2I':
for j in range(len(grads_I2L_list[0])):
grads_I2L_list[0][j] += grads_I2L_list[i][j]
if args.oneside != 'I2L':
for j in range(len(grads_L2I_list[0])):
grads_L2I_list[0][j] += grads_L2I_list[i][j]
if args.oneside != 'L2I':
for j in range(len(grads_I2L_list[0])):
grads_I2L_list[0][j] /= args.nr_gpu * 1.0
if args.oneside != 'I2L':
for j in range(len(grads_L2I_list[0])):
grads_L2I_list[0][j] /= args.nr_gpu * 1.0
if args.oneside is None:
self.Worker_I2L.model.Update(grads_I2L_list[0])
self.Worker_L2I.Update(grads_L2I_list[0])
elif args.oneside == 'I2L':
self.Worker_I2L.model.Update(grads_I2L_list[0])
elif args.oneside == 'L2I':
self.Worker_L2I.Update(grads_L2I_list[0])
self.nlls_L2I_train_bpd = nlls_L2I_train_bpd_list[0] / args.nr_gpu
self.nlls_L2I_test_bpd = nlls_L2I_test_bpd_list[0] / args.nr_gpu
self.consistent_loss = consistent_loss_list[0] / args.nr_gpu
self.nlls_I2L_batchMean = nlls_I2L_batchMean_list[0] / args.nr_gpu
self.overall_cost_I2L = overall_cost_I2L_list[0] / args.nr_gpu
self.overall_cost_L2I = overall_cost_L2I_list[0] / args.nr_gpu
self.Worker_L2I.build_sample_from_model()
self.lr_l2i = self.Worker_L2I.args.learning_rate
self.current_epoch = 0
self.assign_op = lambda ref_, val_: tf.assign(ref_, val_)
|
DualLearning
|
positive
|
def rotate_and_crop(cv_image, old_pos_cor, max_angle=0, random_crop=True, random_width=0.2, random_height=0.2, **kwargs):
"""
online crop image, rotate the text horizontally and crop image
Args:
cv_image (np.array): image
old_pos_cor (list): bounding box coordinate
max_angle (int): max rotate angle
random_crop (bool): whether to random crop, training stage sets True
random_width (int): random width, expand roi_width range (1+[0, random_width])*roi_width
random_height (int): random height, expand roi_height range(1+[0, random_height])*roi_height
**kwargs (None): backup parameter
Returns:
np.array: image after rotation and crop
"""
random_th = 0.3
pos_cor = old_pos_cor.copy()
<DeepExtract>
pi_num = 3.141592653
pos_cor = list(map(float, pos_cor))
x_1 = pos_cor[0]
y_1 = pos_cor[1]
x_2 = pos_cor[2]
y_2 = pos_cor[3]
x_x = x_2 - x_1
y_y = y_2 - y_1
if x_x == 0.0:
angle_temp = pi_num / 2.0
else:
angle_temp = math.atan(abs(y_y / x_x))
if x_x < 0.0 and y_y >= 0.0:
angle_temp = pi_num - angle_temp
elif x_x < 0.0 and y_y < 0.0:
angle_temp = pi_num + angle_temp
elif x_x >= 0.0 and y_y < 0.0:
angle_temp = pi_num * 2.0 - angle_temp
else:
angle_temp = math.atan(abs(y_y / x_x))
rotate_angle = angle_temp / pi_num * 180
</DeepExtract>
if random.random() > random_th:
random_angle = (random.random() * 2 - 1) * max_angle
rotate_angle += random_angle
(height, width) = cv_image.shape[:2]
<DeepExtract>
(x_1, y_1, x_2, y_2, x_3, y_3, x_4, y_4) = pos_cor
dst_x1 = max(0, min(x_1, x_2, x_3, x_4))
dst_y1 = max(0, min(y_1, y_2, y_3, y_4))
dst_x2 = min(width - 1, max(x_1, x_2, x_3, x_4))
dst_y2 = min(height - 1, max(y_1, y_2, y_3, y_4))
(dst_x, dst_y) = (dst_x1, dst_y1)
width = dst_x2 - dst_x1 + 1
height = dst_y2 - dst_y1 + 1
(roi_x, roi_y, roi_width, roi_height) = [dst_x, dst_y, width, height]
</DeepExtract>
roi_larger_x = int(max(0, roi_x - roi_width / 2))
roi_larger_y = int(max(0, roi_y - roi_height))
right_x = int(min(width, roi_x + roi_width / 2 * 3))
bottom_y = int(min(height, roi_y + 2 * roi_height))
roi_larger_img = cv_image[roi_larger_y:bottom_y, roi_larger_x:right_x, :]
for label_i in range(4):
pos_cor[2 * label_i] -= roi_larger_x
pos_cor[2 * label_i + 1] -= roi_larger_y
(roi_larger_h, roi_larger_w) = roi_larger_img.shape[:2]
radis = int(math.sqrt(roi_larger_h ** 2 + roi_larger_w ** 2)) + 2
larger_img_for_rotate = np.zeros((radis, radis, 3))
roi_x = int((radis - roi_larger_w) / 2)
roi_y = int((radis - roi_larger_h) / 2)
larger_img_for_rotate[roi_y:roi_y + roi_larger_h, roi_x:roi_x + roi_larger_w, :] = roi_larger_img
for label_i in range(4):
pos_cor[2 * label_i] += roi_x
pos_cor[2 * label_i + 1] += roi_y
(height, width) = larger_img_for_rotate.shape[:2]
rotate_matrix = cv2.getRotationMatrix2D((height / 2, width / 2), rotate_angle, 1)
rotate_img = cv2.warpAffine(larger_img_for_rotate, rotate_matrix, (height, width))
<DeepExtract>
box2 = [0] * 8
(box2[0], box2[1]) = trans_rot_affine(rotate_matrix, pos_cor[0], pos_cor[1])
(box2[2], box2[3]) = trans_rot_affine(rotate_matrix, pos_cor[2], pos_cor[3])
(box2[4], box2[5]) = trans_rot_affine(rotate_matrix, pos_cor[4], pos_cor[5])
(box2[6], box2[7]) = trans_rot_affine(rotate_matrix, pos_cor[6], pos_cor[7])
pos_cor = box2
</DeepExtract>
(height, width) = rotate_img.shape[:2]
<DeepExtract>
(x_1, y_1, x_2, y_2, x_3, y_3, x_4, y_4) = pos_cor
dst_x1 = max(0, min(x_1, x_2, x_3, x_4))
dst_y1 = max(0, min(y_1, y_2, y_3, y_4))
dst_x2 = min(width - 1, max(x_1, x_2, x_3, x_4))
dst_y2 = min(height - 1, max(y_1, y_2, y_3, y_4))
(dst_x, dst_y) = (dst_x1, dst_y1)
width = dst_x2 - dst_x1 + 1
height = dst_y2 - dst_y1 + 1
(roi_x, roi_y, roi_width, roi_height) = [dst_x, dst_y, width, height]
</DeepExtract>
if random_crop and random.random() > random_th:
add_width = int(random_width * roi_width * random.random())
add_height = int(random_height * roi_height * random.random())
per_w = random.random()
per_h = random.random()
roi_x = max(0, roi_x - int(per_w * add_width))
roi_y = max(0, roi_y - int(per_h * add_height))
x_rt = min(width - 1, roi_x + add_width + roi_width)
y_rt = min(height - 1, roi_y + add_height + roi_height)
roi_width = max(1, x_rt - roi_x)
roi_height = max(1, y_rt - roi_y)
roi_img = rotate_img[roi_y:roi_y + roi_height, roi_x:roi_x + roi_width, :]
return roi_img
|
def rotate_and_crop(cv_image, old_pos_cor, max_angle=0, random_crop=True, random_width=0.2, random_height=0.2, **kwargs):
"""
online crop image, rotate the text horizontally and crop image
Args:
cv_image (np.array): image
old_pos_cor (list): bounding box coordinate
max_angle (int): max rotate angle
random_crop (bool): whether to random crop, training stage sets True
random_width (int): random width, expand roi_width range (1+[0, random_width])*roi_width
random_height (int): random height, expand roi_height range(1+[0, random_height])*roi_height
**kwargs (None): backup parameter
Returns:
np.array: image after rotation and crop
"""
random_th = 0.3
pos_cor = old_pos_cor.copy()
pi_num = 3.141592653
pos_cor = list(map(float, pos_cor))
x_1 = pos_cor[0]
y_1 = pos_cor[1]
x_2 = pos_cor[2]
y_2 = pos_cor[3]
x_x = x_2 - x_1
y_y = y_2 - y_1
if x_x == 0.0:
angle_temp = pi_num / 2.0
else:
angle_temp = math.atan(abs(y_y / x_x))
if x_x < 0.0 and y_y >= 0.0:
angle_temp = pi_num - angle_temp
elif x_x < 0.0 and y_y < 0.0:
angle_temp = pi_num + angle_temp
elif x_x >= 0.0 and y_y < 0.0:
angle_temp = pi_num * 2.0 - angle_temp
else:
angle_temp = math.atan(abs(y_y / x_x))
rotate_angle = angle_temp / pi_num * 180
if random.random() > random_th:
random_angle = (random.random() * 2 - 1) * max_angle
rotate_angle += random_angle
(height, width) = cv_image.shape[:2]
(x_1, y_1, x_2, y_2, x_3, y_3, x_4, y_4) = pos_cor
dst_x1 = max(0, min(x_1, x_2, x_3, x_4))
dst_y1 = max(0, min(y_1, y_2, y_3, y_4))
dst_x2 = min(width - 1, max(x_1, x_2, x_3, x_4))
dst_y2 = min(height - 1, max(y_1, y_2, y_3, y_4))
(dst_x, dst_y) = (dst_x1, dst_y1)
width = dst_x2 - dst_x1 + 1
height = dst_y2 - dst_y1 + 1
(roi_x, roi_y, roi_width, roi_height) = [dst_x, dst_y, width, height]
roi_larger_x = int(max(0, roi_x - roi_width / 2))
roi_larger_y = int(max(0, roi_y - roi_height))
right_x = int(min(width, roi_x + roi_width / 2 * 3))
bottom_y = int(min(height, roi_y + 2 * roi_height))
roi_larger_img = cv_image[roi_larger_y:bottom_y, roi_larger_x:right_x, :]
for label_i in range(4):
pos_cor[2 * label_i] -= roi_larger_x
pos_cor[2 * label_i + 1] -= roi_larger_y
(roi_larger_h, roi_larger_w) = roi_larger_img.shape[:2]
radis = int(math.sqrt(roi_larger_h ** 2 + roi_larger_w ** 2)) + 2
larger_img_for_rotate = np.zeros((radis, radis, 3))
roi_x = int((radis - roi_larger_w) / 2)
roi_y = int((radis - roi_larger_h) / 2)
larger_img_for_rotate[roi_y:roi_y + roi_larger_h, roi_x:roi_x + roi_larger_w, :] = roi_larger_img
for label_i in range(4):
pos_cor[2 * label_i] += roi_x
pos_cor[2 * label_i + 1] += roi_y
(height, width) = larger_img_for_rotate.shape[:2]
rotate_matrix = cv2.getRotationMatrix2D((height / 2, width / 2), rotate_angle, 1)
rotate_img = cv2.warpAffine(larger_img_for_rotate, rotate_matrix, (height, width))
box2 = [0] * 8
(box2[0], box2[1]) = trans_rot_affine(rotate_matrix, pos_cor[0], pos_cor[1])
(box2[2], box2[3]) = trans_rot_affine(rotate_matrix, pos_cor[2], pos_cor[3])
(box2[4], box2[5]) = trans_rot_affine(rotate_matrix, pos_cor[4], pos_cor[5])
(box2[6], box2[7]) = trans_rot_affine(rotate_matrix, pos_cor[6], pos_cor[7])
pos_cor = box2
(height, width) = rotate_img.shape[:2]
(x_1, y_1, x_2, y_2, x_3, y_3, x_4, y_4) = pos_cor
dst_x1 = max(0, min(x_1, x_2, x_3, x_4))
dst_y1 = max(0, min(y_1, y_2, y_3, y_4))
dst_x2 = min(width - 1, max(x_1, x_2, x_3, x_4))
dst_y2 = min(height - 1, max(y_1, y_2, y_3, y_4))
(dst_x, dst_y) = (dst_x1, dst_y1)
width = dst_x2 - dst_x1 + 1
height = dst_y2 - dst_y1 + 1
(roi_x, roi_y, roi_width, roi_height) = [dst_x, dst_y, width, height]
if random_crop and random.random() > random_th:
add_width = int(random_width * roi_width * random.random())
add_height = int(random_height * roi_height * random.random())
per_w = random.random()
per_h = random.random()
roi_x = max(0, roi_x - int(per_w * add_width))
roi_y = max(0, roi_y - int(per_h * add_height))
x_rt = min(width - 1, roi_x + add_width + roi_width)
y_rt = min(height - 1, roi_y + add_height + roi_height)
roi_width = max(1, x_rt - roi_x)
roi_height = max(1, y_rt - roi_y)
roi_img = rotate_img[roi_y:roi_y + roi_height, roi_x:roi_x + roi_width, :]
return roi_img
|
DAVAR-Lab-OCR
|
positive
|
def test_add(self, mock_call):
"""Verify Veracity can add files."""
<DeepExtract>
self.wc.add(self.path)
</DeepExtract>
calls = [call(('vv', 'add', self.path))]
mock_call.assert_has_calls(calls)
|
def test_add(self, mock_call):
"""Verify Veracity can add files."""
self.wc.add(self.path)
calls = [call(('vv', 'add', self.path))]
mock_call.assert_has_calls(calls)
|
doorstop
|
positive
|
def test_forward1(self):
(N, C) = (8, 3)
<DeepExtract>
if H is not None:
x = np.random.randn(N, C, H, W).astype(dtype)
else:
x = np.random.randn(N, C).astype(dtype)
gamma = np.random.randn(C).astype(dtype)
beta = np.random.randn(C).astype(dtype)
mean = np.random.randn(C).astype(dtype)
var = np.abs(np.random.randn(C).astype(dtype))
(x, gamma, beta, mean, var) = (x, gamma, beta, mean, var)
</DeepExtract>
cy = chainer.links.BatchNormalization(3)(x)
y = dezero.layers.BatchNorm()(x)
self.assertTrue(array_allclose(y.data, cy.data))
|
def test_forward1(self):
(N, C) = (8, 3)
if H is not None:
x = np.random.randn(N, C, H, W).astype(dtype)
else:
x = np.random.randn(N, C).astype(dtype)
gamma = np.random.randn(C).astype(dtype)
beta = np.random.randn(C).astype(dtype)
mean = np.random.randn(C).astype(dtype)
var = np.abs(np.random.randn(C).astype(dtype))
(x, gamma, beta, mean, var) = (x, gamma, beta, mean, var)
cy = chainer.links.BatchNormalization(3)(x)
y = dezero.layers.BatchNorm()(x)
self.assertTrue(array_allclose(y.data, cy.data))
|
deep-learning-from-scratch-3
|
positive
|
def covariance_to_internal_jacobian(external_values, constr):
"""Jacobian of ``covariance_to_internal``.
For reference see docstring of ``jacobian_covariance_from_internal``. In
comparison to that function, however, here we want to differentiate the
reverse graph
external --> cov --> cholesky --> internal
Again use the vectors :math:`c` and :math:`x` to denote the external and
internal values, respectively. To solve for the jacobian we make use of the
identity
.. math::
\\frac{\\mathrm{d}x}{\\mathrm{d}c} = (\\frac{\\mathrm{d}c}{\\mathrm{d}x})^{-1}
Args:
external_values (np.ndarray): Row-wise half-vectorized covariance matrix
Returns:
deriv: The Jacobian matrix.
"""
cov = cov_params_to_matrix(external_values)
chol = robust_cholesky(cov)
internal = chol[np.tril_indices(len(chol))]
<DeepExtract>
chol = chol_params_to_lower_triangular_matrix(internal)
dim = len(chol)
K = _commutation_matrix(dim)
L = _elimination_matrix(dim)
left = np.eye(dim ** 2) + K
right = np.kron(chol, np.eye(dim))
intermediate = left @ right
deriv = L @ intermediate @ L.T
deriv = deriv
</DeepExtract>
deriv = np.linalg.pinv(deriv)
return deriv
|
def covariance_to_internal_jacobian(external_values, constr):
"""Jacobian of ``covariance_to_internal``.
For reference see docstring of ``jacobian_covariance_from_internal``. In
comparison to that function, however, here we want to differentiate the
reverse graph
external --> cov --> cholesky --> internal
Again use the vectors :math:`c` and :math:`x` to denote the external and
internal values, respectively. To solve for the jacobian we make use of the
identity
.. math::
\\frac{\\mathrm{d}x}{\\mathrm{d}c} = (\\frac{\\mathrm{d}c}{\\mathrm{d}x})^{-1}
Args:
external_values (np.ndarray): Row-wise half-vectorized covariance matrix
Returns:
deriv: The Jacobian matrix.
"""
cov = cov_params_to_matrix(external_values)
chol = robust_cholesky(cov)
internal = chol[np.tril_indices(len(chol))]
chol = chol_params_to_lower_triangular_matrix(internal)
dim = len(chol)
K = _commutation_matrix(dim)
L = _elimination_matrix(dim)
left = np.eye(dim ** 2) + K
right = np.kron(chol, np.eye(dim))
intermediate = left @ right
deriv = L @ intermediate @ L.T
deriv = deriv
deriv = np.linalg.pinv(deriv)
return deriv
|
estimagic
|
positive
|
@unittest.skipIf(Env.is_community(), 'Community can not test mcm operations')
def test_mcm(self):
mcm = F.search_client_mcm()
clusters = mcm.list_clusters()['clusters']
self.assertEqual(len(clusters), 2)
cluster_name = clusters[0]['clusterName']
date = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
if 'TRAVIS' in os.environ:
instance = os.environ['TRAVIS_JOB_NUMBER']
else:
instance = 'unknown'
python_version = platform.python_version().replace('.', '')[:2]
python_version += os.environ.get('TEST_TYPE', '')
def user_id(number):
return 'python{}-{}-{}-{}'.format(python_version, date, instance, number)
mcm.assign_user_id(user_id(0), cluster_name)
mcm.assign_user_ids([user_id(1), user_id(2)], cluster_name)
def get_user_id(number):
while True:
try:
return mcm.get_user_id(user_id(number))
except RequestException as exception:
if exception.status_code != 404:
raise exception
for number in range(3):
self.assertEqual(get_user_id(number), {'userID': user_id(number), 'clusterName': cluster_name, 'nbRecords': 0, 'dataSize': 0})
for number in range(3):
users_ids = [user['userID'] for user in mcm.search_user_ids(user_id(number))['hits']]
self.assertIn(user_id(number), users_ids)
users = mcm.list_user_ids()
self.assertIsInstance(users, dict)
self.assertIsInstance(users['userIDs'], list)
self.assertTrue(len(users['userIDs']) > 0)
users = mcm.get_top_user_ids()
self.assertIsInstance(users, dict)
self.assertIsInstance(users['topUsers'], dict)
self.assertTrue(len(users['topUsers']) > 0)
result = None
def remove_user_id(number):
while True:
try:
return mcm.remove_user_id(user_id(number))
except RequestException as exception:
if exception.status_code != 400:
raise exception
for number in range(3):
<DeepExtract>
while True:
try:
return mcm.remove_user_id(user_id(number))
except RequestException as exception:
if exception.status_code != 400:
raise exception
</DeepExtract>
def assert_remove(number):
while True:
try:
mcm.get_user_id(user_id(number))
except RequestException as exception:
if exception.status_code == 404:
return
for number in range(3):
<DeepExtract>
while True:
try:
mcm.get_user_id(user_id(number))
except RequestException as exception:
if exception.status_code == 404:
return
</DeepExtract>
has_pending_mappings = mcm.has_pending_mappings({'retrieveMappings': True})
self.assertIsNotNone(has_pending_mappings)
self.assertIsInstance(has_pending_mappings['pending'], bool)
self.assertTrue('clusters' in has_pending_mappings)
self.assertIsInstance(has_pending_mappings['clusters'], dict)
has_pending_mappings = mcm.has_pending_mappings({'retrieveMappings': False})
self.assertIsInstance(has_pending_mappings['pending'], bool)
self.assertFalse('clusters' in has_pending_mappings)
has_pending_mappings = mcm.has_pending_mappings()
self.assertIsInstance(has_pending_mappings['pending'], bool)
self.assertFalse('clusters' in has_pending_mappings)
mcm.close()
|
@unittest.skipIf(Env.is_community(), 'Community can not test mcm operations')
def test_mcm(self):
mcm = F.search_client_mcm()
clusters = mcm.list_clusters()['clusters']
self.assertEqual(len(clusters), 2)
cluster_name = clusters[0]['clusterName']
date = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
if 'TRAVIS' in os.environ:
instance = os.environ['TRAVIS_JOB_NUMBER']
else:
instance = 'unknown'
python_version = platform.python_version().replace('.', '')[:2]
python_version += os.environ.get('TEST_TYPE', '')
def user_id(number):
return 'python{}-{}-{}-{}'.format(python_version, date, instance, number)
mcm.assign_user_id(user_id(0), cluster_name)
mcm.assign_user_ids([user_id(1), user_id(2)], cluster_name)
def get_user_id(number):
while True:
try:
return mcm.get_user_id(user_id(number))
except RequestException as exception:
if exception.status_code != 404:
raise exception
for number in range(3):
self.assertEqual(get_user_id(number), {'userID': user_id(number), 'clusterName': cluster_name, 'nbRecords': 0, 'dataSize': 0})
for number in range(3):
users_ids = [user['userID'] for user in mcm.search_user_ids(user_id(number))['hits']]
self.assertIn(user_id(number), users_ids)
users = mcm.list_user_ids()
self.assertIsInstance(users, dict)
self.assertIsInstance(users['userIDs'], list)
self.assertTrue(len(users['userIDs']) > 0)
users = mcm.get_top_user_ids()
self.assertIsInstance(users, dict)
self.assertIsInstance(users['topUsers'], dict)
self.assertTrue(len(users['topUsers']) > 0)
result = None
def remove_user_id(number):
while True:
try:
return mcm.remove_user_id(user_id(number))
except RequestException as exception:
if exception.status_code != 400:
raise exception
for number in range(3):
while True:
try:
return mcm.remove_user_id(user_id(number))
except RequestException as exception:
if exception.status_code != 400:
raise exception
def assert_remove(number):
while True:
try:
mcm.get_user_id(user_id(number))
except RequestException as exception:
if exception.status_code == 404:
return
for number in range(3):
while True:
try:
mcm.get_user_id(user_id(number))
except RequestException as exception:
if exception.status_code == 404:
return
has_pending_mappings = mcm.has_pending_mappings({'retrieveMappings': True})
self.assertIsNotNone(has_pending_mappings)
self.assertIsInstance(has_pending_mappings['pending'], bool)
self.assertTrue('clusters' in has_pending_mappings)
self.assertIsInstance(has_pending_mappings['clusters'], dict)
has_pending_mappings = mcm.has_pending_mappings({'retrieveMappings': False})
self.assertIsInstance(has_pending_mappings['pending'], bool)
self.assertFalse('clusters' in has_pending_mappings)
has_pending_mappings = mcm.has_pending_mappings()
self.assertIsInstance(has_pending_mappings['pending'], bool)
self.assertFalse('clusters' in has_pending_mappings)
mcm.close()
|
algoliasearch-client-python
|
positive
|
def __init__(self, env, format='gif', path=None, metadata=None, enabled=True, base_path=None):
"""Overrides original constructor to add support for generating gifs."""
self.format = format
modes = env.metadata.get('render.modes', [])
self.enabled = enabled
if not self.enabled:
return
self.ansi_mode = False
if 'rgb_array' not in modes:
if 'ansi' in modes:
self.ansi_mode = True
else:
self.enabled = False
return
if path is not None and base_path is not None:
raise error.Error('You can pass one of `path` or `base_path`.')
self.last_frame = None
self.env = env
required_ext = '.json' if self.ansi_mode else '.' + format
if path is None:
if base_path is not None:
path = base_path + required_ext
else:
with tempfile.NamedTemporaryFile(suffix=required_ext, delete=False) as f:
path = f.name
self.path = path
(path_base, actual_ext) = os.path.splitext(self.path)
if actual_ext != required_ext:
hint = " HINT: The environment is text-only, therefore we're recording its text output in a structured JSON format." if self.ansi_mode else ''
raise error.Error('Invalid path given: {} -- must have file extension {}.{}'.format(self.path, required_ext, hint))
<DeepExtract>
open(path, 'a').close()
</DeepExtract>
self.frames_per_sec = env.metadata.get('video.frames_per_second', 30)
self.encoder = None
self.broken = False
self.metadata = metadata or {}
self.metadata['content_type'] = 'video/vnd.openai.ansivid' if self.ansi_mode else 'video/' + self.format
self.metadata_path = '{}.meta.json'.format(path_base)
self.empty = True
|
def __init__(self, env, format='gif', path=None, metadata=None, enabled=True, base_path=None):
"""Overrides original constructor to add support for generating gifs."""
self.format = format
modes = env.metadata.get('render.modes', [])
self.enabled = enabled
if not self.enabled:
return
self.ansi_mode = False
if 'rgb_array' not in modes:
if 'ansi' in modes:
self.ansi_mode = True
else:
self.enabled = False
return
if path is not None and base_path is not None:
raise error.Error('You can pass one of `path` or `base_path`.')
self.last_frame = None
self.env = env
required_ext = '.json' if self.ansi_mode else '.' + format
if path is None:
if base_path is not None:
path = base_path + required_ext
else:
with tempfile.NamedTemporaryFile(suffix=required_ext, delete=False) as f:
path = f.name
self.path = path
(path_base, actual_ext) = os.path.splitext(self.path)
if actual_ext != required_ext:
hint = " HINT: The environment is text-only, therefore we're recording its text output in a structured JSON format." if self.ansi_mode else ''
raise error.Error('Invalid path given: {} -- must have file extension {}.{}'.format(self.path, required_ext, hint))
open(path, 'a').close()
self.frames_per_sec = env.metadata.get('video.frames_per_second', 30)
self.encoder = None
self.broken = False
self.metadata = metadata or {}
self.metadata['content_type'] = 'video/vnd.openai.ansivid' if self.ansi_mode else 'video/' + self.format
self.metadata_path = '{}.meta.json'.format(path_base)
self.empty = True
|
cherry
|
positive
|
def test_fail_toomany(self):
thingnode = colander.SchemaNode(colander.String())
thingnode2 = colander.SchemaNode(colander.String())
class MySchema(colander.SequenceSchema):
thing = thingnode
thing2 = thingnode2
<DeepExtract>
from colander import Invalid
try:
MySchema(*arg, **kw)
except Invalid as e:
e = e
else:
raise AssertionError('Invalid not raised')
</DeepExtract>
self.assertEqual(e.msg, 'Sequence schemas must have exactly one child node')
|
def test_fail_toomany(self):
thingnode = colander.SchemaNode(colander.String())
thingnode2 = colander.SchemaNode(colander.String())
class MySchema(colander.SequenceSchema):
thing = thingnode
thing2 = thingnode2
from colander import Invalid
try:
MySchema(*arg, **kw)
except Invalid as e:
e = e
else:
raise AssertionError('Invalid not raised')
self.assertEqual(e.msg, 'Sequence schemas must have exactly one child node')
|
colander
|
positive
|
def VAELoss(loss: str, vocab_size: int, beta_start: float, beta_end: float, beta_steps: int):
"""Add beta * KL to the loss and return relevant loss layer."""
<DeepExtract>
if loss == 'multi':
layer = deepr.layers.MultiLogLikelihood(inputs=('logits', 'targetPositivesOneHot'), outputs='loss')
elif loss == 'l2':
layer = L2Loss(inputs=('logits', 'targetPositivesOneHot'), outputs='loss')
elif loss == 'multi_css':
layer = MultiLogLikelihoodCSS(vocab_size=vocab_size)
elif loss == 'bpr':
layer = BPRLoss(vocab_size=vocab_size)
elif loss == 'ns':
layer = NegativeSampling(vocab_size=vocab_size)
else:
raise ValueError(f"Unknown loss option {loss} (must be 'multi', 'multi_css' or 'bpr')")
layer = layer
</DeepExtract>
return deepr.layers.DAG(deepr.layers.Select(inputs=tuple(list(layer.inputs) + ['KL'])), layer, deepr.layers.AddWithWeight(inputs=('loss', 'KL'), outputs='loss', start=beta_start, end=beta_end, steps=beta_steps), deepr.layers.Select(inputs=layer.outputs))
|
def VAELoss(loss: str, vocab_size: int, beta_start: float, beta_end: float, beta_steps: int):
"""Add beta * KL to the loss and return relevant loss layer."""
if loss == 'multi':
layer = deepr.layers.MultiLogLikelihood(inputs=('logits', 'targetPositivesOneHot'), outputs='loss')
elif loss == 'l2':
layer = L2Loss(inputs=('logits', 'targetPositivesOneHot'), outputs='loss')
elif loss == 'multi_css':
layer = MultiLogLikelihoodCSS(vocab_size=vocab_size)
elif loss == 'bpr':
layer = BPRLoss(vocab_size=vocab_size)
elif loss == 'ns':
layer = NegativeSampling(vocab_size=vocab_size)
else:
raise ValueError(f"Unknown loss option {loss} (must be 'multi', 'multi_css' or 'bpr')")
layer = layer
return deepr.layers.DAG(deepr.layers.Select(inputs=tuple(list(layer.inputs) + ['KL'])), layer, deepr.layers.AddWithWeight(inputs=('loss', 'KL'), outputs='loss', start=beta_start, end=beta_end, steps=beta_steps), deepr.layers.Select(inputs=layer.outputs))
|
deepr
|
positive
|
def forward(self, x, y=None):
"""
Forward
:param x: Input signal.
:param y: Target outputs
:return: Output or hidden states
"""
batch_size = x.size()[0]
time_length = x.size()[1]
if self._with_bias:
<DeepExtract>
if x.is_cuda:
bias = Variable(torch.ones((x.size()[0], x.size()[1], 1), dtype=self.dtype).cuda(), requires_grad=False)
else:
bias = Variable(torch.ones((x.size()[0], x.size()[1], 1), dtype=self.dtype), requires_grad=False)
x = torch.cat((bias, x), dim=2)
</DeepExtract>
if self.training:
for b in range(batch_size):
if not self._averaged:
self.xTx.data.add_(x[b].t().mm(x[b]).data)
self.xTy.data.add_(x[b].t().mm(y[b]).data)
else:
self.xTx.data.add_((x[b].t().mm(x[b]) / time_length).data)
self.xTy.data.add_((x[b].t().mm(y[b]) / time_length).data)
self._n_samples += 1.0
if self._with_bias:
return x[:, :, 1:]
else:
return x
elif not self.training:
outputs = Variable(torch.zeros(batch_size, time_length, self._output_dim, dtype=self._dtype), requires_grad=False)
outputs = outputs.cuda() if self.w_out.is_cuda else outputs
for b in range(batch_size):
outputs[b] = torch.mm(self.w_out, x[b].t()).t()
if self._softmax_output:
return self._softmax(outputs)
elif self._normalize_output:
return torch.abs(outputs) / torch.sum(torch.abs(outputs), axis=2).reshape(outputs.size(0), outputs.size(1), 1)
else:
return outputs
|
def forward(self, x, y=None):
"""
Forward
:param x: Input signal.
:param y: Target outputs
:return: Output or hidden states
"""
batch_size = x.size()[0]
time_length = x.size()[1]
if self._with_bias:
if x.is_cuda:
bias = Variable(torch.ones((x.size()[0], x.size()[1], 1), dtype=self.dtype).cuda(), requires_grad=False)
else:
bias = Variable(torch.ones((x.size()[0], x.size()[1], 1), dtype=self.dtype), requires_grad=False)
x = torch.cat((bias, x), dim=2)
if self.training:
for b in range(batch_size):
if not self._averaged:
self.xTx.data.add_(x[b].t().mm(x[b]).data)
self.xTy.data.add_(x[b].t().mm(y[b]).data)
else:
self.xTx.data.add_((x[b].t().mm(x[b]) / time_length).data)
self.xTy.data.add_((x[b].t().mm(y[b]) / time_length).data)
self._n_samples += 1.0
if self._with_bias:
return x[:, :, 1:]
else:
return x
elif not self.training:
outputs = Variable(torch.zeros(batch_size, time_length, self._output_dim, dtype=self._dtype), requires_grad=False)
outputs = outputs.cuda() if self.w_out.is_cuda else outputs
for b in range(batch_size):
outputs[b] = torch.mm(self.w_out, x[b].t()).t()
if self._softmax_output:
return self._softmax(outputs)
elif self._normalize_output:
return torch.abs(outputs) / torch.sum(torch.abs(outputs), axis=2).reshape(outputs.size(0), outputs.size(1), 1)
else:
return outputs
|
EchoTorch
|
positive
|
def evaluate(self, x, target=None):
<DeepExtract>
(batch, _, h, w) = x.size()
assert batch == 1
stride_rate = 2.0 / 3.0
crop_size = self.crop_size
stride = int(crop_size * stride_rate)
with torch.cuda.device_of(x):
scores = x.new().resize_(batch, self.nclass, h, w).zero_().cuda()
for scale in self.scales:
long_size = int(math.ceil(self.base_size * scale))
if h > w:
height = long_size
width = int(1.0 * w * long_size / h + 0.5)
short_size = width
else:
width = long_size
height = int(1.0 * h * long_size / w + 0.5)
short_size = height
'\n short_size = int(math.ceil(self.base_size * scale))\n if h > w:\n width = short_size\n height = int(1.0 * h * short_size / w)\n long_size = height\n else:\n height = short_size\n width = int(1.0 * w * short_size / h)\n long_size = width\n '
cur_img = resize_image(x, height, width, **self.module._up_kwargs)
if long_size <= crop_size:
pad_img = pad_image(cur_img, self.module.mean, self.module.std, crop_size)
outputs = module_inference(self.module, pad_img, self.flip)
outputs = crop_image(outputs, 0, height, 0, width)
else:
if short_size < crop_size:
pad_img = pad_image(cur_img, self.module.mean, self.module.std, crop_size)
else:
pad_img = cur_img
(_, _, ph, pw) = pad_img.size()
assert ph >= height and pw >= width
h_grids = int(math.ceil(1.0 * (ph - crop_size) / stride)) + 1
w_grids = int(math.ceil(1.0 * (pw - crop_size) / stride)) + 1
with torch.cuda.device_of(x):
outputs = x.new().resize_(batch, self.nclass, ph, pw).zero_().cuda()
count_norm = x.new().resize_(batch, 1, ph, pw).zero_().cuda()
for idh in range(h_grids):
for idw in range(w_grids):
h0 = idh * stride
w0 = idw * stride
h1 = min(h0 + crop_size, ph)
w1 = min(w0 + crop_size, pw)
crop_img = crop_image(pad_img, h0, h1, w0, w1)
pad_crop_img = pad_image(crop_img, self.module.mean, self.module.std, crop_size)
output = module_inference(self.module, pad_crop_img, self.flip)
outputs[:, :, h0:h1, w0:w1] += crop_image(output, 0, h1 - h0, 0, w1 - w0)
count_norm[:, :, h0:h1, w0:w1] += 1
assert (count_norm == 0).sum() == 0
outputs = outputs / count_norm
outputs = outputs[:, :, :height, :width]
score = resize_image(outputs, h, w, **self.module._up_kwargs)
scores += score
pred = scores
</DeepExtract>
if isinstance(pred, (tuple, list)):
pred = pred[0]
if target is None:
return pred
(correct, labeled) = batch_pix_accuracy(pred.data, target.data)
(inter, union) = batch_intersection_union(pred.data, target.data, self.nclass)
return (correct, labeled, inter, union)
|
def evaluate(self, x, target=None):
(batch, _, h, w) = x.size()
assert batch == 1
stride_rate = 2.0 / 3.0
crop_size = self.crop_size
stride = int(crop_size * stride_rate)
with torch.cuda.device_of(x):
scores = x.new().resize_(batch, self.nclass, h, w).zero_().cuda()
for scale in self.scales:
long_size = int(math.ceil(self.base_size * scale))
if h > w:
height = long_size
width = int(1.0 * w * long_size / h + 0.5)
short_size = width
else:
width = long_size
height = int(1.0 * h * long_size / w + 0.5)
short_size = height
'\n short_size = int(math.ceil(self.base_size * scale))\n if h > w:\n width = short_size\n height = int(1.0 * h * short_size / w)\n long_size = height\n else:\n height = short_size\n width = int(1.0 * w * short_size / h)\n long_size = width\n '
cur_img = resize_image(x, height, width, **self.module._up_kwargs)
if long_size <= crop_size:
pad_img = pad_image(cur_img, self.module.mean, self.module.std, crop_size)
outputs = module_inference(self.module, pad_img, self.flip)
outputs = crop_image(outputs, 0, height, 0, width)
else:
if short_size < crop_size:
pad_img = pad_image(cur_img, self.module.mean, self.module.std, crop_size)
else:
pad_img = cur_img
(_, _, ph, pw) = pad_img.size()
assert ph >= height and pw >= width
h_grids = int(math.ceil(1.0 * (ph - crop_size) / stride)) + 1
w_grids = int(math.ceil(1.0 * (pw - crop_size) / stride)) + 1
with torch.cuda.device_of(x):
outputs = x.new().resize_(batch, self.nclass, ph, pw).zero_().cuda()
count_norm = x.new().resize_(batch, 1, ph, pw).zero_().cuda()
for idh in range(h_grids):
for idw in range(w_grids):
h0 = idh * stride
w0 = idw * stride
h1 = min(h0 + crop_size, ph)
w1 = min(w0 + crop_size, pw)
crop_img = crop_image(pad_img, h0, h1, w0, w1)
pad_crop_img = pad_image(crop_img, self.module.mean, self.module.std, crop_size)
output = module_inference(self.module, pad_crop_img, self.flip)
outputs[:, :, h0:h1, w0:w1] += crop_image(output, 0, h1 - h0, 0, w1 - w0)
count_norm[:, :, h0:h1, w0:w1] += 1
assert (count_norm == 0).sum() == 0
outputs = outputs / count_norm
outputs = outputs[:, :, :height, :width]
score = resize_image(outputs, h, w, **self.module._up_kwargs)
scores += score
pred = scores
if isinstance(pred, (tuple, list)):
pred = pred[0]
if target is None:
return pred
(correct, labeled) = batch_pix_accuracy(pred.data, target.data)
(inter, union) = batch_intersection_union(pred.data, target.data, self.nclass)
return (correct, labeled, inter, union)
|
DANet
|
positive
|
def cls_loss(index, label_dict, pred_dict):
pmask = label_dict[maps_dict.GT_PMASK][index]
nmask = label_dict[maps_dict.GT_NMASK][index]
gt_cls = label_dict[maps_dict.GT_CLS][index]
cls_mask = pmask + nmask
cls_mask = tf.reduce_max(cls_mask, axis=-1)
pred_cls = pred_dict[maps_dict.PRED_CLS][index]
norm_param = tf.maximum(1.0, tf.reduce_sum(cls_mask))
if self.cls_activation == 'Sigmoid':
gt_cls = tf.cast(tf.one_hot(gt_cls - 1, depth=len(self.cls_list), on_value=1, off_value=0, axis=-1), tf.float32)
if self.cls_loss_type == 'Is-Not':
if self.cls_activation == 'Softmax':
cls_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=gt_cls, logits=pred_cls)
else:
cls_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=gt_cls, logits=pred_cls)
cls_loss = tf.reduce_mean(cls_loss, axis=-1)
elif self.cls_loss_type == 'Focal-loss':
cls_loss = model_util.focal_loss_producer(pred_cls, gt_cls)
cls_loss = tf.reduce_mean(cls_loss, axis=-1)
elif self.cls_loss_type == 'Center-ness':
base_xyz = pred_dict[maps_dict.KEY_OUTPUT_XYZ][index]
base_xyz = tf.stop_gradient(base_xyz)
assigned_boxes_3d = label_dict[maps_dict.GT_BOXES_ANCHORS_3D][index]
<DeepExtract>
(bs, pts_num, _) = base_xyz.get_shape().as_list()
assigned_boxes_3d = tf.reduce_sum(assigned_boxes_3d * tf.expand_dims(pmask, axis=-1), axis=2)
pmask = tf.reduce_max(pmask, axis=2)
canonical_xyz = base_xyz - assigned_boxes_3d[:, :, :3]
canonical_xyz = tf.reshape(canonical_xyz, [bs * pts_num, 1, 3])
rys = tf.reshape(assigned_boxes_3d[:, :, -1], [bs * pts_num])
canonical_xyz = rotate_points(canonical_xyz, -rys)
canonical_xyz = tf.reshape(canonical_xyz, [bs, pts_num, 3])
distance_front = assigned_boxes_3d[:, :, 3] / 2.0 - canonical_xyz[:, :, 0]
distance_back = canonical_xyz[:, :, 0] + assigned_boxes_3d[:, :, 3] / 2.0
distance_bottom = 0 - canonical_xyz[:, :, 1]
distance_top = canonical_xyz[:, :, 1] + assigned_boxes_3d[:, :, 4]
distance_left = assigned_boxes_3d[:, :, 5] / 2.0 - canonical_xyz[:, :, 2]
distance_right = canonical_xyz[:, :, 2] + assigned_boxes_3d[:, :, 5] / 2.0
ctr_ness_l = tf.minimum(distance_front, distance_back) / tf.maximum(distance_front, distance_back) * pmask
ctr_ness_w = tf.minimum(distance_left, distance_right) / tf.maximum(distance_left, distance_right) * pmask
ctr_ness_h = tf.minimum(distance_bottom, distance_top) / tf.maximum(distance_bottom, distance_top) * pmask
ctr_ness = tf.maximum(ctr_ness_l * ctr_ness_h * ctr_ness_w, epsilon)
ctr_ness = tf.pow(ctr_ness, 1 / 3.0)
(min_ctr_ness, max_ctr_ness) = self.ctr_ness_range
ctr_ness_range = max_ctr_ness - min_ctr_ness
ctr_ness *= ctr_ness_range
ctr_ness += min_ctr_ness
ctr_ness = ctr_ness
</DeepExtract>
gt_cls = gt_cls * tf.expand_dims(ctr_ness, axis=-1)
cls_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=gt_cls, logits=pred_cls)
cls_loss = tf.reduce_mean(cls_loss, axis=-1)
cls_loss = tf.reduce_sum(cls_loss * cls_mask) / norm_param
cls_loss = tf.identity(cls_loss, 'cls_loss%d' % index)
tf.summary.scalar('cls_loss%d' % index, cls_loss)
tf.add_to_collection(tf.GraphKeys.LOSSES, cls_loss)
|
def cls_loss(index, label_dict, pred_dict):
pmask = label_dict[maps_dict.GT_PMASK][index]
nmask = label_dict[maps_dict.GT_NMASK][index]
gt_cls = label_dict[maps_dict.GT_CLS][index]
cls_mask = pmask + nmask
cls_mask = tf.reduce_max(cls_mask, axis=-1)
pred_cls = pred_dict[maps_dict.PRED_CLS][index]
norm_param = tf.maximum(1.0, tf.reduce_sum(cls_mask))
if self.cls_activation == 'Sigmoid':
gt_cls = tf.cast(tf.one_hot(gt_cls - 1, depth=len(self.cls_list), on_value=1, off_value=0, axis=-1), tf.float32)
if self.cls_loss_type == 'Is-Not':
if self.cls_activation == 'Softmax':
cls_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=gt_cls, logits=pred_cls)
else:
cls_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=gt_cls, logits=pred_cls)
cls_loss = tf.reduce_mean(cls_loss, axis=-1)
elif self.cls_loss_type == 'Focal-loss':
cls_loss = model_util.focal_loss_producer(pred_cls, gt_cls)
cls_loss = tf.reduce_mean(cls_loss, axis=-1)
elif self.cls_loss_type == 'Center-ness':
base_xyz = pred_dict[maps_dict.KEY_OUTPUT_XYZ][index]
base_xyz = tf.stop_gradient(base_xyz)
assigned_boxes_3d = label_dict[maps_dict.GT_BOXES_ANCHORS_3D][index]
(bs, pts_num, _) = base_xyz.get_shape().as_list()
assigned_boxes_3d = tf.reduce_sum(assigned_boxes_3d * tf.expand_dims(pmask, axis=-1), axis=2)
pmask = tf.reduce_max(pmask, axis=2)
canonical_xyz = base_xyz - assigned_boxes_3d[:, :, :3]
canonical_xyz = tf.reshape(canonical_xyz, [bs * pts_num, 1, 3])
rys = tf.reshape(assigned_boxes_3d[:, :, -1], [bs * pts_num])
canonical_xyz = rotate_points(canonical_xyz, -rys)
canonical_xyz = tf.reshape(canonical_xyz, [bs, pts_num, 3])
distance_front = assigned_boxes_3d[:, :, 3] / 2.0 - canonical_xyz[:, :, 0]
distance_back = canonical_xyz[:, :, 0] + assigned_boxes_3d[:, :, 3] / 2.0
distance_bottom = 0 - canonical_xyz[:, :, 1]
distance_top = canonical_xyz[:, :, 1] + assigned_boxes_3d[:, :, 4]
distance_left = assigned_boxes_3d[:, :, 5] / 2.0 - canonical_xyz[:, :, 2]
distance_right = canonical_xyz[:, :, 2] + assigned_boxes_3d[:, :, 5] / 2.0
ctr_ness_l = tf.minimum(distance_front, distance_back) / tf.maximum(distance_front, distance_back) * pmask
ctr_ness_w = tf.minimum(distance_left, distance_right) / tf.maximum(distance_left, distance_right) * pmask
ctr_ness_h = tf.minimum(distance_bottom, distance_top) / tf.maximum(distance_bottom, distance_top) * pmask
ctr_ness = tf.maximum(ctr_ness_l * ctr_ness_h * ctr_ness_w, epsilon)
ctr_ness = tf.pow(ctr_ness, 1 / 3.0)
(min_ctr_ness, max_ctr_ness) = self.ctr_ness_range
ctr_ness_range = max_ctr_ness - min_ctr_ness
ctr_ness *= ctr_ness_range
ctr_ness += min_ctr_ness
ctr_ness = ctr_ness
gt_cls = gt_cls * tf.expand_dims(ctr_ness, axis=-1)
cls_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=gt_cls, logits=pred_cls)
cls_loss = tf.reduce_mean(cls_loss, axis=-1)
cls_loss = tf.reduce_sum(cls_loss * cls_mask) / norm_param
cls_loss = tf.identity(cls_loss, 'cls_loss%d' % index)
tf.summary.scalar('cls_loss%d' % index, cls_loss)
tf.add_to_collection(tf.GraphKeys.LOSSES, cls_loss)
|
3DSSD
|
positive
|
def step(self, closure=None, **kargs):
with kargs['timer']('grad.apply_grad', epoch=self.conf.epoch_):
<DeepExtract>
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
param_state = self.state[p]
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(p.data, alpha=weight_decay)
if self.clip_grad:
d_p = self._clip_gradient(d_p, param_state)
if momentum != 0:
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(d_p)
buf.add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.grad.data = d_p
</DeepExtract>
with kargs['timer']('grad.recover_hat_params', epoch=self.conf.epoch_):
(params, _) = get_data(self.param_groups, self.param_names, is_get_grad=False)
(grads, shapes) = get_data(self.param_groups, self.param_names, is_get_grad=True)
with kargs['timer']('grad.compress', epoch=self.conf.epoch_):
<DeepExtract>
(selected_values, selected_indices, n_bits) = ([], [], [])
for ((idx, param_name), grad) in zip(self.param_names, grads):
_grad = grad.data.view(-1) + self.memory_of_grads[param_name]
compress_ratio = self._get_compress_ratio()
(_selected_values, _selected_indices, _n_bits) = compress_or_quantize(grad=_grad, comm_op=self.comm_op, compressor_fn=self.compressor_fn, compress_ratio=compress_ratio, quantize_level=self.quantize_level, is_biased=self.is_biased)
selected_values.append(_selected_values)
selected_indices.append(_selected_indices)
n_bits.append(_n_bits)
if self.is_compress_op:
(_, nmask) = self.compressor_fn.get_mask(_grad, _selected_indices)
self.memory_of_grads[param_name] = _grad * nmask
if self.mask_momentum:
self.state[self.param_groups[idx]['params'][0]]['momentum_buffer'].mul_(nmask.view(grad.size()))
else:
pass
self.selected_shapes = [len(_value) for _value in selected_values]
flatten_selected_values = flatten(selected_values)
flatten_selected_indices = flatten(selected_indices) if selected_indices[0] is not None else None
(selected_values, selected_indices, n_bits) = (flatten_selected_values, flatten_selected_indices, sum(n_bits))
</DeepExtract>
with kargs['timer']('grad.sync', epoch=self.conf.epoch_):
<DeepExtract>
if self.is_compress_op:
message_to_send = torch.cat([selected_values, selected_indices])
if self.comm_device == 'cpu':
message_to_send = message_to_send.cpu().pin_memory()
synced_message = self.world_aggregator._agg(message_to_send, communication_scheme='all_gather')
else:
message_to_send = selected_values
if self.comm_device == 'cpu':
message_to_send = message_to_send.cpu().pin_memory()
synced_message = self.world_aggregator._agg(message_to_send, op='sum', communication_scheme='all_reduce')
message_size = len(message_to_send)
(synced_message, message_size) = (synced_message, message_size)
</DeepExtract>
with kargs['timer']('grad.recover_info', epoch=self.conf.epoch_):
<DeepExtract>
_message_size = int(message_size / 2)
if self.is_compress_op:
empty_grads = torch.zeros_like(flatten(params))
for message in synced_message:
(q_values, q_indices) = self.compressor_fn.uncompress(message[:_message_size], message[_message_size:], self.selected_shapes, shapes)
empty_grads[q_indices] += q_values
_update = empty_grads / self.n_nodes
else:
_update = synced_message / self.n_nodes
updated_flatten_params = flatten(params).add(-self.param_groups[0]['lr'], recover_device(_update, device=flatten(params).device))
updated_flatten_params = updated_flatten_params
</DeepExtract>
with kargs['timer']('grad.update_model', epoch=self.conf.epoch_):
unflatten(params, updated_flatten_params, shapes)
return n_bits
|
def step(self, closure=None, **kargs):
with kargs['timer']('grad.apply_grad', epoch=self.conf.epoch_):
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
param_state = self.state[p]
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(p.data, alpha=weight_decay)
if self.clip_grad:
d_p = self._clip_gradient(d_p, param_state)
if momentum != 0:
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(d_p)
buf.add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.grad.data = d_p
with kargs['timer']('grad.recover_hat_params', epoch=self.conf.epoch_):
(params, _) = get_data(self.param_groups, self.param_names, is_get_grad=False)
(grads, shapes) = get_data(self.param_groups, self.param_names, is_get_grad=True)
with kargs['timer']('grad.compress', epoch=self.conf.epoch_):
(selected_values, selected_indices, n_bits) = ([], [], [])
for ((idx, param_name), grad) in zip(self.param_names, grads):
_grad = grad.data.view(-1) + self.memory_of_grads[param_name]
compress_ratio = self._get_compress_ratio()
(_selected_values, _selected_indices, _n_bits) = compress_or_quantize(grad=_grad, comm_op=self.comm_op, compressor_fn=self.compressor_fn, compress_ratio=compress_ratio, quantize_level=self.quantize_level, is_biased=self.is_biased)
selected_values.append(_selected_values)
selected_indices.append(_selected_indices)
n_bits.append(_n_bits)
if self.is_compress_op:
(_, nmask) = self.compressor_fn.get_mask(_grad, _selected_indices)
self.memory_of_grads[param_name] = _grad * nmask
if self.mask_momentum:
self.state[self.param_groups[idx]['params'][0]]['momentum_buffer'].mul_(nmask.view(grad.size()))
else:
pass
self.selected_shapes = [len(_value) for _value in selected_values]
flatten_selected_values = flatten(selected_values)
flatten_selected_indices = flatten(selected_indices) if selected_indices[0] is not None else None
(selected_values, selected_indices, n_bits) = (flatten_selected_values, flatten_selected_indices, sum(n_bits))
with kargs['timer']('grad.sync', epoch=self.conf.epoch_):
if self.is_compress_op:
message_to_send = torch.cat([selected_values, selected_indices])
if self.comm_device == 'cpu':
message_to_send = message_to_send.cpu().pin_memory()
synced_message = self.world_aggregator._agg(message_to_send, communication_scheme='all_gather')
else:
message_to_send = selected_values
if self.comm_device == 'cpu':
message_to_send = message_to_send.cpu().pin_memory()
synced_message = self.world_aggregator._agg(message_to_send, op='sum', communication_scheme='all_reduce')
message_size = len(message_to_send)
(synced_message, message_size) = (synced_message, message_size)
with kargs['timer']('grad.recover_info', epoch=self.conf.epoch_):
_message_size = int(message_size / 2)
if self.is_compress_op:
empty_grads = torch.zeros_like(flatten(params))
for message in synced_message:
(q_values, q_indices) = self.compressor_fn.uncompress(message[:_message_size], message[_message_size:], self.selected_shapes, shapes)
empty_grads[q_indices] += q_values
_update = empty_grads / self.n_nodes
else:
_update = synced_message / self.n_nodes
updated_flatten_params = flatten(params).add(-self.param_groups[0]['lr'], recover_device(_update, device=flatten(params).device))
updated_flatten_params = updated_flatten_params
with kargs['timer']('grad.update_model', epoch=self.conf.epoch_):
unflatten(params, updated_flatten_params, shapes)
return n_bits
|
ChocoSGD
|
positive
|
def iter(self, k, reverse=False):
c = self.db.cursor()
c.jump(k)
tup = c.get()
if reverse:
<DeepExtract>
raise NotImplementedError
</DeepExtract>
if not tup:
c.jump_back()
tup = c.get()
return itertools.chain((tup,), it) if tup else it
else:
<DeepExtract>
raise NotImplementedError
</DeepExtract>
return itertools.chain((tup,), it) if tup else it
|
def iter(self, k, reverse=False):
c = self.db.cursor()
c.jump(k)
tup = c.get()
if reverse:
raise NotImplementedError
if not tup:
c.jump_back()
tup = c.get()
return itertools.chain((tup,), it) if tup else it
else:
raise NotImplementedError
return itertools.chain((tup,), it) if tup else it
|
acid
|
positive
|
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
super(SABottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
<DeepExtract>
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, stride=stride, bias=False)
</DeepExtract>
self.bn1 = norm_layer(width)
<DeepExtract>
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)
</DeepExtract>
self.bn2 = norm_layer(width)
<DeepExtract>
self.conv3 = nn.Conv2d(width, planes * self.expansion, kernel_size=1, stride=stride, bias=False)
</DeepExtract>
self.bn3 = norm_layer(planes * self.expansion)
self.sa = sa_layer(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
|
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
super(SABottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, stride=stride, bias=False)
self.bn1 = norm_layer(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)
self.bn2 = norm_layer(width)
self.conv3 = nn.Conv2d(width, planes * self.expansion, kernel_size=1, stride=stride, bias=False)
self.bn3 = norm_layer(planes * self.expansion)
self.sa = sa_layer(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
|
awesome-attention-mechanism-in-cv
|
positive
|
def register(self, obj=None, force=False):
if obj is None:
def wrapper(fn_or_class):
name = fn_or_class.__name__
<DeepExtract>
if name in self._obj_map and (not force):
raise KeyError('An object named "{}" was already registered in "{}" registry'.format(name, self._name))
self._obj_map[name] = fn_or_class
</DeepExtract>
return fn_or_class
return wrapper
name = obj.__name__
<DeepExtract>
if name in self._obj_map and (not force):
raise KeyError('An object named "{}" was already registered in "{}" registry'.format(name, self._name))
self._obj_map[name] = obj
</DeepExtract>
|
def register(self, obj=None, force=False):
if obj is None:
def wrapper(fn_or_class):
name = fn_or_class.__name__
if name in self._obj_map and (not force):
raise KeyError('An object named "{}" was already registered in "{}" registry'.format(name, self._name))
self._obj_map[name] = fn_or_class
return fn_or_class
return wrapper
name = obj.__name__
if name in self._obj_map and (not force):
raise KeyError('An object named "{}" was already registered in "{}" registry'.format(name, self._name))
self._obj_map[name] = obj
|
Dassl.pytorch
|
positive
|
def __init__(self, num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs):
super(SqueezeNet, self).__init__()
self.loss = loss
self.feature_dim = 512
if version not in [1.0, 1.1]:
raise ValueError('Unsupported SqueezeNet version {version}:1.0 or 1.1 expected'.format(version=version))
if version == 1.0:
self.features = nn.Sequential(nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256))
else:
self.features = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(128, 32, 128, 128), Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256))
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
<DeepExtract>
if fc_dims is None:
self.feature_dim = 512
self.fc = None
assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims))
layers = []
for dim in fc_dims:
layers.append(nn.Linear(512, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
512 = dim
self.feature_dim = fc_dims[-1]
self.fc = nn.Sequential(*layers)
</DeepExtract>
self.classifier = nn.Linear(self.feature_dim, num_classes)
<DeepExtract>
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
</DeepExtract>
|
def __init__(self, num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs):
super(SqueezeNet, self).__init__()
self.loss = loss
self.feature_dim = 512
if version not in [1.0, 1.1]:
raise ValueError('Unsupported SqueezeNet version {version}:1.0 or 1.1 expected'.format(version=version))
if version == 1.0:
self.features = nn.Sequential(nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256))
else:
self.features = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(128, 32, 128, 128), Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256))
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
if fc_dims is None:
self.feature_dim = 512
self.fc = None
assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims))
layers = []
for dim in fc_dims:
layers.append(nn.Linear(512, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
512 = dim
self.feature_dim = fc_dims[-1]
self.fc = nn.Sequential(*layers)
self.classifier = nn.Linear(self.feature_dim, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
|
deep-person-reid
|
positive
|
def silhouette_samples(X, labels, *, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is 2 ``<= n_labels <= n_samples - 1``.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array-like of shape (n_samples_a, n_samples_a) if metric == "precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array.
labels : array-like of shape (n_samples,)
Label values for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`.
If ``X`` is the distance array itself, use "precomputed" as the metric.
Precomputed distance matrices must have 0 along the diagonal.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array-like of shape (n_samples,)
Silhouette Coefficients for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
(X, labels) = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
if metric == 'precomputed':
atol = np.finfo(X.dtype).eps * 100
if np.any(np.abs(np.diagonal(X)) > atol):
raise ValueError('The precomputed distance matrix contains non-zero elements on the diagonal. Use np.fill_diagonal(X, 0).')
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples = len(labels)
label_freqs = np.bincount(labels)
<DeepExtract>
if not 1 < len(le.classes_) < n_samples:
raise ValueError('Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)' % len(le.classes_))
</DeepExtract>
kwds['metric'] = metric
reduce_func = functools.partial(_silhouette_reduce, labels=labels, label_freqs=label_freqs)
results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
(intra_clust_dists, inter_clust_dists) = results
intra_clust_dists = np.concatenate(intra_clust_dists)
inter_clust_dists = np.concatenate(inter_clust_dists)
denom = (label_freqs - 1).take(labels, mode='clip')
with np.errstate(divide='ignore', invalid='ignore'):
intra_clust_dists /= denom
sil_samples = inter_clust_dists - intra_clust_dists
with np.errstate(divide='ignore', invalid='ignore'):
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
return np.nan_to_num(sil_samples)
|
def silhouette_samples(X, labels, *, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is 2 ``<= n_labels <= n_samples - 1``.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array-like of shape (n_samples_a, n_samples_a) if metric == "precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array.
labels : array-like of shape (n_samples,)
Label values for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`.
If ``X`` is the distance array itself, use "precomputed" as the metric.
Precomputed distance matrices must have 0 along the diagonal.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array-like of shape (n_samples,)
Silhouette Coefficients for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
(X, labels) = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
if metric == 'precomputed':
atol = np.finfo(X.dtype).eps * 100
if np.any(np.abs(np.diagonal(X)) > atol):
raise ValueError('The precomputed distance matrix contains non-zero elements on the diagonal. Use np.fill_diagonal(X, 0).')
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples = len(labels)
label_freqs = np.bincount(labels)
if not 1 < len(le.classes_) < n_samples:
raise ValueError('Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)' % len(le.classes_))
kwds['metric'] = metric
reduce_func = functools.partial(_silhouette_reduce, labels=labels, label_freqs=label_freqs)
results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
(intra_clust_dists, inter_clust_dists) = results
intra_clust_dists = np.concatenate(intra_clust_dists)
inter_clust_dists = np.concatenate(inter_clust_dists)
denom = (label_freqs - 1).take(labels, mode='clip')
with np.errstate(divide='ignore', invalid='ignore'):
intra_clust_dists /= denom
sil_samples = inter_clust_dists - intra_clust_dists
with np.errstate(divide='ignore', invalid='ignore'):
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
return np.nan_to_num(sil_samples)
|
autogoal
|
positive
|
def get(self, request, *args, **kwargs):
allocation_change_obj = get_object_or_404(AllocationChangeRequest, pk=self.kwargs.get('pk'))
allocation_change_form = AllocationChangeForm(initial={'justification': allocation_change_obj.justification, 'end_date_extension': allocation_change_obj.end_date_extension})
allocation_change_form.fields['justification'].disabled = True
if allocation_change_obj.status.name != 'Pending':
allocation_change_form.fields['end_date_extension'].disabled = True
if not self.request.user.is_staff and (not self.request.user.is_superuser):
allocation_change_form.fields['end_date_extension'].disabled = True
note_form = AllocationChangeNoteForm(initial={'notes': allocation_change_obj.notes})
<DeepExtract>
context = super().get_context_data(**kwargs)
pk = self.kwargs.get('pk')
allocation_obj = get_object_or_404(Allocation, pk=pk)
allocation_users = allocation_obj.allocationuser_set.exclude(status__name__in=['Removed']).order_by('user__username')
alloc_attr_set = allocation_obj.get_attribute_set(self.request.user)
attributes_with_usage = [a for a in alloc_attr_set if hasattr(a, 'allocationattributeusage')]
attributes = alloc_attr_set
allocation_changes = allocation_obj.allocationchangerequest_set.all().order_by('-pk')
guage_data = []
invalid_attributes = []
for attribute in attributes_with_usage:
try:
guage_data.append(generate_guauge_data_from_usage(attribute.allocation_attribute_type.name, float(attribute.value), float(attribute.allocationattributeusage.value)))
except ValueError:
logger.error("Allocation attribute '%s' is not an int but has a usage", attribute.allocation_attribute_type.name)
invalid_attributes.append(attribute)
for a in invalid_attributes:
attributes_with_usage.remove(a)
context['allocation_users'] = allocation_users
context['guage_data'] = guage_data
context['attributes_with_usage'] = attributes_with_usage
context['attributes'] = attributes
context['allocation_changes'] = allocation_changes
context['is_allowed_to_update_project'] = allocation_obj.project.has_perm(self.request.user, ProjectPermission.UPDATE)
noteset = allocation_obj.allocationusernote_set
notes = noteset.all() if self.request.user.is_superuser else noteset.filter(is_private=False)
context['notes'] = notes
context['ALLOCATION_ENABLE_ALLOCATION_RENEWAL'] = ALLOCATION_ENABLE_ALLOCATION_RENEWAL
context = context
</DeepExtract>
context['allocation_change_form'] = allocation_change_form
context['note_form'] = note_form
return render(request, self.template_name, context)
|
def get(self, request, *args, **kwargs):
allocation_change_obj = get_object_or_404(AllocationChangeRequest, pk=self.kwargs.get('pk'))
allocation_change_form = AllocationChangeForm(initial={'justification': allocation_change_obj.justification, 'end_date_extension': allocation_change_obj.end_date_extension})
allocation_change_form.fields['justification'].disabled = True
if allocation_change_obj.status.name != 'Pending':
allocation_change_form.fields['end_date_extension'].disabled = True
if not self.request.user.is_staff and (not self.request.user.is_superuser):
allocation_change_form.fields['end_date_extension'].disabled = True
note_form = AllocationChangeNoteForm(initial={'notes': allocation_change_obj.notes})
context = super().get_context_data(**kwargs)
pk = self.kwargs.get('pk')
allocation_obj = get_object_or_404(Allocation, pk=pk)
allocation_users = allocation_obj.allocationuser_set.exclude(status__name__in=['Removed']).order_by('user__username')
alloc_attr_set = allocation_obj.get_attribute_set(self.request.user)
attributes_with_usage = [a for a in alloc_attr_set if hasattr(a, 'allocationattributeusage')]
attributes = alloc_attr_set
allocation_changes = allocation_obj.allocationchangerequest_set.all().order_by('-pk')
guage_data = []
invalid_attributes = []
for attribute in attributes_with_usage:
try:
guage_data.append(generate_guauge_data_from_usage(attribute.allocation_attribute_type.name, float(attribute.value), float(attribute.allocationattributeusage.value)))
except ValueError:
logger.error("Allocation attribute '%s' is not an int but has a usage", attribute.allocation_attribute_type.name)
invalid_attributes.append(attribute)
for a in invalid_attributes:
attributes_with_usage.remove(a)
context['allocation_users'] = allocation_users
context['guage_data'] = guage_data
context['attributes_with_usage'] = attributes_with_usage
context['attributes'] = attributes
context['allocation_changes'] = allocation_changes
context['is_allowed_to_update_project'] = allocation_obj.project.has_perm(self.request.user, ProjectPermission.UPDATE)
noteset = allocation_obj.allocationusernote_set
notes = noteset.all() if self.request.user.is_superuser else noteset.filter(is_private=False)
context['notes'] = notes
context['ALLOCATION_ENABLE_ALLOCATION_RENEWAL'] = ALLOCATION_ENABLE_ALLOCATION_RENEWAL
context = context
context['allocation_change_form'] = allocation_change_form
context['note_form'] = note_form
return render(request, self.template_name, context)
|
coldfront
|
positive
|
def main() -> None:
if not os.path.exists(DIR_LOG):
os.makedirs(DIR_LOG, exist_ok=True)
now = datetime.datetime.now()
now_s = now.strftime('%Y-%m-%d_%H:%M:%S')
fn_log = now_s + '.bugzoo.log'
fn_log = os.path.join(DIR_LOG, fn_log)
def on_error(msg: str, unexpected: bool=False) -> None:
msg = indent(msg, 2)
ftr = '\n\nSee log file for details: {}'.format(fn_log)
hdr = 'An {} occurred during execution:\n\n'
hdr = hdr.format('unexpected error' if unexpected else 'error')
msg = RULE + hdr + msg + ftr
print(msg)
sys.exit(1)
try:
log_formatter = logging.Formatter('%(levelname)s:%(name)s:%(asctime)s: %(message)s')
log_to_file = logging.handlers.WatchedFileHandler(fn_log, mode='w')
log_to_file.setFormatter(log_formatter)
log_to_file.setLevel(logging.DEBUG)
logging.getLogger('bugzoo').setLevel(logging.DEBUG)
logging.getLogger('bugzoo').addHandler(log_to_file)
with BugZooCLI() as app:
app.run()
except BugZooException as err:
logger.exception('An error occurred')
<DeepExtract>
err.message = indent(err.message, 2)
ftr = '\n\nSee log file for details: {}'.format(fn_log)
hdr = 'An {} occurred during execution:\n\n'
hdr = hdr.format('unexpected error' if False else 'error')
err.message = RULE + hdr + err.message + ftr
print(err.message)
sys.exit(1)
</DeepExtract>
except Exception as err:
logger.exception('An unexpected error occurred')
msg = repr(err)
<DeepExtract>
msg = indent(msg, 2)
ftr = '\n\nSee log file for details: {}'.format(fn_log)
hdr = 'An {} occurred during execution:\n\n'
hdr = hdr.format('unexpected error' if True else 'error')
msg = RULE + hdr + msg + ftr
print(msg)
sys.exit(1)
</DeepExtract>
except KeyboardInterrupt:
logger.info('Command cancelled by keyboard interrupt.')
sys.exit(1)
|
def main() -> None:
if not os.path.exists(DIR_LOG):
os.makedirs(DIR_LOG, exist_ok=True)
now = datetime.datetime.now()
now_s = now.strftime('%Y-%m-%d_%H:%M:%S')
fn_log = now_s + '.bugzoo.log'
fn_log = os.path.join(DIR_LOG, fn_log)
def on_error(msg: str, unexpected: bool=False) -> None:
msg = indent(msg, 2)
ftr = '\n\nSee log file for details: {}'.format(fn_log)
hdr = 'An {} occurred during execution:\n\n'
hdr = hdr.format('unexpected error' if unexpected else 'error')
msg = RULE + hdr + msg + ftr
print(msg)
sys.exit(1)
try:
log_formatter = logging.Formatter('%(levelname)s:%(name)s:%(asctime)s: %(message)s')
log_to_file = logging.handlers.WatchedFileHandler(fn_log, mode='w')
log_to_file.setFormatter(log_formatter)
log_to_file.setLevel(logging.DEBUG)
logging.getLogger('bugzoo').setLevel(logging.DEBUG)
logging.getLogger('bugzoo').addHandler(log_to_file)
with BugZooCLI() as app:
app.run()
except BugZooException as err:
logger.exception('An error occurred')
err.message = indent(err.message, 2)
ftr = '\n\nSee log file for details: {}'.format(fn_log)
hdr = 'An {} occurred during execution:\n\n'
hdr = hdr.format('unexpected error' if False else 'error')
err.message = RULE + hdr + err.message + ftr
print(err.message)
sys.exit(1)
except Exception as err:
logger.exception('An unexpected error occurred')
msg = repr(err)
msg = indent(msg, 2)
ftr = '\n\nSee log file for details: {}'.format(fn_log)
hdr = 'An {} occurred during execution:\n\n'
hdr = hdr.format('unexpected error' if True else 'error')
msg = RULE + hdr + msg + ftr
print(msg)
sys.exit(1)
except KeyboardInterrupt:
logger.info('Command cancelled by keyboard interrupt.')
sys.exit(1)
|
BugZoo
|
positive
|
def drawFieldCells(self, cells):
<DeepExtract>
field = self.layout.get('field')
</DeepExtract>
for cell in cells:
((x, y), spriteNames) = cell
if not len(spriteNames):
<DeepExtract>
(char, fg, bg) = self.characters.get(' ', self.defaultChar)
</DeepExtract>
else:
<DeepExtract>
(char, fg, bg) = self.characters.get(spriteNames[0], self.defaultChar)
</DeepExtract>
for spriteName in spriteNames[1:]:
if bg is not None:
break
<DeepExtract>
(_char, _fg, bg) = self.characters.get(spriteName, self.defaultChar)
</DeepExtract>
field.change_cell(x, y, char, TextStyle(fg, bg))
|
def drawFieldCells(self, cells):
field = self.layout.get('field')
for cell in cells:
((x, y), spriteNames) = cell
if not len(spriteNames):
(char, fg, bg) = self.characters.get(' ', self.defaultChar)
else:
(char, fg, bg) = self.characters.get(spriteNames[0], self.defaultChar)
for spriteName in spriteNames[1:]:
if bg is not None:
break
(_char, _fg, bg) = self.characters.get(spriteName, self.defaultChar)
field.change_cell(x, y, char, TextStyle(fg, bg))
|
Asciifarm
|
positive
|
def _create_uniform_grid_exclude_border(n_dim, order):
assert order > 0
assert n_dim > 0
x_data = np.arange(1, order + 1) / (order + 1.0)
<DeepExtract>
[x_data] * n_dim = [np.asarray(arg).reshape(len(arg), -1) for arg in [x_data] * n_dim]
shapes = [arg.shape for arg in [x_data] * n_dim]
size = np.prod(shapes, 0)[0] * np.sum(shapes, 0)[1]
if size > 10 ** 9:
raise MemoryError('Too large sets')
out = [x_data] * n_dim[0]
for arg in [x_data] * n_dim[1:]:
out = np.hstack([np.tile(out, len(arg)).reshape(-1, out.shape[1]), np.tile(arg.T, len(out)).reshape(arg.shape[1], -1).T])
x_data = out
</DeepExtract>
return x_data
|
def _create_uniform_grid_exclude_border(n_dim, order):
assert order > 0
assert n_dim > 0
x_data = np.arange(1, order + 1) / (order + 1.0)
[x_data] * n_dim = [np.asarray(arg).reshape(len(arg), -1) for arg in [x_data] * n_dim]
shapes = [arg.shape for arg in [x_data] * n_dim]
size = np.prod(shapes, 0)[0] * np.sum(shapes, 0)[1]
if size > 10 ** 9:
raise MemoryError('Too large sets')
out = [x_data] * n_dim[0]
for arg in [x_data] * n_dim[1:]:
out = np.hstack([np.tile(out, len(arg)).reshape(-1, out.shape[1]), np.tile(arg.T, len(out)).reshape(arg.shape[1], -1).T])
x_data = out
return x_data
|
deephyper
|
positive
|
def download_file_from_google_drive(file_id, save_path):
"""Download files from google drive.
Reference: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive
Args:
file_id (str): File id.
save_path (str): Save path.
"""
session = requests.Session()
URL = 'https://docs.google.com/uc?export=download'
params = {'id': file_id}
response = session.get(URL, params=params, stream=True)
<DeepExtract>
for (key, value) in response.cookies.items():
if key.startswith('download_warning'):
token = value
token = None
</DeepExtract>
if token:
params['confirm'] = token
response = session.get(URL, params=params, stream=True)
response_file_size = session.get(URL, params=params, stream=True, headers={'Range': 'bytes=0-2'})
if 'Content-Range' in response_file_size.headers:
file_size = int(response_file_size.headers['Content-Range'].split('/')[1])
else:
file_size = None
<DeepExtract>
if file_size is not None:
pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk')
readable_file_size = sizeof_fmt(file_size)
else:
pbar = None
with open(save_path, 'wb') as f:
downloaded_size = 0
for chunk in response.iter_content(chunk_size):
downloaded_size += chunk_size
if pbar is not None:
pbar.update(1)
pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} / {readable_file_size}')
if chunk:
f.write(chunk)
if pbar is not None:
pbar.close()
</DeepExtract>
|
def download_file_from_google_drive(file_id, save_path):
"""Download files from google drive.
Reference: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive
Args:
file_id (str): File id.
save_path (str): Save path.
"""
session = requests.Session()
URL = 'https://docs.google.com/uc?export=download'
params = {'id': file_id}
response = session.get(URL, params=params, stream=True)
for (key, value) in response.cookies.items():
if key.startswith('download_warning'):
token = value
token = None
if token:
params['confirm'] = token
response = session.get(URL, params=params, stream=True)
response_file_size = session.get(URL, params=params, stream=True, headers={'Range': 'bytes=0-2'})
if 'Content-Range' in response_file_size.headers:
file_size = int(response_file_size.headers['Content-Range'].split('/')[1])
else:
file_size = None
if file_size is not None:
pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk')
readable_file_size = sizeof_fmt(file_size)
else:
pbar = None
with open(save_path, 'wb') as f:
downloaded_size = 0
for chunk in response.iter_content(chunk_size):
downloaded_size += chunk_size
if pbar is not None:
pbar.update(1)
pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} / {readable_file_size}')
if chunk:
f.write(chunk)
if pbar is not None:
pbar.close()
|
BasicSR
|
positive
|
def setup_environment():
"""Perform environment setup work. The default setup is a no-op, but this
function allows the user to specify a Python source file that performs
custom setup work that may be necessary to their computing environment.
"""
custom_module_path = os.environ.get('TORCH_DETECTRON_ENV_MODULE')
if custom_module_path:
<DeepExtract>
module = import_file('fcos_core.utils.env.custom_module', custom_module_path)
assert hasattr(module, 'setup_environment') and callable(module.setup_environment), "Custom environment module defined in {} does not have the required callable attribute 'setup_environment'.".format(custom_module_path)
module.setup_environment()
</DeepExtract>
else:
pass
|
def setup_environment():
"""Perform environment setup work. The default setup is a no-op, but this
function allows the user to specify a Python source file that performs
custom setup work that may be necessary to their computing environment.
"""
custom_module_path = os.environ.get('TORCH_DETECTRON_ENV_MODULE')
if custom_module_path:
module = import_file('fcos_core.utils.env.custom_module', custom_module_path)
assert hasattr(module, 'setup_environment') and callable(module.setup_environment), "Custom environment module defined in {} does not have the required callable attribute 'setup_environment'.".format(custom_module_path)
module.setup_environment()
else:
pass
|
EveryPixelMatters
|
positive
|
def actionNewGroup_triggered(self):
if self.backend.sessions is None:
return
self.backend.sessions.newGroup()
oldtext = self.groupComboBox.currentText()
<DeepExtract>
groupsById = self.backend.sessions.groupsById
box = self.groupComboBox
box.clear()
index = 0
setindex = None
for group in sorted(groupsById.keys()):
name = groupsById[group].name
box.addItem(name)
if name == oldtext:
setindex = index
index += 1
if setindex is not None:
box.setCurrentIndex(setindex)
box.setEnabled(True)
</DeepExtract>
self.allGroupsFigureDirty = True
<DeepExtract>
current = self.tabWidget.currentWidget().objectName()
if current == 'allGroupsTab':
self.updateAssignPlot()
elif current == 'oneGroupTab':
self.updateGroupInfo()
elif current == 'compareTab':
self.updateCompareTab()
</DeepExtract>
|
def actionNewGroup_triggered(self):
if self.backend.sessions is None:
return
self.backend.sessions.newGroup()
oldtext = self.groupComboBox.currentText()
groupsById = self.backend.sessions.groupsById
box = self.groupComboBox
box.clear()
index = 0
setindex = None
for group in sorted(groupsById.keys()):
name = groupsById[group].name
box.addItem(name)
if name == oldtext:
setindex = index
index += 1
if setindex is not None:
box.setCurrentIndex(setindex)
box.setEnabled(True)
self.allGroupsFigureDirty = True
current = self.tabWidget.currentWidget().objectName()
if current == 'allGroupsTab':
self.updateAssignPlot()
elif current == 'oneGroupTab':
self.updateGroupInfo()
elif current == 'compareTab':
self.updateCompareTab()
|
combinato
|
positive
|
def Process(self, start_token):
"""Runs the pass on a token stream.
Args:
start_token: The first token in the stream.
"""
if start_token is None:
return
<DeepExtract>
def IsScopeToken(token):
return token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and token.string == 'goog.scope'
scope_tokens = [t for t in start_token if IsScopeToken(t)]
for token in scope_tokens:
scope_context = token.metadata.context
if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
self._MaybeReportError(error.Error(errors.INVALID_USE_OF_GOOG_SCOPE, 'goog.scope call not in global scope', token))
for token in scope_tokens[1:]:
self._MaybeReportError(error.Error(errors.EXTRA_GOOG_SCOPE_USAGE, 'More than one goog.scope call in file.', token))
</DeepExtract>
if self._closurized_namespaces:
context = start_token.metadata.context
root_context = context.GetRoot()
<DeepExtract>
assert root_context.type is ecmametadatapass.EcmaContext.ROOT
global_alias_map = {}
for context in root_context.children:
if context.type == ecmametadatapass.EcmaContext.STATEMENT:
for statement_child in context.children:
if statement_child.type == ecmametadatapass.EcmaContext.VAR:
match = scopeutil.MatchModuleAlias(statement_child)
if match:
symbol = match[1]
if scopeutil.IsInClosurizedNamespace(symbol, self._closurized_namespaces):
global_alias_map[match[0]] = symbol
for context in root_context.children:
self._ProcessBlock(context, global_alias_map)
</DeepExtract>
|
def Process(self, start_token):
"""Runs the pass on a token stream.
Args:
start_token: The first token in the stream.
"""
if start_token is None:
return
def IsScopeToken(token):
return token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and token.string == 'goog.scope'
scope_tokens = [t for t in start_token if IsScopeToken(t)]
for token in scope_tokens:
scope_context = token.metadata.context
if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
self._MaybeReportError(error.Error(errors.INVALID_USE_OF_GOOG_SCOPE, 'goog.scope call not in global scope', token))
for token in scope_tokens[1:]:
self._MaybeReportError(error.Error(errors.EXTRA_GOOG_SCOPE_USAGE, 'More than one goog.scope call in file.', token))
if self._closurized_namespaces:
context = start_token.metadata.context
root_context = context.GetRoot()
assert root_context.type is ecmametadatapass.EcmaContext.ROOT
global_alias_map = {}
for context in root_context.children:
if context.type == ecmametadatapass.EcmaContext.STATEMENT:
for statement_child in context.children:
if statement_child.type == ecmametadatapass.EcmaContext.VAR:
match = scopeutil.MatchModuleAlias(statement_child)
if match:
symbol = match[1]
if scopeutil.IsInClosurizedNamespace(symbol, self._closurized_namespaces):
global_alias_map[match[0]] = symbol
for context in root_context.children:
self._ProcessBlock(context, global_alias_map)
|
closure-linter
|
positive
|
def getAllCas(self):
"""
**Description**
Used to retrieve the list of :code:`(groupId, caContent)` pair for this discovery information. The retrieved
pairs could be from different Greengrass groups. This is designed for users who want to iterate through all
available cores/groups/CAs at the same time, regardless of which group those CAs belong to.
**Syntax**
.. code:: python
myDiscoveryInfo.getAllCas()
**Parameters**
None
**Returns**
List of :code:`(groupId, caContent)` string pair, where :code:`caContent` is the CA content string and
:code:`groupId` is the group id that this CA belongs to.
"""
<DeepExtract>
groups_dict = self.toObjectAtGroupLevel()
group_list = list(groups_dict.values())
</DeepExtract>
ca_list = list()
for group in group_list:
for ca in group.caList:
ca_list.append((group.groupId, ca))
return ca_list
|
def getAllCas(self):
"""
**Description**
Used to retrieve the list of :code:`(groupId, caContent)` pair for this discovery information. The retrieved
pairs could be from different Greengrass groups. This is designed for users who want to iterate through all
available cores/groups/CAs at the same time, regardless of which group those CAs belong to.
**Syntax**
.. code:: python
myDiscoveryInfo.getAllCas()
**Parameters**
None
**Returns**
List of :code:`(groupId, caContent)` string pair, where :code:`caContent` is the CA content string and
:code:`groupId` is the group id that this CA belongs to.
"""
groups_dict = self.toObjectAtGroupLevel()
group_list = list(groups_dict.values())
ca_list = list()
for group in group_list:
for ca in group.caList:
ca_list.append((group.groupId, ca))
return ca_list
|
aws-iot-device-sdk-python
|
positive
|
def reset(self):
self.phase = random.randint(0, self.phaselen)
self.time = 0
self.counter = 0
<DeepExtract>
if self.phase is None:
self.phase = self.phase
if self.phase > self.phaselen:
self.phase = 0
pos = np.copy(self.trajectory.qpos[self.phase * self.simrate])
pos[0] *= self.speed
pos[0] += (self.trajectory.qpos[-1, 0] - self.trajectory.qpos[0, 0]) * self.counter * self.speed
pos[1] = 0
vel = np.copy(self.trajectory.qvel[self.phase * self.simrate])
vel[0] *= self.speed
(qpos, qvel) = (pos, vel)
</DeepExtract>
self.sim.set_qpos(qpos)
self.sim.set_qvel(qvel)
self.cassie_state = self.sim.step_pd(self.u)
self.speed = random.randint(0, 10) / 10
<DeepExtract>
if self.phase is None:
self.phase = self.phase
if self.phase > self.phaselen:
self.phase = 0
pos = np.copy(self.trajectory.qpos[self.phase * self.simrate])
pos[0] *= self.speed
pos[0] += (self.trajectory.qpos[-1, 0] - self.trajectory.qpos[0, 0]) * self.counter * self.speed
pos[1] = 0
vel = np.copy(self.trajectory.qvel[self.phase * self.simrate])
vel[0] *= self.speed
(ref_pos, ref_vel) = (pos, vel)
</DeepExtract>
self.prev_action = ref_pos[self.pos_idx]
return self.get_full_state()
|
def reset(self):
self.phase = random.randint(0, self.phaselen)
self.time = 0
self.counter = 0
if self.phase is None:
self.phase = self.phase
if self.phase > self.phaselen:
self.phase = 0
pos = np.copy(self.trajectory.qpos[self.phase * self.simrate])
pos[0] *= self.speed
pos[0] += (self.trajectory.qpos[-1, 0] - self.trajectory.qpos[0, 0]) * self.counter * self.speed
pos[1] = 0
vel = np.copy(self.trajectory.qvel[self.phase * self.simrate])
vel[0] *= self.speed
(qpos, qvel) = (pos, vel)
self.sim.set_qpos(qpos)
self.sim.set_qvel(qvel)
self.cassie_state = self.sim.step_pd(self.u)
self.speed = random.randint(0, 10) / 10
if self.phase is None:
self.phase = self.phase
if self.phase > self.phaselen:
self.phase = 0
pos = np.copy(self.trajectory.qpos[self.phase * self.simrate])
pos[0] *= self.speed
pos[0] += (self.trajectory.qpos[-1, 0] - self.trajectory.qpos[0, 0]) * self.counter * self.speed
pos[1] = 0
vel = np.copy(self.trajectory.qvel[self.phase * self.simrate])
vel[0] *= self.speed
(ref_pos, ref_vel) = (pos, vel)
self.prev_action = ref_pos[self.pos_idx]
return self.get_full_state()
|
apex
|
positive
|
def _test_clear_input(self):
"""
Tests that a user can clear the search filter to unfilter the results.
"""
<DeepExtract>
search_bar = self.page.q(css='#search-course-list')
self.assertTrue(search_bar.present)
self.clear_all_filters()
self.check_cleared()
search_input = self.driver.find_element_by_id('search-course-list')
search_input.send_keys(Keys.CONTROL, 'a')
search_input.send_keys('search')
clear = self.page.q(css='button.clear')
self.assertTrue(clear.present)
search_input.send_keys(Keys.ENTER)
search_input = self.page.q(css='#search-course-list')
self.assertEqual(search_input.attrs('value'), ['search'])
EmptyPromise(lambda : self.page.q(css='ul.active-filters').present, 'Search performed').fulfill()
active_filters = self.page.q(css='ul.active-filters')
self.assertTrue(active_filters.present)
search_active_filter = self.page.q(css='ul.active-filters li.filter-text_search')
self.assertTrue('search' in search_active_filter.text[0])
course_ids = self.page.q(css='.course-list .course-id')
self.assertFalse(course_ids.present)
alert = self.page.q(css='.list-main .alert-information')
self.assertTrue(alert.present)
self.assertTrue('No courses matched your criteria' in alert.text[0])
</DeepExtract>
clear = self.page.q(css='button.clear')
self.assertTrue(clear.present)
clear.first.click()
<DeepExtract>
EmptyPromise(lambda : self.driver.find_element_by_id('search-course-list').get_attribute('value') != 'search', 'Search input cleared').fulfill()
search_input = self.driver.find_element_by_id('search-course-list')
self.assertNotEqual(search_input.get_attribute('value'), 'search')
EmptyPromise(lambda : not self.page.q(css='ul.active-filters').present, 'Active filters hidden').fulfill()
active_filters = self.page.q(css='ul.active-filters')
self.assertFalse(active_filters.present)
EmptyPromise(lambda : self.page.q(css='.course-list .course-id').present, 'Table unfiltered').fulfill()
course_ids = self.page.q(css='.course-list .course-id')
self.assertTrue(course_ids.present)
self.assertIn(TEST_COURSE_ID, course_ids.text)
</DeepExtract>
|
def _test_clear_input(self):
"""
Tests that a user can clear the search filter to unfilter the results.
"""
search_bar = self.page.q(css='#search-course-list')
self.assertTrue(search_bar.present)
self.clear_all_filters()
self.check_cleared()
search_input = self.driver.find_element_by_id('search-course-list')
search_input.send_keys(Keys.CONTROL, 'a')
search_input.send_keys('search')
clear = self.page.q(css='button.clear')
self.assertTrue(clear.present)
search_input.send_keys(Keys.ENTER)
search_input = self.page.q(css='#search-course-list')
self.assertEqual(search_input.attrs('value'), ['search'])
EmptyPromise(lambda : self.page.q(css='ul.active-filters').present, 'Search performed').fulfill()
active_filters = self.page.q(css='ul.active-filters')
self.assertTrue(active_filters.present)
search_active_filter = self.page.q(css='ul.active-filters li.filter-text_search')
self.assertTrue('search' in search_active_filter.text[0])
course_ids = self.page.q(css='.course-list .course-id')
self.assertFalse(course_ids.present)
alert = self.page.q(css='.list-main .alert-information')
self.assertTrue(alert.present)
self.assertTrue('No courses matched your criteria' in alert.text[0])
clear = self.page.q(css='button.clear')
self.assertTrue(clear.present)
clear.first.click()
EmptyPromise(lambda : self.driver.find_element_by_id('search-course-list').get_attribute('value') != 'search', 'Search input cleared').fulfill()
search_input = self.driver.find_element_by_id('search-course-list')
self.assertNotEqual(search_input.get_attribute('value'), 'search')
EmptyPromise(lambda : not self.page.q(css='ul.active-filters').present, 'Active filters hidden').fulfill()
active_filters = self.page.q(css='ul.active-filters')
self.assertFalse(active_filters.present)
EmptyPromise(lambda : self.page.q(css='.course-list .course-id').present, 'Table unfiltered').fulfill()
course_ids = self.page.q(css='.course-list .course-id')
self.assertTrue(course_ids.present)
self.assertIn(TEST_COURSE_ID, course_ids.text)
|
edx-analytics-dashboard
|
positive
|
def images_preprocessing_avabox(imgs, split, crop_size, spatial_shift_pos, flip_flag=''):
(height, width, _) = imgs[0].shape
if split == 1:
imgs = [cv2.resize(image, (crop_size, crop_size), interpolation=getattr(cv2, cfg.INTERPOLATION)).astype(np.float32) for image in imgs]
if cfg.AVABOX.RANDOM_CROP:
(imgs, _) = imgproc.random_short_side_scale_jitter_list(imgs, min_size=cfg.TRAIN.JITTER_SCALES[0], max_size=cfg.TRAIN.JITTER_SCALES[1])
(imgs, _) = imgproc.random_crop_list(imgs, crop_size, order='HWC')
if flip_flag == '':
(imgs, _) = imgproc.horizontal_flip_list(0.5, imgs, order='HWC')
elif flip_flag == 'y':
(imgs, _) = imgproc.horizontal_flip_list(0.5, imgs, order='HWC', force_flip=True)
else:
assert flip_flag == 'n'
else:
imgs = [cv2.resize(image, (crop_size, crop_size), interpolation=getattr(cv2, cfg.INTERPOLATION)).astype(np.float32) for image in imgs]
if cfg.AVA.FORCE_TEST_FLIP and cfg.DATASET in ['ava', 'avabox']:
(imgs, _) = imgproc.horizontal_flip_list(0.5, imgs, order='HWC', force_flip=True)
imgs = [imgproc.HWC2CHW(img) for img in imgs]
imgs = [img / 255.0 for img in imgs]
imgs = [np.ascontiguousarray(img.reshape((3, crop_size, crop_size))).astype(np.float32) for img in imgs]
if cfg.TRAIN.USE_COLOR_AUGMENTATION and split == 1:
<DeepExtract>
if not cfg.TRAIN.PCA_JITTER_ONLY:
imgs = imgproc.color_jitter_list(imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4)
imgs = imgproc.lighting_list(imgs, alphastd=0.1, eigval=PCA['eigval'], eigvec=np.array(PCA['eigvec']).astype(np.float32))
imgs = imgs
</DeepExtract>
imgs = [imgproc.color_normalization(img, DATA_MEAN, DATA_STD) for img in imgs]
imgs = np.concatenate([np.expand_dims(img, axis=1) for img in imgs], axis=1)
if not cfg.MODEL.USE_BGR:
imgs = imgs[::-1, ...]
return imgs
|
def images_preprocessing_avabox(imgs, split, crop_size, spatial_shift_pos, flip_flag=''):
(height, width, _) = imgs[0].shape
if split == 1:
imgs = [cv2.resize(image, (crop_size, crop_size), interpolation=getattr(cv2, cfg.INTERPOLATION)).astype(np.float32) for image in imgs]
if cfg.AVABOX.RANDOM_CROP:
(imgs, _) = imgproc.random_short_side_scale_jitter_list(imgs, min_size=cfg.TRAIN.JITTER_SCALES[0], max_size=cfg.TRAIN.JITTER_SCALES[1])
(imgs, _) = imgproc.random_crop_list(imgs, crop_size, order='HWC')
if flip_flag == '':
(imgs, _) = imgproc.horizontal_flip_list(0.5, imgs, order='HWC')
elif flip_flag == 'y':
(imgs, _) = imgproc.horizontal_flip_list(0.5, imgs, order='HWC', force_flip=True)
else:
assert flip_flag == 'n'
else:
imgs = [cv2.resize(image, (crop_size, crop_size), interpolation=getattr(cv2, cfg.INTERPOLATION)).astype(np.float32) for image in imgs]
if cfg.AVA.FORCE_TEST_FLIP and cfg.DATASET in ['ava', 'avabox']:
(imgs, _) = imgproc.horizontal_flip_list(0.5, imgs, order='HWC', force_flip=True)
imgs = [imgproc.HWC2CHW(img) for img in imgs]
imgs = [img / 255.0 for img in imgs]
imgs = [np.ascontiguousarray(img.reshape((3, crop_size, crop_size))).astype(np.float32) for img in imgs]
if cfg.TRAIN.USE_COLOR_AUGMENTATION and split == 1:
if not cfg.TRAIN.PCA_JITTER_ONLY:
imgs = imgproc.color_jitter_list(imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4)
imgs = imgproc.lighting_list(imgs, alphastd=0.1, eigval=PCA['eigval'], eigvec=np.array(PCA['eigvec']).astype(np.float32))
imgs = imgs
imgs = [imgproc.color_normalization(img, DATA_MEAN, DATA_STD) for img in imgs]
imgs = np.concatenate([np.expand_dims(img, axis=1) for img in imgs], axis=1)
if not cfg.MODEL.USE_BGR:
imgs = imgs[::-1, ...]
return imgs
|
CRCNN-Action
|
positive
|
def _pyfs_read(filename, offset, size):
"""_pyfs_read(filename, offset, size) -> size bytes starting at offset,
as a string"""
<DeepExtract>
components = filename.split('/')
if len(components) != 2 or components[0] != '':
functions = None
functions = _pyfs_files.get(components[1])
</DeepExtract>
if functions is None:
return None
(do_open, do_read) = functions
return do_read(offset, size)
|
def _pyfs_read(filename, offset, size):
"""_pyfs_read(filename, offset, size) -> size bytes starting at offset,
as a string"""
components = filename.split('/')
if len(components) != 2 or components[0] != '':
functions = None
functions = _pyfs_files.get(components[1])
if functions is None:
return None
(do_open, do_read) = functions
return do_read(offset, size)
|
bits
|
positive
|
def transform(self, data_pack: DataPack, verbose: int=1) -> DataPack:
"""
Apply transformation on data, create `tri-letter` representation.
:param data_pack: Inputs to be preprocessed.
:param verbose: Verbosity.
:return: Transformed data as :class:`DataPack` object.
"""
data_pack = data_pack.copy()
<DeepExtract>
units_ = [units.Tokenize(), units.Lowercase(), units.PuncRemoval(), units.StopRemoval(), units.NgramLetter()]
</DeepExtract>
assert len(units_) == 5, 'Must have 5 pre-processing step in DSSM '
if self._with_word_hashing:
term_index = self._context['vocab_unit'].state['term_index']
units_.append(units.WordHashing(term_index))
func = chain_transform(units_)
data_pack.apply_on_text(func, inplace=True, verbose=verbose)
return data_pack
|
def transform(self, data_pack: DataPack, verbose: int=1) -> DataPack:
"""
Apply transformation on data, create `tri-letter` representation.
:param data_pack: Inputs to be preprocessed.
:param verbose: Verbosity.
:return: Transformed data as :class:`DataPack` object.
"""
data_pack = data_pack.copy()
units_ = [units.Tokenize(), units.Lowercase(), units.PuncRemoval(), units.StopRemoval(), units.NgramLetter()]
assert len(units_) == 5, 'Must have 5 pre-processing step in DSSM '
if self._with_word_hashing:
term_index = self._context['vocab_unit'].state['term_index']
units_.append(units.WordHashing(term_index))
func = chain_transform(units_)
data_pack.apply_on_text(func, inplace=True, verbose=verbose)
return data_pack
|
EMNLP2020
|
positive
|
def mask_aug(mask, config):
"""
mask: uint8 (HxW), 0 (bg), 128 (ignore), 255 (fg)
"""
(oldh, oldw) = mask.shape
if config['flip'] and np.random.rand() > 0.5:
mask = mask[:, ::-1]
assert config['scale'][0] <= config['scale'][1]
if not (config['scale'][0] == 1 and config['scale'][0] == 1):
scale = np.random.uniform(config['scale'][0], config['scale'][1])
(newh, neww) = (int(scale * oldh), int(scale * oldw))
mask = cv2.resize(mask, (neww, newh), interpolation=cv2.INTER_NEAREST)
bbox = [(neww - oldw) // 2, (newh - oldh) // 2, oldw, oldh]
<DeepExtract>
need_squeeze = False
if len(mask.shape) == 2:
mask = mask[:, :, np.newaxis]
need_squeeze = True
assert len((0,)) == mask.shape[2]
(x, y, w, h) = bbox
(x, y, w, h) = (int(x), int(y), int(w), int(h))
(H, W) = mask.shape[:2]
output = np.tile(np.array((0,)), (h, w, 1)).astype(mask.dtype)
if bbox_iou((x, y, x + w, y + h), (0, 0, W, H)) > 0:
output[max(-y, 0):min(H - y, h), max(-x, 0):min(W - x, w), :] = mask[max(y, 0):min(y + h, H), max(x, 0):min(x + w, W), :]
if need_squeeze:
output = np.squeeze(output)
mask = output
</DeepExtract>
return mask
|
def mask_aug(mask, config):
"""
mask: uint8 (HxW), 0 (bg), 128 (ignore), 255 (fg)
"""
(oldh, oldw) = mask.shape
if config['flip'] and np.random.rand() > 0.5:
mask = mask[:, ::-1]
assert config['scale'][0] <= config['scale'][1]
if not (config['scale'][0] == 1 and config['scale'][0] == 1):
scale = np.random.uniform(config['scale'][0], config['scale'][1])
(newh, neww) = (int(scale * oldh), int(scale * oldw))
mask = cv2.resize(mask, (neww, newh), interpolation=cv2.INTER_NEAREST)
bbox = [(neww - oldw) // 2, (newh - oldh) // 2, oldw, oldh]
need_squeeze = False
if len(mask.shape) == 2:
mask = mask[:, :, np.newaxis]
need_squeeze = True
assert len((0,)) == mask.shape[2]
(x, y, w, h) = bbox
(x, y, w, h) = (int(x), int(y), int(w), int(h))
(H, W) = mask.shape[:2]
output = np.tile(np.array((0,)), (h, w, 1)).astype(mask.dtype)
if bbox_iou((x, y, x + w, y + h), (0, 0, W, H)) > 0:
output[max(-y, 0):min(H - y, h), max(-x, 0):min(W - x, w), :] = mask[max(y, 0):min(y + h, H), max(x, 0):min(x + w, W), :]
if need_squeeze:
output = np.squeeze(output)
mask = output
return mask
|
deocclusion
|
positive
|
def generate_report_data(self, params, kwargs, sox_data, report_fmt, node, root_node):
def get_accessed_records(user):
columns = ['record_uid', 'ip_address', 'keeper_version']
rq_filter = {'username': user, 'audit_event_type': 'open_record'}
from keepercommander.commands.aram import API_EVENT_SUMMARY_ROW_LIMIT
rq = {'command': 'get_audit_event_reports', 'report_type': 'span', 'scope': 'enterprise', 'aggregate': ['last_created'], 'limit': API_EVENT_SUMMARY_ROW_LIMIT, 'filter': rq_filter, 'columns': columns}
records_accessed = dict()
def update_records_accessed(events):
for event in events:
r_uid = event.get('record_uid')
if r_uid not in records_accessed:
records_accessed.update({r_uid: event})
def get_events(max_ts):
rq_filter['created'] = {'max': max_ts}
rs = api.communicate(params, rq)
return rs.get('audit_event_overview_report_rows')
max_ts = int(datetime.datetime.now().timestamp())
while True:
<DeepExtract>
rq_filter['created'] = {'max': max_ts}
rs = api.communicate(params, rq)
events = rs.get('audit_event_overview_report_rows')
</DeepExtract>
<DeepExtract>
for event in events:
r_uid = event.get('record_uid')
if r_uid not in records_accessed:
records_accessed.update({r_uid: event})
</DeepExtract>
if len(events) >= API_EVENT_SUMMARY_ROW_LIMIT:
earliest_event = events[-1]
max_ts = int(earliest_event.get('last_created'))
else:
break
return records_accessed
def fill_table(access_events):
table = []
for rec in access_events.values():
rec_uid = rec.get('record_uid')
sox_rec = sox_data.get_records().get(rec_uid)
rec_info = sox_rec.data if sox_rec else {}
rec_owner = sox_data.get_record_owner(rec_uid)
row = [rec_uid, rec_info.get('title'), rec_info.get('url', '').rstrip('/'), rec_owner and rec_owner.email, rec.get('ip_address'), rec.get('keeper_version'), datetime.datetime.fromtimestamp(int(rec.get('last_created')))]
table.append(row)
return table
report_data = []
user_lookup = {user.get('enterprise_user_id'): user.get('username') for user in params.enterprise.get('users')}
username_or_id = kwargs.get('user')
username = user_lookup.get(int(username_or_id)) if username_or_id.isdigit() else username_or_id
try:
<DeepExtract>
columns = ['record_uid', 'ip_address', 'keeper_version']
rq_filter = {'username': username, 'audit_event_type': 'open_record'}
from keepercommander.commands.aram import API_EVENT_SUMMARY_ROW_LIMIT
rq = {'command': 'get_audit_event_reports', 'report_type': 'span', 'scope': 'enterprise', 'aggregate': ['last_created'], 'limit': API_EVENT_SUMMARY_ROW_LIMIT, 'filter': rq_filter, 'columns': columns}
records_accessed = dict()
def update_records_accessed(events):
for event in events:
r_uid = event.get('record_uid')
if r_uid not in records_accessed:
records_accessed.update({r_uid: event})
def get_events(max_ts):
rq_filter['created'] = {'max': max_ts}
rs = api.communicate(params, rq)
accessed = rs.get('audit_event_overview_report_rows')
max_ts = int(datetime.datetime.now().timestamp())
while True:
events = get_events(max_ts)
update_records_accessed(events)
if len(events) >= API_EVENT_SUMMARY_ROW_LIMIT:
earliest_event = events[-1]
max_ts = int(earliest_event.get('last_created'))
else:
break
accessed = records_accessed
</DeepExtract>
<DeepExtract>
table = []
for rec in accessed.values():
rec_uid = rec.get('record_uid')
sox_rec = sox_data.get_records().get(rec_uid)
rec_info = sox_rec.data if sox_rec else {}
rec_owner = sox_data.get_record_owner(rec_uid)
row = [rec_uid, rec_info.get('title'), rec_info.get('url', '').rstrip('/'), rec_owner and rec_owner.email, rec.get('ip_address'), rec.get('keeper_version'), datetime.datetime.fromtimestamp(int(rec.get('last_created')))]
table.append(row)
report_data = table
</DeepExtract>
except Error as e:
logging.warning(f'User {username_or_id} not found, error = "{e.message}"')
return report_data
|
def generate_report_data(self, params, kwargs, sox_data, report_fmt, node, root_node):
def get_accessed_records(user):
columns = ['record_uid', 'ip_address', 'keeper_version']
rq_filter = {'username': user, 'audit_event_type': 'open_record'}
from keepercommander.commands.aram import API_EVENT_SUMMARY_ROW_LIMIT
rq = {'command': 'get_audit_event_reports', 'report_type': 'span', 'scope': 'enterprise', 'aggregate': ['last_created'], 'limit': API_EVENT_SUMMARY_ROW_LIMIT, 'filter': rq_filter, 'columns': columns}
records_accessed = dict()
def update_records_accessed(events):
for event in events:
r_uid = event.get('record_uid')
if r_uid not in records_accessed:
records_accessed.update({r_uid: event})
def get_events(max_ts):
rq_filter['created'] = {'max': max_ts}
rs = api.communicate(params, rq)
return rs.get('audit_event_overview_report_rows')
max_ts = int(datetime.datetime.now().timestamp())
while True:
rq_filter['created'] = {'max': max_ts}
rs = api.communicate(params, rq)
events = rs.get('audit_event_overview_report_rows')
for event in events:
r_uid = event.get('record_uid')
if r_uid not in records_accessed:
records_accessed.update({r_uid: event})
if len(events) >= API_EVENT_SUMMARY_ROW_LIMIT:
earliest_event = events[-1]
max_ts = int(earliest_event.get('last_created'))
else:
break
return records_accessed
def fill_table(access_events):
table = []
for rec in access_events.values():
rec_uid = rec.get('record_uid')
sox_rec = sox_data.get_records().get(rec_uid)
rec_info = sox_rec.data if sox_rec else {}
rec_owner = sox_data.get_record_owner(rec_uid)
row = [rec_uid, rec_info.get('title'), rec_info.get('url', '').rstrip('/'), rec_owner and rec_owner.email, rec.get('ip_address'), rec.get('keeper_version'), datetime.datetime.fromtimestamp(int(rec.get('last_created')))]
table.append(row)
return table
report_data = []
user_lookup = {user.get('enterprise_user_id'): user.get('username') for user in params.enterprise.get('users')}
username_or_id = kwargs.get('user')
username = user_lookup.get(int(username_or_id)) if username_or_id.isdigit() else username_or_id
try:
columns = ['record_uid', 'ip_address', 'keeper_version']
rq_filter = {'username': username, 'audit_event_type': 'open_record'}
from keepercommander.commands.aram import API_EVENT_SUMMARY_ROW_LIMIT
rq = {'command': 'get_audit_event_reports', 'report_type': 'span', 'scope': 'enterprise', 'aggregate': ['last_created'], 'limit': API_EVENT_SUMMARY_ROW_LIMIT, 'filter': rq_filter, 'columns': columns}
records_accessed = dict()
def update_records_accessed(events):
for event in events:
r_uid = event.get('record_uid')
if r_uid not in records_accessed:
records_accessed.update({r_uid: event})
def get_events(max_ts):
rq_filter['created'] = {'max': max_ts}
rs = api.communicate(params, rq)
accessed = rs.get('audit_event_overview_report_rows')
max_ts = int(datetime.datetime.now().timestamp())
while True:
events = get_events(max_ts)
update_records_accessed(events)
if len(events) >= API_EVENT_SUMMARY_ROW_LIMIT:
earliest_event = events[-1]
max_ts = int(earliest_event.get('last_created'))
else:
break
accessed = records_accessed
table = []
for rec in accessed.values():
rec_uid = rec.get('record_uid')
sox_rec = sox_data.get_records().get(rec_uid)
rec_info = sox_rec.data if sox_rec else {}
rec_owner = sox_data.get_record_owner(rec_uid)
row = [rec_uid, rec_info.get('title'), rec_info.get('url', '').rstrip('/'), rec_owner and rec_owner.email, rec.get('ip_address'), rec.get('keeper_version'), datetime.datetime.fromtimestamp(int(rec.get('last_created')))]
table.append(row)
report_data = table
except Error as e:
logging.warning(f'User {username_or_id} not found, error = "{e.message}"')
return report_data
|
Commander
|
positive
|
@flaky(max_runs=3)
def test_given_simple_causal_chain_with_linear_relationships_when_attribute_anomaly_scores_with_it_score_then_returns_qualitatively_correct_results():
num_training_samples = 5000
X0 = np.random.normal(0, 1, num_training_samples)
X1 = X0 + np.random.normal(0, 1, num_training_samples)
X2 = X1 + np.random.normal(0, 1, num_training_samples)
X3 = X2 + np.random.normal(0, 1, num_training_samples)
training_data = pd.DataFrame({'X0': X0, 'X1': X1, 'X2': X2, 'X3': X3})
causal_model = InvertibleStructuralCausalModel(nx.DiGraph([('X0', 'X1'), ('X1', 'X2'), ('X2', 'X3')]))
auto.assign_causal_mechanisms(causal_model, training_data, auto.AssignmentQuality.GOOD)
<DeepExtract>
pass
</DeepExtract>
anomaly_data = pd.DataFrame({'X0': np.array([0, 10, 10]), 'X1': np.array([10, 10, 10]), 'X2': np.array([10, 10, 10]), 'X3': np.array([10, 10, 20])})
scores = attribute_anomalies(causal_model, 'X3', anomaly_data, anomaly_scorer=MedianCDFQuantileScorer(), num_distribution_samples=num_training_samples, attribute_mean_deviation=False)
def total_anomaly_score(training_data, anomaly_sample):
distribution_samples = training_data['X3'].to_numpy()
anomaly_scorer = MedianCDFQuantileScorer()
anomaly_scorer.fit(distribution_samples)
return -np.log(_relative_frequency(anomaly_scorer.score(distribution_samples) >= anomaly_scorer.score(anomaly_sample)))
assert scores['X0'][0] < 0.5
assert scores['X0'][1] > 8
assert scores['X0'][2] > 4
assert scores['X1'][0] > 8
assert scores['X1'][1] < 0.5
assert scores['X1'][2] < 0.5
assert np.all(scores['X2'] < 0.5)
assert np.all(scores['X3'][:2] < 0.5)
assert scores['X3'][2] > 4
assert scores['X0'][0] + scores['X1'][0] + scores['X2'][0] + scores['X3'][0] == approx(total_anomaly_score(training_data, anomaly_data['X3'].to_numpy()[0]), abs=0.001)
assert scores['X0'][1] + scores['X1'][1] + scores['X2'][1] + scores['X3'][1] == approx(total_anomaly_score(training_data, anomaly_data['X3'].to_numpy()[1]), abs=0.001)
assert scores['X0'][2] + scores['X1'][2] + scores['X2'][2] + scores['X3'][2] == approx(total_anomaly_score(training_data, anomaly_data['X3'].to_numpy()[2]), abs=0.001)
|
@flaky(max_runs=3)
def test_given_simple_causal_chain_with_linear_relationships_when_attribute_anomaly_scores_with_it_score_then_returns_qualitatively_correct_results():
num_training_samples = 5000
X0 = np.random.normal(0, 1, num_training_samples)
X1 = X0 + np.random.normal(0, 1, num_training_samples)
X2 = X1 + np.random.normal(0, 1, num_training_samples)
X3 = X2 + np.random.normal(0, 1, num_training_samples)
training_data = pd.DataFrame({'X0': X0, 'X1': X1, 'X2': X2, 'X3': X3})
causal_model = InvertibleStructuralCausalModel(nx.DiGraph([('X0', 'X1'), ('X1', 'X2'), ('X2', 'X3')]))
auto.assign_causal_mechanisms(causal_model, training_data, auto.AssignmentQuality.GOOD)
pass
anomaly_data = pd.DataFrame({'X0': np.array([0, 10, 10]), 'X1': np.array([10, 10, 10]), 'X2': np.array([10, 10, 10]), 'X3': np.array([10, 10, 20])})
scores = attribute_anomalies(causal_model, 'X3', anomaly_data, anomaly_scorer=MedianCDFQuantileScorer(), num_distribution_samples=num_training_samples, attribute_mean_deviation=False)
def total_anomaly_score(training_data, anomaly_sample):
distribution_samples = training_data['X3'].to_numpy()
anomaly_scorer = MedianCDFQuantileScorer()
anomaly_scorer.fit(distribution_samples)
return -np.log(_relative_frequency(anomaly_scorer.score(distribution_samples) >= anomaly_scorer.score(anomaly_sample)))
assert scores['X0'][0] < 0.5
assert scores['X0'][1] > 8
assert scores['X0'][2] > 4
assert scores['X1'][0] > 8
assert scores['X1'][1] < 0.5
assert scores['X1'][2] < 0.5
assert np.all(scores['X2'] < 0.5)
assert np.all(scores['X3'][:2] < 0.5)
assert scores['X3'][2] > 4
assert scores['X0'][0] + scores['X1'][0] + scores['X2'][0] + scores['X3'][0] == approx(total_anomaly_score(training_data, anomaly_data['X3'].to_numpy()[0]), abs=0.001)
assert scores['X0'][1] + scores['X1'][1] + scores['X2'][1] + scores['X3'][1] == approx(total_anomaly_score(training_data, anomaly_data['X3'].to_numpy()[1]), abs=0.001)
assert scores['X0'][2] + scores['X1'][2] + scores['X2'][2] + scores['X3'][2] == approx(total_anomaly_score(training_data, anomaly_data['X3'].to_numpy()[2]), abs=0.001)
|
dowhy
|
positive
|
def test_coef_eq_layers_change():
dbn = DBN([5], pretrain_epochs=0, finetune_epochs=0, random_state=1234)
<DeepExtract>
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
Y = iris.target
(X, y) = (X, Y)
</DeepExtract>
dbn.fit(X, y)
eq_(dbn.layers[0].b, np.zeros(5))
eq_(dbn.layers[1].b, np.zeros(3))
dbn.coef_[:] = np.ones(len(dbn.coef_))
eq_(dbn.layers[0].b, np.ones(5))
eq_(dbn.layers[0].W, np.ones((4, 5)))
eq_(dbn.layers[1].b, np.ones(3))
eq_(dbn.layers[1].W, np.ones((5, 3)))
eq_(dbn.layers[0].b[0], 1)
dbn.coef_[0] = 2
eq_(dbn.layers[0].b[0], 2)
eq_(dbn.coef_[1], 1)
dbn.layers[0].b[1] = 3
eq_(dbn.coef_[1], 3)
|
def test_coef_eq_layers_change():
dbn = DBN([5], pretrain_epochs=0, finetune_epochs=0, random_state=1234)
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
Y = iris.target
(X, y) = (X, Y)
dbn.fit(X, y)
eq_(dbn.layers[0].b, np.zeros(5))
eq_(dbn.layers[1].b, np.zeros(3))
dbn.coef_[:] = np.ones(len(dbn.coef_))
eq_(dbn.layers[0].b, np.ones(5))
eq_(dbn.layers[0].W, np.ones((4, 5)))
eq_(dbn.layers[1].b, np.ones(3))
eq_(dbn.layers[1].W, np.ones((5, 3)))
eq_(dbn.layers[0].b[0], 1)
dbn.coef_[0] = 2
eq_(dbn.layers[0].b[0], 2)
eq_(dbn.coef_[1], 1)
dbn.layers[0].b[1] = 3
eq_(dbn.coef_[1], 3)
|
copper
|
positive
|
def subsetsSums(nums):
def helper(start, summ, result):
result.append(summ)
for i in range(start, len(nums)):
<DeepExtract>
result.append(summ + nums[i])
for i in range(i + 1, len(nums)):
helper(i + 1, summ + nums[i] + nums[i], result)
return result
</DeepExtract>
return result
return helper(0, 0, [])
|
def subsetsSums(nums):
def helper(start, summ, result):
result.append(summ)
for i in range(start, len(nums)):
result.append(summ + nums[i])
for i in range(i + 1, len(nums)):
helper(i + 1, summ + nums[i] + nums[i], result)
return result
return result
return helper(0, 0, [])
|
Algorithm-Implementations
|
positive
|
def process_item(self, item):
<DeepExtract>
if item is None:
cve = None
if 'ASSIGNER' not in item['cve']['CVE_data_meta']:
item['cve']['CVE_data_meta']['ASSIGNER'] = None
cve = {'id': item['cve']['CVE_data_meta']['ID'], 'assigner': item['cve']['CVE_data_meta']['ASSIGNER'], 'Published': parse_datetime(item['publishedDate'], ignoretz=True), 'Modified': parse_datetime(item['lastModifiedDate'], ignoretz=True), 'last-modified': parse_datetime(item['lastModifiedDate'], ignoretz=True)}
for description in item['cve']['description']['description_data']:
if description['lang'] == 'en':
if 'summary' in cve:
cve['summary'] += ' {}'.format(description['value'])
else:
cve['summary'] = description['value']
if 'impact' in item:
cve['access'] = {}
cve['impact'] = {}
if 'baseMetricV3' in item['impact']:
cve['impact3'] = {}
cve['exploitability3'] = {}
cve['impact3']['availability'] = item['impact']['baseMetricV3']['cvssV3']['availabilityImpact']
cve['impact3']['confidentiality'] = item['impact']['baseMetricV3']['cvssV3']['confidentialityImpact']
cve['impact3']['integrity'] = item['impact']['baseMetricV3']['cvssV3']['integrityImpact']
cve['exploitability3']['attackvector'] = item['impact']['baseMetricV3']['cvssV3']['attackVector']
cve['exploitability3']['attackcomplexity'] = item['impact']['baseMetricV3']['cvssV3']['attackComplexity']
cve['exploitability3']['privilegesrequired'] = item['impact']['baseMetricV3']['cvssV3']['privilegesRequired']
cve['exploitability3']['userinteraction'] = item['impact']['baseMetricV3']['cvssV3']['userInteraction']
cve['exploitability3']['scope'] = item['impact']['baseMetricV3']['cvssV3']['scope']
cve['cvss3'] = float(item['impact']['baseMetricV3']['cvssV3']['baseScore'])
cve['cvss3-vector'] = item['impact']['baseMetricV3']['cvssV3']['vectorString']
cve['impactScore3'] = float(item['impact']['baseMetricV3']['impactScore'])
cve['exploitabilityScore3'] = float(item['impact']['baseMetricV3']['exploitabilityScore'])
else:
cve['cvss3'] = None
if 'baseMetricV2' in item['impact']:
cve['access']['authentication'] = item['impact']['baseMetricV2']['cvssV2']['authentication']
cve['access']['complexity'] = item['impact']['baseMetricV2']['cvssV2']['accessComplexity']
cve['access']['vector'] = item['impact']['baseMetricV2']['cvssV2']['accessVector']
cve['impact']['availability'] = item['impact']['baseMetricV2']['cvssV2']['availabilityImpact']
cve['impact']['confidentiality'] = item['impact']['baseMetricV2']['cvssV2']['confidentialityImpact']
cve['impact']['integrity'] = item['impact']['baseMetricV2']['cvssV2']['integrityImpact']
cve['cvss'] = float(item['impact']['baseMetricV2']['cvssV2']['baseScore'])
cve['exploitabilityScore'] = float(item['impact']['baseMetricV2']['exploitabilityScore'])
cve['impactScore'] = float(item['impact']['baseMetricV2']['impactScore'])
cve['cvss-time'] = parse_datetime(item['lastModifiedDate'], ignoretz=True)
cve['cvss-vector'] = item['impact']['baseMetricV2']['cvssV2']['vectorString']
else:
cve['cvss'] = None
if 'references' in item['cve']:
cve['references'] = []
for ref in item['cve']['references']['reference_data']:
cve['references'].append(ref['url'])
if 'configurations' in item:
cve['vulnerable_configuration'] = []
cve['vulnerable_product'] = []
cve['vendors'] = []
cve['products'] = []
cve['vulnerable_product_stems'] = []
cve['vulnerable_configuration_stems'] = []
for cpe in item['configurations']['nodes']:
if 'cpe_match' in cpe:
for cpeuri in cpe['cpe_match']:
if 'cpe23Uri' not in cpeuri:
continue
if cpeuri['vulnerable']:
(query, version_info) = self.get_cpe_info(cpeuri)
if query != {}:
query['id'] = hashlib.sha1(cpeuri['cpe23Uri'].encode('utf-8') + version_info.encode('utf-8')).hexdigest()
cpe_info = getCPEVersionInformation(query)
if cpe_info:
if cpe_info['cpe_name']:
for vulnerable_version in cpe_info['cpe_name']:
cve = self.add_if_missing(cve, 'vulnerable_product', vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(vulnerable_version['cpe23Uri']))
(vendor, product) = self.get_vendor_product(vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(vulnerable_version['cpe23Uri']))
else:
cve = self.add_if_missing(cve, 'vulnerable_product', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
(vendor, product) = self.get_vendor_product(cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(cpeuri['cpe23Uri']))
else:
cve = self.add_if_missing(cve, 'vulnerable_product', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
(vendor, product) = self.get_vendor_product(cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(cpeuri['cpe23Uri']))
else:
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
if 'children' in cpe:
for child in cpe['children']:
if 'cpe_match' in child:
for cpeuri in child['cpe_match']:
if 'cpe23Uri' not in cpeuri:
continue
if cpeuri['vulnerable']:
(query, version_info) = self.get_cpe_info(cpeuri)
if query != {}:
query['id'] = hashlib.sha1(cpeuri['cpe23Uri'].encode('utf-8') + version_info.encode('utf-8')).hexdigest()
cpe_info = getCPEVersionInformation(query)
if cpe_info:
if cpe_info['cpe_name']:
for vulnerable_version in cpe_info['cpe_name']:
cve = self.add_if_missing(cve, 'vulnerable_product', vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(vulnerable_version['cpe23Uri']))
(vendor, product) = self.get_vendor_product(vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(vulnerable_version['cpe23Uri']))
else:
cve = self.add_if_missing(cve, 'vulnerable_product', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
(vendor, product) = self.get_vendor_product(cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(cpeuri['cpe23Uri']))
else:
if 'cpe23Uri' not in cpeuri:
continue
cve = self.add_if_missing(cve, 'vulnerable_product', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
(vendor, product) = self.get_vendor_product(cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(cpeuri['cpe23Uri']))
else:
if 'cpe23Uri' not in cpeuri:
continue
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
if 'problemtype' in item['cve']:
for problem in item['cve']['problemtype']['problemtype_data']:
for cwe in problem['description']:
if cwe['lang'] == 'en':
cve['cwe'] = cwe['value']
if not 'cwe' in cve:
cve['cwe'] = defaultvalue['cwe']
else:
cve['cwe'] = defaultvalue['cwe']
cve['vulnerable_configuration_cpe_2_2'] = []
cve = cve
</DeepExtract>
if cve is not None:
if self.is_update:
self.queue.put(DatabaseAction(action=DatabaseAction.actions.UpdateOne, collection=self.feed_type.lower(), doc=cve))
else:
self.queue.put(DatabaseAction(action=DatabaseAction.actions.InsertOne, collection=self.feed_type.lower(), doc=cve))
|
def process_item(self, item):
if item is None:
cve = None
if 'ASSIGNER' not in item['cve']['CVE_data_meta']:
item['cve']['CVE_data_meta']['ASSIGNER'] = None
cve = {'id': item['cve']['CVE_data_meta']['ID'], 'assigner': item['cve']['CVE_data_meta']['ASSIGNER'], 'Published': parse_datetime(item['publishedDate'], ignoretz=True), 'Modified': parse_datetime(item['lastModifiedDate'], ignoretz=True), 'last-modified': parse_datetime(item['lastModifiedDate'], ignoretz=True)}
for description in item['cve']['description']['description_data']:
if description['lang'] == 'en':
if 'summary' in cve:
cve['summary'] += ' {}'.format(description['value'])
else:
cve['summary'] = description['value']
if 'impact' in item:
cve['access'] = {}
cve['impact'] = {}
if 'baseMetricV3' in item['impact']:
cve['impact3'] = {}
cve['exploitability3'] = {}
cve['impact3']['availability'] = item['impact']['baseMetricV3']['cvssV3']['availabilityImpact']
cve['impact3']['confidentiality'] = item['impact']['baseMetricV3']['cvssV3']['confidentialityImpact']
cve['impact3']['integrity'] = item['impact']['baseMetricV3']['cvssV3']['integrityImpact']
cve['exploitability3']['attackvector'] = item['impact']['baseMetricV3']['cvssV3']['attackVector']
cve['exploitability3']['attackcomplexity'] = item['impact']['baseMetricV3']['cvssV3']['attackComplexity']
cve['exploitability3']['privilegesrequired'] = item['impact']['baseMetricV3']['cvssV3']['privilegesRequired']
cve['exploitability3']['userinteraction'] = item['impact']['baseMetricV3']['cvssV3']['userInteraction']
cve['exploitability3']['scope'] = item['impact']['baseMetricV3']['cvssV3']['scope']
cve['cvss3'] = float(item['impact']['baseMetricV3']['cvssV3']['baseScore'])
cve['cvss3-vector'] = item['impact']['baseMetricV3']['cvssV3']['vectorString']
cve['impactScore3'] = float(item['impact']['baseMetricV3']['impactScore'])
cve['exploitabilityScore3'] = float(item['impact']['baseMetricV3']['exploitabilityScore'])
else:
cve['cvss3'] = None
if 'baseMetricV2' in item['impact']:
cve['access']['authentication'] = item['impact']['baseMetricV2']['cvssV2']['authentication']
cve['access']['complexity'] = item['impact']['baseMetricV2']['cvssV2']['accessComplexity']
cve['access']['vector'] = item['impact']['baseMetricV2']['cvssV2']['accessVector']
cve['impact']['availability'] = item['impact']['baseMetricV2']['cvssV2']['availabilityImpact']
cve['impact']['confidentiality'] = item['impact']['baseMetricV2']['cvssV2']['confidentialityImpact']
cve['impact']['integrity'] = item['impact']['baseMetricV2']['cvssV2']['integrityImpact']
cve['cvss'] = float(item['impact']['baseMetricV2']['cvssV2']['baseScore'])
cve['exploitabilityScore'] = float(item['impact']['baseMetricV2']['exploitabilityScore'])
cve['impactScore'] = float(item['impact']['baseMetricV2']['impactScore'])
cve['cvss-time'] = parse_datetime(item['lastModifiedDate'], ignoretz=True)
cve['cvss-vector'] = item['impact']['baseMetricV2']['cvssV2']['vectorString']
else:
cve['cvss'] = None
if 'references' in item['cve']:
cve['references'] = []
for ref in item['cve']['references']['reference_data']:
cve['references'].append(ref['url'])
if 'configurations' in item:
cve['vulnerable_configuration'] = []
cve['vulnerable_product'] = []
cve['vendors'] = []
cve['products'] = []
cve['vulnerable_product_stems'] = []
cve['vulnerable_configuration_stems'] = []
for cpe in item['configurations']['nodes']:
if 'cpe_match' in cpe:
for cpeuri in cpe['cpe_match']:
if 'cpe23Uri' not in cpeuri:
continue
if cpeuri['vulnerable']:
(query, version_info) = self.get_cpe_info(cpeuri)
if query != {}:
query['id'] = hashlib.sha1(cpeuri['cpe23Uri'].encode('utf-8') + version_info.encode('utf-8')).hexdigest()
cpe_info = getCPEVersionInformation(query)
if cpe_info:
if cpe_info['cpe_name']:
for vulnerable_version in cpe_info['cpe_name']:
cve = self.add_if_missing(cve, 'vulnerable_product', vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(vulnerable_version['cpe23Uri']))
(vendor, product) = self.get_vendor_product(vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(vulnerable_version['cpe23Uri']))
else:
cve = self.add_if_missing(cve, 'vulnerable_product', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
(vendor, product) = self.get_vendor_product(cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(cpeuri['cpe23Uri']))
else:
cve = self.add_if_missing(cve, 'vulnerable_product', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
(vendor, product) = self.get_vendor_product(cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(cpeuri['cpe23Uri']))
else:
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
if 'children' in cpe:
for child in cpe['children']:
if 'cpe_match' in child:
for cpeuri in child['cpe_match']:
if 'cpe23Uri' not in cpeuri:
continue
if cpeuri['vulnerable']:
(query, version_info) = self.get_cpe_info(cpeuri)
if query != {}:
query['id'] = hashlib.sha1(cpeuri['cpe23Uri'].encode('utf-8') + version_info.encode('utf-8')).hexdigest()
cpe_info = getCPEVersionInformation(query)
if cpe_info:
if cpe_info['cpe_name']:
for vulnerable_version in cpe_info['cpe_name']:
cve = self.add_if_missing(cve, 'vulnerable_product', vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(vulnerable_version['cpe23Uri']))
(vendor, product) = self.get_vendor_product(vulnerable_version['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(vulnerable_version['cpe23Uri']))
else:
cve = self.add_if_missing(cve, 'vulnerable_product', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
(vendor, product) = self.get_vendor_product(cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(cpeuri['cpe23Uri']))
else:
if 'cpe23Uri' not in cpeuri:
continue
cve = self.add_if_missing(cve, 'vulnerable_product', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
(vendor, product) = self.get_vendor_product(cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vendors', vendor)
cve = self.add_if_missing(cve, 'products', product)
cve = self.add_if_missing(cve, 'vulnerable_product_stems', self.stem(cpeuri['cpe23Uri']))
else:
if 'cpe23Uri' not in cpeuri:
continue
cve = self.add_if_missing(cve, 'vulnerable_configuration', cpeuri['cpe23Uri'])
cve = self.add_if_missing(cve, 'vulnerable_configuration_stems', self.stem(cpeuri['cpe23Uri']))
if 'problemtype' in item['cve']:
for problem in item['cve']['problemtype']['problemtype_data']:
for cwe in problem['description']:
if cwe['lang'] == 'en':
cve['cwe'] = cwe['value']
if not 'cwe' in cve:
cve['cwe'] = defaultvalue['cwe']
else:
cve['cwe'] = defaultvalue['cwe']
cve['vulnerable_configuration_cpe_2_2'] = []
cve = cve
if cve is not None:
if self.is_update:
self.queue.put(DatabaseAction(action=DatabaseAction.actions.UpdateOne, collection=self.feed_type.lower(), doc=cve))
else:
self.queue.put(DatabaseAction(action=DatabaseAction.actions.InsertOne, collection=self.feed_type.lower(), doc=cve))
|
cve-search
|
positive
|
def extract_unt_fedban(message: Message, args: List[str]) -> (Optional[int], Optional[str]):
prev_message = message.reply_to_message
split_text = message.text.split(None, 1)
if len(split_text) < 2:
return id_from_reply(message)
text_to_parse = split_text[1]
text = ''
entities = list(message.parse_entities([MessageEntity.TEXT_MENTION]))
if len(entities) > 0:
ent = entities[0]
else:
ent = None
if entities and ent and (ent.offset == len(message.text) - len(text_to_parse)):
ent = entities[0]
user_id = ent.user.id
text = message.text[ent.offset + ent.length:]
elif len(args) >= 1 and args[0][0] == '@':
user = args[0]
user_id = get_user_id(user)
if not user_id and (not str(user_id).isdigit()):
message.reply_text("I don't seem to have interacted with this user before - please forward a message from them to give me control! (like a voodoo doll, I need a piece of them to be able to execute certain commands...)")
return (None, None)
else:
user_id = user_id
res = message.text.split(None, 2)
if len(res) >= 3:
text = res[2]
elif len(args) >= 1 and args[0].isdigit():
user_id = int(args[0])
res = message.text.split(None, 2)
if len(res) >= 3:
text = res[2]
elif prev_message:
<DeepExtract>
prev_message = message.reply_to_message
if not prev_message:
(user_id, text) = (None, None)
user_id = prev_message.from_user.id
res = message.text.split(None, 1)
if len(res) < 2:
(user_id, text) = (user_id, '')
(user_id, text) = (user_id, res[1])
</DeepExtract>
else:
return (None, None)
try:
message.bot.get_chat(user_id)
except BadRequest as excp:
if excp.message in ('User_id_invalid', 'Chat not found') and (not str(user_id).isdigit()):
message.reply_text("I don't seem to have interacted with this user before - please forward messages fromthem to give me control! (Like a voodoo doll, I need a piece to be able toto execute certain commands ...)")
return (None, None)
elif excp.message != 'Chat not found':
LOGGER.exception('Exception %s on user %s', excp.message, user_id)
return (None, None)
elif not str(user_id).isdigit():
return (None, None)
return (user_id, text)
|
def extract_unt_fedban(message: Message, args: List[str]) -> (Optional[int], Optional[str]):
prev_message = message.reply_to_message
split_text = message.text.split(None, 1)
if len(split_text) < 2:
return id_from_reply(message)
text_to_parse = split_text[1]
text = ''
entities = list(message.parse_entities([MessageEntity.TEXT_MENTION]))
if len(entities) > 0:
ent = entities[0]
else:
ent = None
if entities and ent and (ent.offset == len(message.text) - len(text_to_parse)):
ent = entities[0]
user_id = ent.user.id
text = message.text[ent.offset + ent.length:]
elif len(args) >= 1 and args[0][0] == '@':
user = args[0]
user_id = get_user_id(user)
if not user_id and (not str(user_id).isdigit()):
message.reply_text("I don't seem to have interacted with this user before - please forward a message from them to give me control! (like a voodoo doll, I need a piece of them to be able to execute certain commands...)")
return (None, None)
else:
user_id = user_id
res = message.text.split(None, 2)
if len(res) >= 3:
text = res[2]
elif len(args) >= 1 and args[0].isdigit():
user_id = int(args[0])
res = message.text.split(None, 2)
if len(res) >= 3:
text = res[2]
elif prev_message:
prev_message = message.reply_to_message
if not prev_message:
(user_id, text) = (None, None)
user_id = prev_message.from_user.id
res = message.text.split(None, 1)
if len(res) < 2:
(user_id, text) = (user_id, '')
(user_id, text) = (user_id, res[1])
else:
return (None, None)
try:
message.bot.get_chat(user_id)
except BadRequest as excp:
if excp.message in ('User_id_invalid', 'Chat not found') and (not str(user_id).isdigit()):
message.reply_text("I don't seem to have interacted with this user before - please forward messages fromthem to give me control! (Like a voodoo doll, I need a piece to be able toto execute certain commands ...)")
return (None, None)
elif excp.message != 'Chat not found':
LOGGER.exception('Exception %s on user %s', excp.message, user_id)
return (None, None)
elif not str(user_id).isdigit():
return (None, None)
return (user_id, text)
|
CinderellaProBot
|
positive
|
@bp.route('/reset', methods=['POST'], defaults={'target': 'all'})
@bp.route('/reset/<string:target>', methods=['POST'])
def http_reset(target):
try:
<DeepExtract>
message_uuid = str(uuid.uuid4())
formatted_message = json.dumps({'job': 'reset', 'target': target, 'body': body, 'request_method': request.method, 'message_uuid': message_uuid})
self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, formatted_message)
self.logger.info('Published {}'.format(formatted_message))
message_uuid = message_uuid
</DeepExtract>
return Response(json.dumps(gather_response('reset', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
|
@bp.route('/reset', methods=['POST'], defaults={'target': 'all'})
@bp.route('/reset/<string:target>', methods=['POST'])
def http_reset(target):
try:
message_uuid = str(uuid.uuid4())
formatted_message = json.dumps({'job': 'reset', 'target': target, 'body': body, 'request_method': request.method, 'message_uuid': message_uuid})
self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, formatted_message)
self.logger.info('Published {}'.format(formatted_message))
message_uuid = message_uuid
return Response(json.dumps(gather_response('reset', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200)
except Exception as e:
self.logger.error(e)
return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500)
|
eventgen
|
positive
|
def __getitem__(self, index):
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
<DeepExtract>
labels4 = []
s = self.img_size
(yc, xc) = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]
indices = [self] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)]
for (i, self) in enumerate(indices):
(img, _, (h, w)) = load_image(self, self)
if i == 0:
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)
(x1a, y1a, x2a, y2a) = (max(xc - w, 0), max(yc - h, 0), xc, yc)
(x1b, y1b, x2b, y2b) = (w - (x2a - x1a), h - (y2a - y1a), w, h)
elif i == 1:
(x1a, y1a, x2a, y2a) = (xc, max(yc - h, 0), min(xc + w, s * 2), yc)
(x1b, y1b, x2b, y2b) = (0, h - (y2a - y1a), min(w, x2a - x1a), h)
elif i == 2:
(x1a, y1a, x2a, y2a) = (max(xc - w, 0), yc, xc, min(s * 2, yc + h))
(x1b, y1b, x2b, y2b) = (w - (x2a - x1a), 0, w, min(y2a - y1a, h))
elif i == 3:
(x1a, y1a, x2a, y2a) = (xc, yc, min(xc + w, s * 2), min(s * 2, yc + h))
(x1b, y1b, x2b, y2b) = (0, 0, min(w, x2a - x1a), min(y2a - y1a, h))
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]
padw = x1a - x1b
padh = y1a - y1b
x = self.labels[self]
labels = x.copy()
if x.size > 0:
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])
(img4, labels4) = random_perspective(img4, labels4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border)
(img, labels) = (img4, labels4)
</DeepExtract>
shapes = None
if random.random() < hyp['mixup']:
<DeepExtract>
labels4 = []
s = self.img_size
(yc, xc) = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]
indices = [self] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)]
for (i, self) in enumerate(indices):
(img, _, (h, w)) = load_image(self, self)
if i == 0:
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)
(x1a, y1a, x2a, y2a) = (max(xc - w, 0), max(yc - h, 0), xc, yc)
(x1b, y1b, x2b, y2b) = (w - (x2a - x1a), h - (y2a - y1a), w, h)
elif i == 1:
(x1a, y1a, x2a, y2a) = (xc, max(yc - h, 0), min(xc + w, s * 2), yc)
(x1b, y1b, x2b, y2b) = (0, h - (y2a - y1a), min(w, x2a - x1a), h)
elif i == 2:
(x1a, y1a, x2a, y2a) = (max(xc - w, 0), yc, xc, min(s * 2, yc + h))
(x1b, y1b, x2b, y2b) = (w - (x2a - x1a), 0, w, min(y2a - y1a, h))
elif i == 3:
(x1a, y1a, x2a, y2a) = (xc, yc, min(xc + w, s * 2), min(s * 2, yc + h))
(x1b, y1b, x2b, y2b) = (0, 0, min(w, x2a - x1a), min(y2a - y1a, h))
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]
padw = x1a - x1b
padh = y1a - y1b
x = self.labels[self]
labels = x.copy()
if x.size > 0:
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])
(img4, labels4) = random_perspective(img4, labels4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border)
(img2, labels2) = (img4, labels4)
</DeepExtract>
r = np.random.beta(8.0, 8.0)
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
<DeepExtract>
img = self.imgs[self]
if img is None:
path = self.img_files[self]
img = cv2.imread(path)
assert img is not None, 'Image Not Found ' + path
(h0, w0) = img.shape[:2]
r = self.img_size / max(h0, w0)
if r != 1:
interp = cv2.INTER_AREA if r < 1 and (not self.augment) else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
(img, (h0, w0), (h, w)) = (img, (h0, w0), img.shape[:2])
else:
(img, (h0, w0), (h, w)) = (self.imgs[self], self.img_hw0[self], self.img_hw[self])
</DeepExtract>
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size
<DeepExtract>
shape = img.shape[:2]
if isinstance(shape, int):
shape = (shape, shape)
r = min(shape[0] / shape[0], shape[1] / shape[1])
if not self.augment:
r = min(r, 1.0)
ratio = (r, r)
new_unpad = (int(round(shape[1] * r)), int(round(shape[0] * r)))
(dw, dh) = (shape[1] - new_unpad[0], shape[0] - new_unpad[1])
if False:
(dw, dh) = (np.mod(dw, 32), np.mod(dh, 32))
elif scaleFill:
(dw, dh) = (0.0, 0.0)
new_unpad = (shape[1], shape[0])
ratio = (shape[1] / shape[1], shape[0] / shape[0])
dw /= 2
dh /= 2
if shape[::-1] != new_unpad:
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
(top, bottom) = (int(round(dh - 0.1)), int(round(dh + 0.1)))
(left, right) = (int(round(dw - 0.1)), int(round(dw + 0.1)))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
(img, ratio, pad) = (img, ratio, (dw, dh))
</DeepExtract>
shapes = ((h0, w0), ((h / h0, w / w0), pad))
labels = []
x = self.labels[index]
if x.size > 0:
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
if not mosaic:
<DeepExtract>
height = img.shape[0] + border[0] * 2
width = img.shape[1] + border[1] * 2
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2
C[1, 2] = -img.shape[0] / 2
P = np.eye(3)
P[2, 0] = random.uniform(-hyp['perspective'], hyp['perspective'])
P[2, 1] = random.uniform(-hyp['perspective'], hyp['perspective'])
R = np.eye(3)
a = random.uniform(-hyp['degrees'], hyp['degrees'])
s = random.uniform(1 - hyp['scale'], 1 + hyp['scale'])
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-hyp['shear'], hyp['shear']) * math.pi / 180)
S[1, 0] = math.tan(random.uniform(-hyp['shear'], hyp['shear']) * math.pi / 180)
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - hyp['translate'], 0.5 + hyp['translate']) * width
T[1, 2] = random.uniform(0.5 - hyp['translate'], 0.5 + hyp['translate']) * height
M = T @ S @ R @ P @ C
if border[0] != 0 or border[1] != 0 or (M != np.eye(3)).any():
if hyp['perspective']:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else:
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
n = len(labels)
if n:
xy = np.ones((n * 4, 3))
xy[:, :2] = labels[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)
xy = xy @ M.T
if hyp['perspective']:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)
else:
xy = xy[:, :2].reshape(n, 8)
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
i = box_candidates(box1=labels[:, 1:5].T * s, box2=xy.T)
labels = labels[i]
labels[:, 1:5] = xy[i]
(img, labels) = (img, labels)
</DeepExtract>
<DeepExtract>
r = np.random.uniform(-1, 1, 3) * [hyp['hsv_h'], hyp['hsv_s'], hyp['hsv_v']] + 1
(hue, sat, val) = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype
x = np.arange(0, 256, dtype=np.int16)
lut_hue = (x * r[0] % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
</DeepExtract>
nL = len(labels)
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
labels[:, [2, 4]] /= img.shape[0]
labels[:, [1, 3]] /= img.shape[1]
if self.augment:
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img)
return (torch.from_numpy(img), labels_out, self.img_files[index], shapes)
|
def __getitem__(self, index):
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
labels4 = []
s = self.img_size
(yc, xc) = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]
indices = [self] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)]
for (i, self) in enumerate(indices):
(img, _, (h, w)) = load_image(self, self)
if i == 0:
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)
(x1a, y1a, x2a, y2a) = (max(xc - w, 0), max(yc - h, 0), xc, yc)
(x1b, y1b, x2b, y2b) = (w - (x2a - x1a), h - (y2a - y1a), w, h)
elif i == 1:
(x1a, y1a, x2a, y2a) = (xc, max(yc - h, 0), min(xc + w, s * 2), yc)
(x1b, y1b, x2b, y2b) = (0, h - (y2a - y1a), min(w, x2a - x1a), h)
elif i == 2:
(x1a, y1a, x2a, y2a) = (max(xc - w, 0), yc, xc, min(s * 2, yc + h))
(x1b, y1b, x2b, y2b) = (w - (x2a - x1a), 0, w, min(y2a - y1a, h))
elif i == 3:
(x1a, y1a, x2a, y2a) = (xc, yc, min(xc + w, s * 2), min(s * 2, yc + h))
(x1b, y1b, x2b, y2b) = (0, 0, min(w, x2a - x1a), min(y2a - y1a, h))
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]
padw = x1a - x1b
padh = y1a - y1b
x = self.labels[self]
labels = x.copy()
if x.size > 0:
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])
(img4, labels4) = random_perspective(img4, labels4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border)
(img, labels) = (img4, labels4)
shapes = None
if random.random() < hyp['mixup']:
labels4 = []
s = self.img_size
(yc, xc) = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]
indices = [self] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)]
for (i, self) in enumerate(indices):
(img, _, (h, w)) = load_image(self, self)
if i == 0:
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)
(x1a, y1a, x2a, y2a) = (max(xc - w, 0), max(yc - h, 0), xc, yc)
(x1b, y1b, x2b, y2b) = (w - (x2a - x1a), h - (y2a - y1a), w, h)
elif i == 1:
(x1a, y1a, x2a, y2a) = (xc, max(yc - h, 0), min(xc + w, s * 2), yc)
(x1b, y1b, x2b, y2b) = (0, h - (y2a - y1a), min(w, x2a - x1a), h)
elif i == 2:
(x1a, y1a, x2a, y2a) = (max(xc - w, 0), yc, xc, min(s * 2, yc + h))
(x1b, y1b, x2b, y2b) = (w - (x2a - x1a), 0, w, min(y2a - y1a, h))
elif i == 3:
(x1a, y1a, x2a, y2a) = (xc, yc, min(xc + w, s * 2), min(s * 2, yc + h))
(x1b, y1b, x2b, y2b) = (0, 0, min(w, x2a - x1a), min(y2a - y1a, h))
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]
padw = x1a - x1b
padh = y1a - y1b
x = self.labels[self]
labels = x.copy()
if x.size > 0:
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])
(img4, labels4) = random_perspective(img4, labels4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border)
(img2, labels2) = (img4, labels4)
r = np.random.beta(8.0, 8.0)
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
img = self.imgs[self]
if img is None:
path = self.img_files[self]
img = cv2.imread(path)
assert img is not None, 'Image Not Found ' + path
(h0, w0) = img.shape[:2]
r = self.img_size / max(h0, w0)
if r != 1:
interp = cv2.INTER_AREA if r < 1 and (not self.augment) else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
(img, (h0, w0), (h, w)) = (img, (h0, w0), img.shape[:2])
else:
(img, (h0, w0), (h, w)) = (self.imgs[self], self.img_hw0[self], self.img_hw[self])
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size
shape = img.shape[:2]
if isinstance(shape, int):
shape = (shape, shape)
r = min(shape[0] / shape[0], shape[1] / shape[1])
if not self.augment:
r = min(r, 1.0)
ratio = (r, r)
new_unpad = (int(round(shape[1] * r)), int(round(shape[0] * r)))
(dw, dh) = (shape[1] - new_unpad[0], shape[0] - new_unpad[1])
if False:
(dw, dh) = (np.mod(dw, 32), np.mod(dh, 32))
elif scaleFill:
(dw, dh) = (0.0, 0.0)
new_unpad = (shape[1], shape[0])
ratio = (shape[1] / shape[1], shape[0] / shape[0])
dw /= 2
dh /= 2
if shape[::-1] != new_unpad:
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
(top, bottom) = (int(round(dh - 0.1)), int(round(dh + 0.1)))
(left, right) = (int(round(dw - 0.1)), int(round(dw + 0.1)))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
(img, ratio, pad) = (img, ratio, (dw, dh))
shapes = ((h0, w0), ((h / h0, w / w0), pad))
labels = []
x = self.labels[index]
if x.size > 0:
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
if not mosaic:
height = img.shape[0] + border[0] * 2
width = img.shape[1] + border[1] * 2
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2
C[1, 2] = -img.shape[0] / 2
P = np.eye(3)
P[2, 0] = random.uniform(-hyp['perspective'], hyp['perspective'])
P[2, 1] = random.uniform(-hyp['perspective'], hyp['perspective'])
R = np.eye(3)
a = random.uniform(-hyp['degrees'], hyp['degrees'])
s = random.uniform(1 - hyp['scale'], 1 + hyp['scale'])
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-hyp['shear'], hyp['shear']) * math.pi / 180)
S[1, 0] = math.tan(random.uniform(-hyp['shear'], hyp['shear']) * math.pi / 180)
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - hyp['translate'], 0.5 + hyp['translate']) * width
T[1, 2] = random.uniform(0.5 - hyp['translate'], 0.5 + hyp['translate']) * height
M = T @ S @ R @ P @ C
if border[0] != 0 or border[1] != 0 or (M != np.eye(3)).any():
if hyp['perspective']:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else:
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
n = len(labels)
if n:
xy = np.ones((n * 4, 3))
xy[:, :2] = labels[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)
xy = xy @ M.T
if hyp['perspective']:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)
else:
xy = xy[:, :2].reshape(n, 8)
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
i = box_candidates(box1=labels[:, 1:5].T * s, box2=xy.T)
labels = labels[i]
labels[:, 1:5] = xy[i]
(img, labels) = (img, labels)
r = np.random.uniform(-1, 1, 3) * [hyp['hsv_h'], hyp['hsv_s'], hyp['hsv_v']] + 1
(hue, sat, val) = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype
x = np.arange(0, 256, dtype=np.int16)
lut_hue = (x * r[0] % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
nL = len(labels)
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
labels[:, [2, 4]] /= img.shape[0]
labels[:, [1, 3]] /= img.shape[1]
if self.augment:
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img)
return (torch.from_numpy(img), labels_out, self.img_files[index], shapes)
|
Auto_maker
|
positive
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', default=None, type=str, required=True)
parser.add_argument('--vocab_file', default=None, type=str, required=True, help='The vocabulary file that the BERT model was trained on.')
parser.add_argument('--output_file', default=None, type=str, required=True)
parser.add_argument('--bert_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained BERT model. This specifies the model architecture.')
parser.add_argument('--init_checkpoint', default=None, type=str, required=True, help='Initial checkpoint (usually from a pre-trained BERT model).')
parser.add_argument('--layers', default='-1,-2,-3,-4', type=str)
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.')
parser.add_argument('--do_lower_case', default=True, action='store_true', help='Whether to lower case the input text. Should be True for uncased models and False for cased models.')
parser.add_argument('--batch_size', default=32, type=int, help='Batch size for predictions.')
parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
n_gpu = torch.cuda.device_count()
else:
device = torch.device('cuda', args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info('device', device, 'n_gpu', n_gpu, 'distributed training', bool(args.local_rank != -1))
layer_indexes = [int(x) for x in args.layers.split(',')]
bert_config = BertConfig.from_json_file(args.bert_config_file)
tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
<DeepExtract>
examples = []
unique_id = 0
with open(args.input_file, 'r') as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match('^(.*) \\|\\|\\| (.*)$', line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
examples = examples
</DeepExtract>
<DeepExtract>
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, args.max_seq_length - 3)
elif len(tokens_a) > args.max_seq_length - 2:
tokens_a = tokens_a[0:args.max_seq_length - 2]
tokens = []
input_type_ids = []
tokens.append('[CLS]')
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append('[SEP]')
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append('[SEP]')
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < args.max_seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == args.max_seq_length
assert len(input_mask) == args.max_seq_length
assert len(input_type_ids) == args.max_seq_length
if ex_index < 5:
logger.info('*** Example ***')
logger.info('unique_id: %s' % example.unique_id)
logger.info('tokens: %s' % ' '.join([str(x) for x in tokens]))
logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
logger.info('input_type_ids: %s' % ' '.join([str(x) for x in input_type_ids]))
features.append(InputFeatures(unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids))
features = features
</DeepExtract>
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
model = BertModel(bert_config)
if args.init_checkpoint is not None:
model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'))
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
model.eval()
with open(args.output_file, 'w', encoding='utf-8') as writer:
for (input_ids, input_mask, example_indices) in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
(all_encoder_layers, _) = model(input_ids, token_type_ids=None, attention_mask=input_mask)
all_encoder_layers = all_encoder_layers
for (b, example_index) in enumerate(example_indices):
feature = features[example_index.item()]
unique_id = int(feature.unique_id)
output_json = collections.OrderedDict()
output_json['linex_index'] = unique_id
all_out_features = []
for (i, token) in enumerate(feature.tokens):
all_layers = []
for (j, layer_index) in enumerate(layer_indexes):
layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
layer_output = layer_output[b]
layers = collections.OrderedDict()
layers['index'] = layer_index
layers['values'] = [round(x.item(), 6) for x in layer_output[i]]
all_layers.append(layers)
out_features = collections.OrderedDict()
out_features['token'] = token
out_features['layers'] = all_layers
all_out_features.append(out_features)
output_json['features'] = all_out_features
writer.write(json.dumps(output_json) + '\n')
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', default=None, type=str, required=True)
parser.add_argument('--vocab_file', default=None, type=str, required=True, help='The vocabulary file that the BERT model was trained on.')
parser.add_argument('--output_file', default=None, type=str, required=True)
parser.add_argument('--bert_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained BERT model. This specifies the model architecture.')
parser.add_argument('--init_checkpoint', default=None, type=str, required=True, help='Initial checkpoint (usually from a pre-trained BERT model).')
parser.add_argument('--layers', default='-1,-2,-3,-4', type=str)
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.')
parser.add_argument('--do_lower_case', default=True, action='store_true', help='Whether to lower case the input text. Should be True for uncased models and False for cased models.')
parser.add_argument('--batch_size', default=32, type=int, help='Batch size for predictions.')
parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
n_gpu = torch.cuda.device_count()
else:
device = torch.device('cuda', args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info('device', device, 'n_gpu', n_gpu, 'distributed training', bool(args.local_rank != -1))
layer_indexes = [int(x) for x in args.layers.split(',')]
bert_config = BertConfig.from_json_file(args.bert_config_file)
tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
examples = []
unique_id = 0
with open(args.input_file, 'r') as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match('^(.*) \\|\\|\\| (.*)$', line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
examples = examples
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, args.max_seq_length - 3)
elif len(tokens_a) > args.max_seq_length - 2:
tokens_a = tokens_a[0:args.max_seq_length - 2]
tokens = []
input_type_ids = []
tokens.append('[CLS]')
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append('[SEP]')
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append('[SEP]')
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < args.max_seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == args.max_seq_length
assert len(input_mask) == args.max_seq_length
assert len(input_type_ids) == args.max_seq_length
if ex_index < 5:
logger.info('*** Example ***')
logger.info('unique_id: %s' % example.unique_id)
logger.info('tokens: %s' % ' '.join([str(x) for x in tokens]))
logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
logger.info('input_type_ids: %s' % ' '.join([str(x) for x in input_type_ids]))
features.append(InputFeatures(unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids))
features = features
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
model = BertModel(bert_config)
if args.init_checkpoint is not None:
model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'))
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
model.eval()
with open(args.output_file, 'w', encoding='utf-8') as writer:
for (input_ids, input_mask, example_indices) in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
(all_encoder_layers, _) = model(input_ids, token_type_ids=None, attention_mask=input_mask)
all_encoder_layers = all_encoder_layers
for (b, example_index) in enumerate(example_indices):
feature = features[example_index.item()]
unique_id = int(feature.unique_id)
output_json = collections.OrderedDict()
output_json['linex_index'] = unique_id
all_out_features = []
for (i, token) in enumerate(feature.tokens):
all_layers = []
for (j, layer_index) in enumerate(layer_indexes):
layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
layer_output = layer_output[b]
layers = collections.OrderedDict()
layers['index'] = layer_index
layers['values'] = [round(x.item(), 6) for x in layer_output[i]]
all_layers.append(layers)
out_features = collections.OrderedDict()
out_features['token'] = token
out_features['layers'] = all_layers
all_out_features.append(out_features)
output_json['features'] = all_out_features
writer.write(json.dumps(output_json) + '\n')
|
BERT4doc-Classification
|
positive
|
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
<DeepExtract>
if not u.host:
raise LocationValueError('No host specified.')
u.scheme = u.scheme or 'http'
u.port = u.port or port_by_scheme.get(u.scheme, 80)
pool_key = (u.scheme, u.host, u.port)
with self.pools.lock:
pool = self.pools.get(pool_key)
if pool:
conn = pool
pool = self._new_pool(u.scheme, u.host, u.port)
self.pools[pool_key] = pool
conn = pool
</DeepExtract>
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == 'http':
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
redirect_location = urljoin(url, redirect_location)
if response.status == 303:
method = 'GET'
retries = kw.get('retries')
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
kw['retries'] = retries.increment(method, redirect_location)
kw['redirect'] = redirect
log.info('Redirecting %s -> %s' % (url, redirect_location))
return self.urlopen(method, redirect_location, **kw)
|
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
if not u.host:
raise LocationValueError('No host specified.')
u.scheme = u.scheme or 'http'
u.port = u.port or port_by_scheme.get(u.scheme, 80)
pool_key = (u.scheme, u.host, u.port)
with self.pools.lock:
pool = self.pools.get(pool_key)
if pool:
conn = pool
pool = self._new_pool(u.scheme, u.host, u.port)
self.pools[pool_key] = pool
conn = pool
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == 'http':
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
redirect_location = urljoin(url, redirect_location)
if response.status == 303:
method = 'GET'
retries = kw.get('retries')
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
kw['retries'] = retries.increment(method, redirect_location)
kw['redirect'] = redirect
log.info('Redirecting %s -> %s' % (url, redirect_location))
return self.urlopen(method, redirect_location, **kw)
|
crunchy-xml-decoder
|
positive
|
def load(file_or_filename):
"""Load the configuration from a file.
'file_or_filename' can be either:-
a) a filename
b) a file-like object that must be open for reading
"""
<DeepExtract>
if isinstance(file_or_filename, str):
f = open(file_or_filename, mode)
else:
f = file_or_filename
f = f
</DeepExtract>
section_name = None
section_body = ''
for line in f:
stripped = line.strip()
if stripped.startswith('[') and stripped.endswith(']'):
if section_name is not None:
<DeepExtract>
section = self.setdefault(section_name, {})
exec(section_body, self._namespaces, section)
del self._namespaces['__builtins__']
namespace = self._get_namespace(section_name)
namespace.__dict__.update(section)
</DeepExtract>
section_name = stripped[1:-1]
section_body = ''
elif section_name is not None:
section_body += line
if section_name is not None:
<DeepExtract>
section = self.setdefault(section_name, {})
exec(section_body, self._namespaces, section)
del self._namespaces['__builtins__']
namespace = self._get_namespace(section_name)
namespace.__dict__.update(section)
</DeepExtract>
f.close()
|
def load(file_or_filename):
"""Load the configuration from a file.
'file_or_filename' can be either:-
a) a filename
b) a file-like object that must be open for reading
"""
if isinstance(file_or_filename, str):
f = open(file_or_filename, mode)
else:
f = file_or_filename
f = f
section_name = None
section_body = ''
for line in f:
stripped = line.strip()
if stripped.startswith('[') and stripped.endswith(']'):
if section_name is not None:
section = self.setdefault(section_name, {})
exec(section_body, self._namespaces, section)
del self._namespaces['__builtins__']
namespace = self._get_namespace(section_name)
namespace.__dict__.update(section)
section_name = stripped[1:-1]
section_body = ''
elif section_name is not None:
section_body += line
if section_name is not None:
section = self.setdefault(section_name, {})
exec(section_body, self._namespaces, section)
del self._namespaces['__builtins__']
namespace = self._get_namespace(section_name)
namespace.__dict__.update(section)
f.close()
|
apptools
|
positive
|
def parse_packet(self, parent: str, payload: bytes) -> None:
self.parent = parent
if parent == '[SERVER]':
self.parentPacket = self.serverPacket
elif parent == '[CLIENT]':
self.parentPacket = self.clientPacket
else:
raise Exception
if self.parentPacket.packetSize == 0:
self.parentPacket.packetSize = int(payload[:4].hex(), 16) - len(payload[4:])
payload = payload[4:]
self.parentPacket.data = payload
payload = b''
elif len(payload) > self.parentPacket.packetSize:
self.parentPacket.data = self.parentPacket.data + payload[:self.parentPacket.packetSize]
payload = payload[self.parentPacket.packetSize:]
self.parentPacket.packetSize = 0
else:
self.parentPacket.packetSize -= len(payload)
self.parentPacket.data = self.parentPacket.data + payload
payload = b''
if self.parentPacket.packetSize == 0:
<DeepExtract>
self.packetSize: int = self.parentPacket.packetSize
self.data: bytes = self.parentPacket.data
self.command: bytes
sftp_num: int = self.extract_int(1)
packet: str = self.packetLayout[sftp_num]
self.prevID: int = self.ID
self.ID: int = self.extract_int(4)
self.path: bytes = b''
if packet == 'SSH_FXP_OPENDIR':
self.path = self.extract_string()
elif packet == 'SSH_FXP_REALPATH':
self.path = self.extract_string()
self.command = b'cd ' + self.path
log.msg(parent + '[SFTP] Entered Command: ' + self.command.decode())
elif packet == 'SSH_FXP_OPEN':
self.path = self.extract_string()
pflags = f'{self.extract_int(4):08b}'
if pflags[6] == '1':
self.command = b'put ' + self.path
self.theFile = b''
elif pflags[7] == '1':
self.command = b'get ' + self.path
else:
log.msg(parent + f'[SFTP] New SFTP pflag detected: {pflags!r} {self.data!r}')
log.msg(parent + ' [SFTP] Entered Command: ' + self.command.decode())
elif packet == 'SSH_FXP_READ':
pass
elif packet == 'SSH_FXP_WRITE':
if self.handle == self.extract_string():
self.offset = self.extract_int(8)
self.theFile = self.theFile[:self.offset] + self.extract_data()
elif packet == 'SSH_FXP_HANDLE':
if self.ID == self.prevID:
self.handle = self.extract_string()
elif packet == 'SSH_FXP_READDIR':
if self.handle == self.extract_string():
self.command = b'ls ' + self.path
elif packet == 'SSH_FXP_SETSTAT':
self.path = self.extract_string()
self.command = self.extract_attrs() + b' ' + self.path
elif packet == 'SSH_FXP_EXTENDED':
cmd = self.extract_string()
self.path = self.extract_string()
if cmd == b'statvfs@openssh.com':
self.command = b'df ' + self.path
elif cmd == b'hardlink@openssh.com':
self.command = b'ln ' + self.path + b' ' + self.extract_string()
elif cmd == b'posix-rename@openssh.com':
self.command = b'mv ' + self.path + b' ' + self.extract_string()
else:
log.msg(parent + f'[SFTP] New SFTP Extended Command detected: {cmd!r} {self.data!r}')
elif packet == 'SSH_FXP_EXTENDED_REPLY':
log.msg(parent + '[SFTP] Entered Command: ' + self.command.decode())
elif packet == 'SSH_FXP_CLOSE':
if self.handle == self.extract_string():
if b'get' in self.command:
log.msg(parent + ' [SFTP] Finished Downloading: ' + self.path.decode())
elif b'put' in self.command:
log.msg(parent + ' [SFTP] Finished Uploading: ' + self.path.decode())
elif packet == 'SSH_FXP_SYMLINK':
self.command = b'ln -s ' + self.extract_string() + b' ' + self.extract_string()
elif packet == 'SSH_FXP_MKDIR':
self.command = b'mkdir ' + self.extract_string()
elif packet == 'SSH_FXP_REMOVE':
self.command = b'rm ' + self.extract_string()
elif packet == 'SSH_FXP_RMDIR':
self.command = b'rmdir ' + self.extract_string()
elif packet == 'SSH_FXP_STATUS':
if self.ID == self.prevID:
code = self.extract_int(4)
if code in [0, 1]:
if b'get' not in self.command and b'put' not in self.command:
log.msg(parent + ' [SFTP] Entered Command: ' + self.command.decode())
else:
message = self.extract_string()
log.msg(parent + ' [SFTP] Failed Command: ' + self.command.decode() + ' Reason: ' + message.decode())
</DeepExtract>
if len(payload) != 0:
<DeepExtract>
self.parent = parent
if parent == '[SERVER]':
self.parentPacket = self.serverPacket
elif parent == '[CLIENT]':
self.parentPacket = self.clientPacket
else:
raise Exception
if self.parentPacket.packetSize == 0:
self.parentPacket.packetSize = int(payload[:4].hex(), 16) - len(payload[4:])
payload = payload[4:]
self.parentPacket.data = payload
payload = b''
elif len(payload) > self.parentPacket.packetSize:
self.parentPacket.data = self.parentPacket.data + payload[:self.parentPacket.packetSize]
payload = payload[self.parentPacket.packetSize:]
self.parentPacket.packetSize = 0
else:
self.parentPacket.packetSize -= len(payload)
self.parentPacket.data = self.parentPacket.data + payload
payload = b''
if self.parentPacket.packetSize == 0:
self.handle_packet(parent)
if len(payload) != 0:
self.parse_packet(parent, payload)
</DeepExtract>
|
def parse_packet(self, parent: str, payload: bytes) -> None:
self.parent = parent
if parent == '[SERVER]':
self.parentPacket = self.serverPacket
elif parent == '[CLIENT]':
self.parentPacket = self.clientPacket
else:
raise Exception
if self.parentPacket.packetSize == 0:
self.parentPacket.packetSize = int(payload[:4].hex(), 16) - len(payload[4:])
payload = payload[4:]
self.parentPacket.data = payload
payload = b''
elif len(payload) > self.parentPacket.packetSize:
self.parentPacket.data = self.parentPacket.data + payload[:self.parentPacket.packetSize]
payload = payload[self.parentPacket.packetSize:]
self.parentPacket.packetSize = 0
else:
self.parentPacket.packetSize -= len(payload)
self.parentPacket.data = self.parentPacket.data + payload
payload = b''
if self.parentPacket.packetSize == 0:
self.packetSize: int = self.parentPacket.packetSize
self.data: bytes = self.parentPacket.data
self.command: bytes
sftp_num: int = self.extract_int(1)
packet: str = self.packetLayout[sftp_num]
self.prevID: int = self.ID
self.ID: int = self.extract_int(4)
self.path: bytes = b''
if packet == 'SSH_FXP_OPENDIR':
self.path = self.extract_string()
elif packet == 'SSH_FXP_REALPATH':
self.path = self.extract_string()
self.command = b'cd ' + self.path
log.msg(parent + '[SFTP] Entered Command: ' + self.command.decode())
elif packet == 'SSH_FXP_OPEN':
self.path = self.extract_string()
pflags = f'{self.extract_int(4):08b}'
if pflags[6] == '1':
self.command = b'put ' + self.path
self.theFile = b''
elif pflags[7] == '1':
self.command = b'get ' + self.path
else:
log.msg(parent + f'[SFTP] New SFTP pflag detected: {pflags!r} {self.data!r}')
log.msg(parent + ' [SFTP] Entered Command: ' + self.command.decode())
elif packet == 'SSH_FXP_READ':
pass
elif packet == 'SSH_FXP_WRITE':
if self.handle == self.extract_string():
self.offset = self.extract_int(8)
self.theFile = self.theFile[:self.offset] + self.extract_data()
elif packet == 'SSH_FXP_HANDLE':
if self.ID == self.prevID:
self.handle = self.extract_string()
elif packet == 'SSH_FXP_READDIR':
if self.handle == self.extract_string():
self.command = b'ls ' + self.path
elif packet == 'SSH_FXP_SETSTAT':
self.path = self.extract_string()
self.command = self.extract_attrs() + b' ' + self.path
elif packet == 'SSH_FXP_EXTENDED':
cmd = self.extract_string()
self.path = self.extract_string()
if cmd == b'statvfs@openssh.com':
self.command = b'df ' + self.path
elif cmd == b'hardlink@openssh.com':
self.command = b'ln ' + self.path + b' ' + self.extract_string()
elif cmd == b'posix-rename@openssh.com':
self.command = b'mv ' + self.path + b' ' + self.extract_string()
else:
log.msg(parent + f'[SFTP] New SFTP Extended Command detected: {cmd!r} {self.data!r}')
elif packet == 'SSH_FXP_EXTENDED_REPLY':
log.msg(parent + '[SFTP] Entered Command: ' + self.command.decode())
elif packet == 'SSH_FXP_CLOSE':
if self.handle == self.extract_string():
if b'get' in self.command:
log.msg(parent + ' [SFTP] Finished Downloading: ' + self.path.decode())
elif b'put' in self.command:
log.msg(parent + ' [SFTP] Finished Uploading: ' + self.path.decode())
elif packet == 'SSH_FXP_SYMLINK':
self.command = b'ln -s ' + self.extract_string() + b' ' + self.extract_string()
elif packet == 'SSH_FXP_MKDIR':
self.command = b'mkdir ' + self.extract_string()
elif packet == 'SSH_FXP_REMOVE':
self.command = b'rm ' + self.extract_string()
elif packet == 'SSH_FXP_RMDIR':
self.command = b'rmdir ' + self.extract_string()
elif packet == 'SSH_FXP_STATUS':
if self.ID == self.prevID:
code = self.extract_int(4)
if code in [0, 1]:
if b'get' not in self.command and b'put' not in self.command:
log.msg(parent + ' [SFTP] Entered Command: ' + self.command.decode())
else:
message = self.extract_string()
log.msg(parent + ' [SFTP] Failed Command: ' + self.command.decode() + ' Reason: ' + message.decode())
if len(payload) != 0:
self.parent = parent
if parent == '[SERVER]':
self.parentPacket = self.serverPacket
elif parent == '[CLIENT]':
self.parentPacket = self.clientPacket
else:
raise Exception
if self.parentPacket.packetSize == 0:
self.parentPacket.packetSize = int(payload[:4].hex(), 16) - len(payload[4:])
payload = payload[4:]
self.parentPacket.data = payload
payload = b''
elif len(payload) > self.parentPacket.packetSize:
self.parentPacket.data = self.parentPacket.data + payload[:self.parentPacket.packetSize]
payload = payload[self.parentPacket.packetSize:]
self.parentPacket.packetSize = 0
else:
self.parentPacket.packetSize -= len(payload)
self.parentPacket.data = self.parentPacket.data + payload
payload = b''
if self.parentPacket.packetSize == 0:
self.handle_packet(parent)
if len(payload) != 0:
self.parse_packet(parent, payload)
|
cowrie
|
positive
|
def step(self, actions):
if self.terminate_episode:
return self.reset()
self.current_step += 1
rewards = np.zeros(len(self.ur5_episode_memories))
<DeepExtract>
if self.stop_ur5_after_reach:
for (i, (action, ur5, target_eef_pose)) in enumerate(zip(actions, self.active_ur5s, self.task_manager.get_target_end_effector_poses())):
if self.check_ur5_reached_target(i, ur5, target_eef_pose):
action = np.zeros(6)
if self.centralized_policy:
actions = actions[0]
self.ur5_episode_memories[0].add_action(actions)
actions = list(torch.split(actions, 6))
else:
for (action, m) in zip(actions, self.ur5_episode_memories):
m.add_action(action)
if self.centralized_critic:
for (this_ur5, memory) in zip(self.active_ur5s, self.ur5_episode_memories):
pos = np.array(this_ur5.get_pose()[0])
sorted_ur5s = [(action, ur5) for (action, ur5) in zip(actions, self.active_ur5s) if np.linalg.norm(pos - np.array(ur5.get_pose()[0])) < 2 * self.workspace_radius]
sorted_ur5s.sort(reverse=True, key=lambda item: np.linalg.norm(pos - np.array(item[1].get_pose()[0])))
sorted_actions = [action for (action, ur5) in sorted_ur5s]
sorted_actions[-1] = torch.FloatTensor([0.0] * 6)
critic_obs = []
for (obs, action) in zip(memory.data['observations'][-1], sorted_actions):
critic_obs.append(torch.cat((obs, action)))
memory.data['critic_observations'].append(torch.stack(critic_obs))
if len(memory.data['critic_next_observations']):
memory.data['critic_next_observations'][-1] = memory.data['critic_observations'][-1]
self.action_to_robots(actions)
</DeepExtract>
for t_sim in range(self.simulation_steps_per_action_step):
p.stepSimulation()
for ur5 in self.active_ur5s:
ur5.step()
<DeepExtract>
colliding = any([ur5.check_collision() for ur5 in self.active_ur5s])
if colliding:
self.on_collision()
self.state = {'ur5s': []}
self.state['ur5s'] = [{'end_effector_pose': ur5.get_end_effector_pose(), 'joint_values': ur5.get_arm_joint_values(), 'link_positions': ur5.get_link_global_positions(), 'ur5': ur5, 'pose': ur5.get_pose(), 'colliding': False if not colliding else ur5.check_collision(), 'target_pose': target_eef_pose, 'reached_target': self.check_ur5_reached_target(i, ur5, target_eef_pose)} for (i, (ur5, target_eef_pose)) in enumerate(zip(self.active_ur5s, self.task_manager.get_target_end_effector_poses()))]
self.state['reach_count'] = sum([1 if ur5_state['reached_target'] else 0 for ur5_state in self.state['ur5s']])
if self.state['reach_count'] == len(self.active_ur5s):
self.on_all_ur5s_reach_target()
self.state = self.state
</DeepExtract>
rewards += self.get_rewards(self.state)
<DeepExtract>
for (i, ur5) in enumerate(self.active_ur5s):
if self.state['ur5s'][i]['colliding']:
self.stats['collisions'][i] += +1
self.task_manager.set_timer(self.current_step * self.simulation_steps_per_action_step + t_sim / self.episode_length * self.simulation_steps_per_action_step)
</DeepExtract>
if self.gui and p.readUserDebugParameter(self.real_time_debug) == 1.0:
time.sleep(BaseEnv.SIMULATION_TIMESTEP)
rKey = ord('r')
keys = p.getKeyboardEvents()
if rKey in keys and keys[rKey] & p.KEY_WAS_TRIGGERED:
self.terminate_episode = True
if self.terminate_episode:
break
self.terminate_episode = self.terminate_episode or self.current_step >= self.episode_length
<DeepExtract>
pass
</DeepExtract>
self.observations = [self.preprocess_obs(o) for o in self.get_observations(state=self.state)]
self.episode_reward_sum += rewards
for (o, r, m) in zip(self.observations, rewards, self.ur5_episode_memories):
m.add_rewards_and_termination(r, self.terminate_episode)
m.add_value('next_observations', o)
if self.centralized_critic:
critic_next_obs = []
for obs in m.data['next_observations'][-1]:
critic_next_obs.append(torch.cat((obs, torch.FloatTensor([0.0] * 6))))
m.data['critic_next_observations'].append(torch.stack(critic_next_obs))
if not self.terminate_episode:
m.add_observation(o)
if self.terminate_episode:
<DeepExtract>
success = self.stats['collective_reach_count'] if sum(self.stats['collisions']) == 0 else 0
if success != 0:
self.on_success()
self.failed_in_task = self.stats['collective_reach_count'] == 0 or sum(self.stats['collisions']) != 0
self.send_memory_to_clusters()
self.send_stats_to_logger()
</DeepExtract>
return (self.obs_to_policy(self.observations), self.ray_id)
|
def step(self, actions):
if self.terminate_episode:
return self.reset()
self.current_step += 1
rewards = np.zeros(len(self.ur5_episode_memories))
if self.stop_ur5_after_reach:
for (i, (action, ur5, target_eef_pose)) in enumerate(zip(actions, self.active_ur5s, self.task_manager.get_target_end_effector_poses())):
if self.check_ur5_reached_target(i, ur5, target_eef_pose):
action = np.zeros(6)
if self.centralized_policy:
actions = actions[0]
self.ur5_episode_memories[0].add_action(actions)
actions = list(torch.split(actions, 6))
else:
for (action, m) in zip(actions, self.ur5_episode_memories):
m.add_action(action)
if self.centralized_critic:
for (this_ur5, memory) in zip(self.active_ur5s, self.ur5_episode_memories):
pos = np.array(this_ur5.get_pose()[0])
sorted_ur5s = [(action, ur5) for (action, ur5) in zip(actions, self.active_ur5s) if np.linalg.norm(pos - np.array(ur5.get_pose()[0])) < 2 * self.workspace_radius]
sorted_ur5s.sort(reverse=True, key=lambda item: np.linalg.norm(pos - np.array(item[1].get_pose()[0])))
sorted_actions = [action for (action, ur5) in sorted_ur5s]
sorted_actions[-1] = torch.FloatTensor([0.0] * 6)
critic_obs = []
for (obs, action) in zip(memory.data['observations'][-1], sorted_actions):
critic_obs.append(torch.cat((obs, action)))
memory.data['critic_observations'].append(torch.stack(critic_obs))
if len(memory.data['critic_next_observations']):
memory.data['critic_next_observations'][-1] = memory.data['critic_observations'][-1]
self.action_to_robots(actions)
for t_sim in range(self.simulation_steps_per_action_step):
p.stepSimulation()
for ur5 in self.active_ur5s:
ur5.step()
colliding = any([ur5.check_collision() for ur5 in self.active_ur5s])
if colliding:
self.on_collision()
self.state = {'ur5s': []}
self.state['ur5s'] = [{'end_effector_pose': ur5.get_end_effector_pose(), 'joint_values': ur5.get_arm_joint_values(), 'link_positions': ur5.get_link_global_positions(), 'ur5': ur5, 'pose': ur5.get_pose(), 'colliding': False if not colliding else ur5.check_collision(), 'target_pose': target_eef_pose, 'reached_target': self.check_ur5_reached_target(i, ur5, target_eef_pose)} for (i, (ur5, target_eef_pose)) in enumerate(zip(self.active_ur5s, self.task_manager.get_target_end_effector_poses()))]
self.state['reach_count'] = sum([1 if ur5_state['reached_target'] else 0 for ur5_state in self.state['ur5s']])
if self.state['reach_count'] == len(self.active_ur5s):
self.on_all_ur5s_reach_target()
self.state = self.state
rewards += self.get_rewards(self.state)
for (i, ur5) in enumerate(self.active_ur5s):
if self.state['ur5s'][i]['colliding']:
self.stats['collisions'][i] += +1
self.task_manager.set_timer(self.current_step * self.simulation_steps_per_action_step + t_sim / self.episode_length * self.simulation_steps_per_action_step)
if self.gui and p.readUserDebugParameter(self.real_time_debug) == 1.0:
time.sleep(BaseEnv.SIMULATION_TIMESTEP)
rKey = ord('r')
keys = p.getKeyboardEvents()
if rKey in keys and keys[rKey] & p.KEY_WAS_TRIGGERED:
self.terminate_episode = True
if self.terminate_episode:
break
self.terminate_episode = self.terminate_episode or self.current_step >= self.episode_length
pass
self.observations = [self.preprocess_obs(o) for o in self.get_observations(state=self.state)]
self.episode_reward_sum += rewards
for (o, r, m) in zip(self.observations, rewards, self.ur5_episode_memories):
m.add_rewards_and_termination(r, self.terminate_episode)
m.add_value('next_observations', o)
if self.centralized_critic:
critic_next_obs = []
for obs in m.data['next_observations'][-1]:
critic_next_obs.append(torch.cat((obs, torch.FloatTensor([0.0] * 6))))
m.data['critic_next_observations'].append(torch.stack(critic_next_obs))
if not self.terminate_episode:
m.add_observation(o)
if self.terminate_episode:
success = self.stats['collective_reach_count'] if sum(self.stats['collisions']) == 0 else 0
if success != 0:
self.on_success()
self.failed_in_task = self.stats['collective_reach_count'] == 0 or sum(self.stats['collisions']) != 0
self.send_memory_to_clusters()
self.send_stats_to_logger()
return (self.obs_to_policy(self.observations), self.ray_id)
|
decentralized-multiarm
|
positive
|
def regularPolygonWall(self, corners=3, r=None, h=None, side=None, edges='e', hole=None, callback=None, move=None):
"""Create regular polygon as a wall
:param corners: number of corners of the polygon
:param r: radius distance center to one of the corners
:param h: distance center to one of the sides (height of sector)
:param side: length of one side
:param edges: (Default value = "e", may be string/list of length corners)
:param hole: diameter of central hole (Default value = 0)
:param callback: (Default value = None, middle=0, then sides=1..)
:param move: (Default value = None)
"""
<DeepExtract>
if r:
side = 2 * math.sin(math.radians(180.0 / corners)) * r
h = r * math.cos(math.radians(180.0 / corners))
elif h:
side = 2 * math.tan(math.radians(180.0 / corners)) * h
r = ((side / 2.0) ** 2 + h ** 2) ** 0.5
elif side:
h = 0.5 * side * math.tan(math.radians(90 - 180.0 / corners))
r = ((side / 2.0) ** 2 + h ** 2) ** 0.5
(r, h, side) = (r, h, side)
</DeepExtract>
t = self.thickness
if not hasattr(edges, '__getitem__') or len(edges) == 1:
edges = [edges] * corners
edges = [self.edges.get(e, e) for e in edges]
edges += edges
if corners % 2:
th = r + h + edges[0].spacing() + max(edges[corners // 2].spacing(), edges[corners // 2 + 1].spacing()) / math.sin(math.radians(90 - 180 / corners))
else:
th = 2 * h + edges[0].spacing() + edges[corners // 2].spacing()
tw = 0
for i in range(corners):
ang = (180 + 360 * i) / corners
tw = max(tw, 2 * abs(math.sin(math.radians(ang)) * (r + max(edges[i].spacing(), edges[i + 1].spacing()) / math.sin(math.radians(90 - 180 / corners)))))
if self.move(tw, th, move, before=True):
return
<DeepExtract>
self.ctx.move_to(0, 0)
self.ctx.translate(0.5 * tw - 0.5 * side, edges[0].margin())
self.ctx.rotate(degrees * math.pi / 180.0)
self.ctx.move_to(0, 0)
</DeepExtract>
if hole:
<DeepExtract>
if not hole / 2.0:
hole / 2.0 = d / 2.0
if hole / 2.0 < self.burn:
hole / 2.0 = self.burn + 1e-09
r_ = hole / 2.0 - self.burn
self.moveTo(side / 2.0 + r_, h + edges[0].startwidth() + self.burn, -90)
self.corner(-360, hole / 2.0, tabs)
</DeepExtract>
<DeepExtract>
if h + edges[0].startwidth() + self.burn is None:
h + edges[0].startwidth() + self.burn = self.burn
if hasattr(callback, '__getitem__'):
try:
callback = callback[0]
0 = None
except (KeyError, IndexError):
pass
if callback and callable(callback):
with self.saved_context():
self.moveTo(side / 2.0, h + edges[0].startwidth() + self.burn, a)
if 0 is None:
callback()
else:
callback(0)
self.ctx.move_to(0, 0)
</DeepExtract>
for i in range(corners):
<DeepExtract>
if edges[i].startwidth() + self.burn is None:
edges[i].startwidth() + self.burn = self.burn
if hasattr(callback, '__getitem__'):
try:
callback = callback[i + 1]
i + 1 = None
except (KeyError, IndexError):
pass
if callback and callable(callback):
with self.saved_context():
self.moveTo(0, edges[i].startwidth() + self.burn, a)
if i + 1 is None:
callback()
else:
callback(i + 1)
self.ctx.move_to(0, 0)
</DeepExtract>
edges[i](side)
<DeepExtract>
edges[i] = self.edges.get(edges[i], edges[i])
edges[i + 1] = self.edges.get(edges[i + 1], edges[i + 1])
self.edge(edges[i + 1].startwidth() * math.tan(math.radians(360.0 / corners / 2.0)))
self.corner(360.0 / corners)
self.edge(edges[i].endwidth() * math.tan(math.radians(360.0 / corners / 2.0)))
</DeepExtract>
<DeepExtract>
if not move:
move = ''
terms = move.split()
dontdraw = before and 'only' in terms
tw += self.spacing
th += self.spacing
if 'rotated' in terms:
(tw, th) = (th, tw)
moves = {'up': (0, th, False), 'down': (0, -th, True), 'left': (-tw, 0, True), 'right': (tw, 0, False), 'only': (0, 0, None), 'mirror': (0, 0, None), 'rotated': (0, 0, None)}
if not before:
self.ctx.restore()
if self.labels and label:
self.text(label, tw / 2, th / 2, align='middle center', color=Color.ANNOTATIONS, fontsize=4)
self.ctx.stroke()
for term in terms:
if not term in moves:
raise ValueError("Unknown direction: '%s'" % term)
(mx, my, movebeforeprint) = moves[term]
if movebeforeprint and before:
self.moveTo(mx, my)
elif not movebeforeprint and (not before) or dontdraw:
self.moveTo(mx, my)
if not dontdraw:
if before:
if self.debug:
with self.saved_context():
self.set_source_color(Color.ANNOTATIONS)
self.ctx.rectangle(0, 0, tw, th)
self.ctx.save()
if 'rotated' in terms:
self.moveTo(tw, 0, 90)
(tw, th) = (th, tw)
if 'mirror' in terms:
self.moveTo(tw, 0)
self.ctx.scale(-1, 1)
self.moveTo(self.spacing / 2.0, self.spacing / 2.0)
self.ctx.new_part()
return dontdraw
</DeepExtract>
|
def regularPolygonWall(self, corners=3, r=None, h=None, side=None, edges='e', hole=None, callback=None, move=None):
"""Create regular polygon as a wall
:param corners: number of corners of the polygon
:param r: radius distance center to one of the corners
:param h: distance center to one of the sides (height of sector)
:param side: length of one side
:param edges: (Default value = "e", may be string/list of length corners)
:param hole: diameter of central hole (Default value = 0)
:param callback: (Default value = None, middle=0, then sides=1..)
:param move: (Default value = None)
"""
if r:
side = 2 * math.sin(math.radians(180.0 / corners)) * r
h = r * math.cos(math.radians(180.0 / corners))
elif h:
side = 2 * math.tan(math.radians(180.0 / corners)) * h
r = ((side / 2.0) ** 2 + h ** 2) ** 0.5
elif side:
h = 0.5 * side * math.tan(math.radians(90 - 180.0 / corners))
r = ((side / 2.0) ** 2 + h ** 2) ** 0.5
(r, h, side) = (r, h, side)
t = self.thickness
if not hasattr(edges, '__getitem__') or len(edges) == 1:
edges = [edges] * corners
edges = [self.edges.get(e, e) for e in edges]
edges += edges
if corners % 2:
th = r + h + edges[0].spacing() + max(edges[corners // 2].spacing(), edges[corners // 2 + 1].spacing()) / math.sin(math.radians(90 - 180 / corners))
else:
th = 2 * h + edges[0].spacing() + edges[corners // 2].spacing()
tw = 0
for i in range(corners):
ang = (180 + 360 * i) / corners
tw = max(tw, 2 * abs(math.sin(math.radians(ang)) * (r + max(edges[i].spacing(), edges[i + 1].spacing()) / math.sin(math.radians(90 - 180 / corners)))))
if self.move(tw, th, move, before=True):
return
self.ctx.move_to(0, 0)
self.ctx.translate(0.5 * tw - 0.5 * side, edges[0].margin())
self.ctx.rotate(degrees * math.pi / 180.0)
self.ctx.move_to(0, 0)
if hole:
if not hole / 2.0:
hole / 2.0 = d / 2.0
if hole / 2.0 < self.burn:
hole / 2.0 = self.burn + 1e-09
r_ = hole / 2.0 - self.burn
self.moveTo(side / 2.0 + r_, h + edges[0].startwidth() + self.burn, -90)
self.corner(-360, hole / 2.0, tabs)
if h + edges[0].startwidth() + self.burn is None:
h + edges[0].startwidth() + self.burn = self.burn
if hasattr(callback, '__getitem__'):
try:
callback = callback[0]
0 = None
except (KeyError, IndexError):
pass
if callback and callable(callback):
with self.saved_context():
self.moveTo(side / 2.0, h + edges[0].startwidth() + self.burn, a)
if 0 is None:
callback()
else:
callback(0)
self.ctx.move_to(0, 0)
for i in range(corners):
if edges[i].startwidth() + self.burn is None:
edges[i].startwidth() + self.burn = self.burn
if hasattr(callback, '__getitem__'):
try:
callback = callback[i + 1]
i + 1 = None
except (KeyError, IndexError):
pass
if callback and callable(callback):
with self.saved_context():
self.moveTo(0, edges[i].startwidth() + self.burn, a)
if i + 1 is None:
callback()
else:
callback(i + 1)
self.ctx.move_to(0, 0)
edges[i](side)
edges[i] = self.edges.get(edges[i], edges[i])
edges[i + 1] = self.edges.get(edges[i + 1], edges[i + 1])
self.edge(edges[i + 1].startwidth() * math.tan(math.radians(360.0 / corners / 2.0)))
self.corner(360.0 / corners)
self.edge(edges[i].endwidth() * math.tan(math.radians(360.0 / corners / 2.0)))
if not move:
move = ''
terms = move.split()
dontdraw = before and 'only' in terms
tw += self.spacing
th += self.spacing
if 'rotated' in terms:
(tw, th) = (th, tw)
moves = {'up': (0, th, False), 'down': (0, -th, True), 'left': (-tw, 0, True), 'right': (tw, 0, False), 'only': (0, 0, None), 'mirror': (0, 0, None), 'rotated': (0, 0, None)}
if not before:
self.ctx.restore()
if self.labels and label:
self.text(label, tw / 2, th / 2, align='middle center', color=Color.ANNOTATIONS, fontsize=4)
self.ctx.stroke()
for term in terms:
if not term in moves:
raise ValueError("Unknown direction: '%s'" % term)
(mx, my, movebeforeprint) = moves[term]
if movebeforeprint and before:
self.moveTo(mx, my)
elif not movebeforeprint and (not before) or dontdraw:
self.moveTo(mx, my)
if not dontdraw:
if before:
if self.debug:
with self.saved_context():
self.set_source_color(Color.ANNOTATIONS)
self.ctx.rectangle(0, 0, tw, th)
self.ctx.save()
if 'rotated' in terms:
self.moveTo(tw, 0, 90)
(tw, th) = (th, tw)
if 'mirror' in terms:
self.moveTo(tw, 0)
self.ctx.scale(-1, 1)
self.moveTo(self.spacing / 2.0, self.spacing / 2.0)
self.ctx.new_part()
return dontdraw
|
boxes
|
positive
|
def _remove_unaccepted_files(self, path):
"""
Recursively delete unaccepted files based on their mimetype
and return the final number or files in the directory.
"""
ls = []
for p in map(lambda x: os.path.join(path, x), os.listdir(path)):
if os.path.islink(p):
raise Exception(ElfinderErrorMessages.ERROR_ARC_SYMLINKS)
<DeepExtract>
if os.path.isdir(p):
mime = 'directory'
mime = magic.Magic(mime=True).from_file(p.encode('utf-8'))
int_mime = None
if not mime or mime in ['inode/x-empty', 'application/empty']:
int_mime = mimetypes.guess_type(name if name else p)[0]
mime = int_mime if int_mime else mime
</DeepExtract>
if not self.mime_accepted(mime) or not self._name_accepted(self._basename(p)):
if mime != 'directory':
os.unlink(p)
else:
shutil.rmtree(p)
elif mime != 'directory' or self._remove_unaccepted_files(p):
ls.append(p)
return ls
|
def _remove_unaccepted_files(self, path):
"""
Recursively delete unaccepted files based on their mimetype
and return the final number or files in the directory.
"""
ls = []
for p in map(lambda x: os.path.join(path, x), os.listdir(path)):
if os.path.islink(p):
raise Exception(ElfinderErrorMessages.ERROR_ARC_SYMLINKS)
if os.path.isdir(p):
mime = 'directory'
mime = magic.Magic(mime=True).from_file(p.encode('utf-8'))
int_mime = None
if not mime or mime in ['inode/x-empty', 'application/empty']:
int_mime = mimetypes.guess_type(name if name else p)[0]
mime = int_mime if int_mime else mime
if not self.mime_accepted(mime) or not self._name_accepted(self._basename(p)):
if mime != 'directory':
os.unlink(p)
else:
shutil.rmtree(p)
elif mime != 'directory' or self._remove_unaccepted_files(p):
ls.append(p)
return ls
|
adminset
|
positive
|
def forward(self, input: torch.Tensor, logdet=None, reverse=False, ft=None):
if not reverse:
z = input
assert z.shape[1] == self.in_channels, (z.shape[1], self.in_channels)
<DeepExtract>
z = torch.cat([ft, self.fFeatures], dim=1)
h = self.f(z)
(shift, scale) = thops.split_feature(h, 'cross')
scale = torch.sigmoid(scale + 2.0) + self.affine_eps
(scaleFt, shiftFt) = (scale, shift)
</DeepExtract>
z = z + shiftFt
z = z * scaleFt
logdet = logdet + self.get_logdet(scaleFt)
<DeepExtract>
z1 = z[:, :self.channels_for_nn]
z2 = z[:, self.channels_for_nn:]
assert z1.shape[1] + z2.shape[1] == z.shape[1], (z1.shape[1], z2.shape[1], z.shape[1])
(z1, z2) = (z1, z2)
</DeepExtract>
<DeepExtract>
z = torch.cat([z1, self.fAffine], dim=1)
h = self.f(z)
(shift, scale) = thops.split_feature(h, 'cross')
scale = torch.sigmoid(scale + 2.0) + self.affine_eps
(scale, shift) = (scale, shift)
</DeepExtract>
<DeepExtract>
assert z1.shape[1] == self.channels_for_nn, (z1.shape[1], self.channels_for_nn)
assert z2.shape[1] == self.channels_for_co, (z2.shape[1], self.channels_for_co)
assert scale.shape[1] == shift.shape[1], (scale.shape[1], shift.shape[1])
assert scale.shape[1] == z2.shape[1], (scale.shape[1], z1.shape[1], z2.shape[1])
</DeepExtract>
z2 = z2 + shift
z2 = z2 * scale
logdet = logdet + self.get_logdet(scale)
z = thops.cat_feature(z1, z2)
output = z
else:
z = input
<DeepExtract>
z1 = z[:, :self.channels_for_nn]
z2 = z[:, self.channels_for_nn:]
assert z1.shape[1] + z2.shape[1] == z.shape[1], (z1.shape[1], z2.shape[1], z.shape[1])
(z1, z2) = (z1, z2)
</DeepExtract>
<DeepExtract>
z = torch.cat([z1, self.fAffine], dim=1)
h = self.f(z)
(shift, scale) = thops.split_feature(h, 'cross')
scale = torch.sigmoid(scale + 2.0) + self.affine_eps
(scale, shift) = (scale, shift)
</DeepExtract>
<DeepExtract>
assert z1.shape[1] == self.channels_for_nn, (z1.shape[1], self.channels_for_nn)
assert z2.shape[1] == self.channels_for_co, (z2.shape[1], self.channels_for_co)
assert scale.shape[1] == shift.shape[1], (scale.shape[1], shift.shape[1])
assert scale.shape[1] == z2.shape[1], (scale.shape[1], z1.shape[1], z2.shape[1])
</DeepExtract>
z2 = z2 / scale
z2 = z2 - shift
z = thops.cat_feature(z1, z2)
logdet = logdet - self.get_logdet(scale)
<DeepExtract>
z = torch.cat([ft, self.fFeatures], dim=1)
h = self.f(z)
(shift, scale) = thops.split_feature(h, 'cross')
scale = torch.sigmoid(scale + 2.0) + self.affine_eps
(scaleFt, shiftFt) = (scale, shift)
</DeepExtract>
z = z / scaleFt
z = z - shiftFt
logdet = logdet - self.get_logdet(scaleFt)
output = z
return (output, logdet)
|
def forward(self, input: torch.Tensor, logdet=None, reverse=False, ft=None):
if not reverse:
z = input
assert z.shape[1] == self.in_channels, (z.shape[1], self.in_channels)
z = torch.cat([ft, self.fFeatures], dim=1)
h = self.f(z)
(shift, scale) = thops.split_feature(h, 'cross')
scale = torch.sigmoid(scale + 2.0) + self.affine_eps
(scaleFt, shiftFt) = (scale, shift)
z = z + shiftFt
z = z * scaleFt
logdet = logdet + self.get_logdet(scaleFt)
z1 = z[:, :self.channels_for_nn]
z2 = z[:, self.channels_for_nn:]
assert z1.shape[1] + z2.shape[1] == z.shape[1], (z1.shape[1], z2.shape[1], z.shape[1])
(z1, z2) = (z1, z2)
z = torch.cat([z1, self.fAffine], dim=1)
h = self.f(z)
(shift, scale) = thops.split_feature(h, 'cross')
scale = torch.sigmoid(scale + 2.0) + self.affine_eps
(scale, shift) = (scale, shift)
assert z1.shape[1] == self.channels_for_nn, (z1.shape[1], self.channels_for_nn)
assert z2.shape[1] == self.channels_for_co, (z2.shape[1], self.channels_for_co)
assert scale.shape[1] == shift.shape[1], (scale.shape[1], shift.shape[1])
assert scale.shape[1] == z2.shape[1], (scale.shape[1], z1.shape[1], z2.shape[1])
z2 = z2 + shift
z2 = z2 * scale
logdet = logdet + self.get_logdet(scale)
z = thops.cat_feature(z1, z2)
output = z
else:
z = input
z1 = z[:, :self.channels_for_nn]
z2 = z[:, self.channels_for_nn:]
assert z1.shape[1] + z2.shape[1] == z.shape[1], (z1.shape[1], z2.shape[1], z.shape[1])
(z1, z2) = (z1, z2)
z = torch.cat([z1, self.fAffine], dim=1)
h = self.f(z)
(shift, scale) = thops.split_feature(h, 'cross')
scale = torch.sigmoid(scale + 2.0) + self.affine_eps
(scale, shift) = (scale, shift)
assert z1.shape[1] == self.channels_for_nn, (z1.shape[1], self.channels_for_nn)
assert z2.shape[1] == self.channels_for_co, (z2.shape[1], self.channels_for_co)
assert scale.shape[1] == shift.shape[1], (scale.shape[1], shift.shape[1])
assert scale.shape[1] == z2.shape[1], (scale.shape[1], z1.shape[1], z2.shape[1])
z2 = z2 / scale
z2 = z2 - shift
z = thops.cat_feature(z1, z2)
logdet = logdet - self.get_logdet(scale)
z = torch.cat([ft, self.fFeatures], dim=1)
h = self.f(z)
(shift, scale) = thops.split_feature(h, 'cross')
scale = torch.sigmoid(scale + 2.0) + self.affine_eps
(scaleFt, shiftFt) = (scale, shift)
z = z / scaleFt
z = z - shiftFt
logdet = logdet - self.get_logdet(scaleFt)
output = z
return (output, logdet)
|
DeFlow
|
positive
|
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
<DeepExtract>
values = aPretty.split()
aList = values
</DeepExtract>
<DeepExtract>
values = bPretty.split()
bList = values
</DeepExtract>
if len(aList) != len(bList):
return False
for (a, b) in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
|
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
values = aPretty.split()
aList = values
values = bPretty.split()
bList = values
if len(aList) != len(bList):
return False
for (a, b) in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
|
comp90054-cheat
|
positive
|
def parse_and_preprocess(self, value, batch_position):
assert self.supports_datasets()
<DeepExtract>
feature_map = {'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1)}
features = tf.parse_single_example(value, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
(image_buffer, label_index, _, _) = (features['image/encoded'], label, None, None)
</DeepExtract>
<DeepExtract>
raise NotImplementedError('Must be implemented by subclass.')
</DeepExtract>
return (image, label_index)
|
def parse_and_preprocess(self, value, batch_position):
assert self.supports_datasets()
feature_map = {'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1)}
features = tf.parse_single_example(value, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
(image_buffer, label_index, _, _) = (features['image/encoded'], label, None, None)
raise NotImplementedError('Must be implemented by subclass.')
return (image, label_index)
|
AOFP
|
positive
|
def main(conf):
<DeepExtract>
if not conf['compute_wer']:
compute_metrics = COMPUTE_METRICS
try:
from espnet2.bin.asr_inference import Speech2Text
from espnet_model_zoo.downloader import ModelDownloader
except ModuleNotFoundError:
import warnings
warnings.warn("Couldn't find espnet installation. Continuing without.")
compute_metrics = COMPUTE_METRICS
compute_metrics = COMPUTE_METRICS + ['wer']
</DeepExtract>
anno_df = pd.read_csv(Path(conf['test_dir']).parent.parent.parent / 'test_annotations.csv')
wer_tracker = MockWERTracker() if not conf['compute_wer'] else WERTracker(ASR_MODEL_PATH, anno_df)
model_path = os.path.join(conf['exp_dir'], 'best_model.pth')
model = DCCRNet.from_pretrained(model_path)
if conf['use_gpu']:
model.cuda()
model_device = next(model.parameters()).device
test_set = LibriMix(csv_dir=conf['test_dir'], task=conf['task'], sample_rate=conf['sample_rate'], n_src=conf['train_conf']['data']['n_src'], segment=None, return_id=True)
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
eval_save_dir = os.path.join(conf['exp_dir'], conf['out_dir'])
ex_save_dir = os.path.join(eval_save_dir, 'examples/')
if conf['n_save_ex'] == -1:
conf['n_save_ex'] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
(mix, sources, ids) = test_set[idx]
(mix, sources) = tensors_to_device([mix, sources], device=model_device)
est_sources = model(mix.unsqueeze(0))
(loss, reordered_sources) = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix.cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(mix_np, sources_np, est_sources_np, sample_rate=conf['sample_rate'], metrics_list=COMPUTE_METRICS)
utt_metrics['mix_path'] = test_set.mixture_path
est_sources_np_normalized = normalize_estimates(est_sources_np, mix_np)
utt_metrics.update(**wer_tracker(mix=mix_np, clean=sources_np, estimate=est_sources_np_normalized, wav_id=ids, sample_rate=conf['sample_rate']))
series_list.append(pd.Series(utt_metrics))
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + 'mixture.wav', mix_np, conf['sample_rate'])
for (src_idx, src) in enumerate(sources_np):
sf.write(local_save_dir + 's{}.wav'.format(src_idx), src, conf['sample_rate'])
for (src_idx, est_src) in enumerate(est_sources_np_normalized):
sf.write(local_save_dir + 's{}_estimate.wav'.format(src_idx), est_src, conf['sample_rate'])
with open(local_save_dir + 'metrics.json', 'w') as f:
json.dump(utt_metrics, f, indent=0)
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(eval_save_dir, 'all_metrics.csv'))
final_results = {}
for metric_name in compute_metrics:
input_metric_name = 'input_' + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + '_imp'] = ldf.mean()
print('Overall metrics :')
pprint(final_results)
if conf['compute_wer']:
print('\nWER report')
wer_card = wer_tracker.final_report_as_markdown()
print(wer_card)
with open(os.path.join(eval_save_dir, 'final_wer.md'), 'w') as f:
f.write(wer_card)
with open(os.path.join(eval_save_dir, 'final_metrics.json'), 'w') as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location='cpu')
os.makedirs(os.path.join(conf['exp_dir'], 'publish_dir'), exist_ok=True)
publishable = save_publishable(os.path.join(conf['exp_dir'], 'publish_dir'), model_dict, metrics=final_results, train_conf=train_conf)
|
def main(conf):
if not conf['compute_wer']:
compute_metrics = COMPUTE_METRICS
try:
from espnet2.bin.asr_inference import Speech2Text
from espnet_model_zoo.downloader import ModelDownloader
except ModuleNotFoundError:
import warnings
warnings.warn("Couldn't find espnet installation. Continuing without.")
compute_metrics = COMPUTE_METRICS
compute_metrics = COMPUTE_METRICS + ['wer']
anno_df = pd.read_csv(Path(conf['test_dir']).parent.parent.parent / 'test_annotations.csv')
wer_tracker = MockWERTracker() if not conf['compute_wer'] else WERTracker(ASR_MODEL_PATH, anno_df)
model_path = os.path.join(conf['exp_dir'], 'best_model.pth')
model = DCCRNet.from_pretrained(model_path)
if conf['use_gpu']:
model.cuda()
model_device = next(model.parameters()).device
test_set = LibriMix(csv_dir=conf['test_dir'], task=conf['task'], sample_rate=conf['sample_rate'], n_src=conf['train_conf']['data']['n_src'], segment=None, return_id=True)
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
eval_save_dir = os.path.join(conf['exp_dir'], conf['out_dir'])
ex_save_dir = os.path.join(eval_save_dir, 'examples/')
if conf['n_save_ex'] == -1:
conf['n_save_ex'] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
(mix, sources, ids) = test_set[idx]
(mix, sources) = tensors_to_device([mix, sources], device=model_device)
est_sources = model(mix.unsqueeze(0))
(loss, reordered_sources) = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix.cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(mix_np, sources_np, est_sources_np, sample_rate=conf['sample_rate'], metrics_list=COMPUTE_METRICS)
utt_metrics['mix_path'] = test_set.mixture_path
est_sources_np_normalized = normalize_estimates(est_sources_np, mix_np)
utt_metrics.update(**wer_tracker(mix=mix_np, clean=sources_np, estimate=est_sources_np_normalized, wav_id=ids, sample_rate=conf['sample_rate']))
series_list.append(pd.Series(utt_metrics))
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + 'mixture.wav', mix_np, conf['sample_rate'])
for (src_idx, src) in enumerate(sources_np):
sf.write(local_save_dir + 's{}.wav'.format(src_idx), src, conf['sample_rate'])
for (src_idx, est_src) in enumerate(est_sources_np_normalized):
sf.write(local_save_dir + 's{}_estimate.wav'.format(src_idx), est_src, conf['sample_rate'])
with open(local_save_dir + 'metrics.json', 'w') as f:
json.dump(utt_metrics, f, indent=0)
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(eval_save_dir, 'all_metrics.csv'))
final_results = {}
for metric_name in compute_metrics:
input_metric_name = 'input_' + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + '_imp'] = ldf.mean()
print('Overall metrics :')
pprint(final_results)
if conf['compute_wer']:
print('\nWER report')
wer_card = wer_tracker.final_report_as_markdown()
print(wer_card)
with open(os.path.join(eval_save_dir, 'final_wer.md'), 'w') as f:
f.write(wer_card)
with open(os.path.join(eval_save_dir, 'final_metrics.json'), 'w') as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location='cpu')
os.makedirs(os.path.join(conf['exp_dir'], 'publish_dir'), exist_ok=True)
publishable = save_publishable(os.path.join(conf['exp_dir'], 'publish_dir'), model_dict, metrics=final_results, train_conf=train_conf)
|
asteroid
|
positive
|
def _get_or_create_profiles(bundle_ids: Sequence[BundleId], certificates: Sequence[SigningCertificate], profile_type: ProfileType, create_resource: bool, platform: Optional[BundleIdPlatform]=None):
def has_certificate(profile) -> bool:
try:
profile_certificates = self.api_client.profiles.list_certificate_ids(profile)
return bool(certificate_ids.issubset({c.id for c in profile_certificates}))
except AppStoreConnectApiError as err:
error = f'Listing {SigningCertificate.s} for {Profile} {profile.id} failed unexpectedly'
self.logger.warning(Colors.YELLOW(f'{error}: {err.error_response}'))
return False
def missing_profile(bundle_id) -> bool:
try:
bundle_ids_profiles = self.api_client.bundle_ids.list_profile_ids(bundle_id)
return not profile_ids & {p.id for p in bundle_ids_profiles}
except AppStoreConnectApiError as err:
error = f'Listing {Profile.s} for {BundleId} {bundle_id.id} failed unexpectedly'
self.logger.warning(Colors.YELLOW(f'{error}: {err.error_response}'))
return True
certificate_ids = {c.id for c in certificates}
<DeepExtract>
profiles_filter = self.api_client.profiles.Filter(profile_type=profile_type, profile_state=ProfileState.ACTIVE, name=profile_name)
profiles = []
for resource_id in set([bundle_id.id for bundle_id in bundle_ids]):
bundle_id_profiles = self._list_related_resources(resource_id, BundleId, Profile, self.api_client.bundle_ids.list_profiles, profiles_filter, False)
profiles.extend(bundle_id_profiles)
if save:
self._save_profiles(profiles)
profiles = profiles
</DeepExtract>
profiles = list(filter(has_certificate, profiles))
certificate_names = ', '.join((c.get_display_info() for c in certificates))
message = f'that contain {SigningCertificate.plural(len(certificates))} {certificate_names}'
self.printer.log_filtered(Profile, profiles, message)
for profile in profiles:
self.logger.info(f'- {profile.get_display_info()}')
profile_ids = {p.id for p in profiles}
bundle_ids_without_profiles = list(filter(missing_profile, bundle_ids))
if bundle_ids_without_profiles and (not create_resource):
missing = ', '.join((f'"{bid.attributes.identifier}" [{bid.id}]' for bid in bundle_ids_without_profiles))
raise AppStoreConnectError(f'Did not find {profile_type} {Profile.s} for {BundleId.s}: {missing}')
<DeepExtract>
if not bundle_ids_without_profiles:
created_profiles = []
if platform is None:
platform = bundle_ids_without_profiles[0].attributes.platform
devices = self.list_devices(platform=platform, device_status=DeviceStatus.ENABLED, should_print=False)
certificates = self.list_certificates(profile_type=profile_type, should_print=False)
certificate_ids = list({c.id for c in chain(certificates, certificates)})
device_ids = [d.id for d in devices if d.attributes.deviceClass.is_compatible(profile_type)]
for bundle_id in bundle_ids_without_profiles:
yield self.create_profile(bundle_id.id, certificate_ids, device_ids, profile_type=profile_type, should_print=False)
</DeepExtract>
profiles.extend(created_profiles)
return profiles
|
def _get_or_create_profiles(bundle_ids: Sequence[BundleId], certificates: Sequence[SigningCertificate], profile_type: ProfileType, create_resource: bool, platform: Optional[BundleIdPlatform]=None):
def has_certificate(profile) -> bool:
try:
profile_certificates = self.api_client.profiles.list_certificate_ids(profile)
return bool(certificate_ids.issubset({c.id for c in profile_certificates}))
except AppStoreConnectApiError as err:
error = f'Listing {SigningCertificate.s} for {Profile} {profile.id} failed unexpectedly'
self.logger.warning(Colors.YELLOW(f'{error}: {err.error_response}'))
return False
def missing_profile(bundle_id) -> bool:
try:
bundle_ids_profiles = self.api_client.bundle_ids.list_profile_ids(bundle_id)
return not profile_ids & {p.id for p in bundle_ids_profiles}
except AppStoreConnectApiError as err:
error = f'Listing {Profile.s} for {BundleId} {bundle_id.id} failed unexpectedly'
self.logger.warning(Colors.YELLOW(f'{error}: {err.error_response}'))
return True
certificate_ids = {c.id for c in certificates}
profiles_filter = self.api_client.profiles.Filter(profile_type=profile_type, profile_state=ProfileState.ACTIVE, name=profile_name)
profiles = []
for resource_id in set([bundle_id.id for bundle_id in bundle_ids]):
bundle_id_profiles = self._list_related_resources(resource_id, BundleId, Profile, self.api_client.bundle_ids.list_profiles, profiles_filter, False)
profiles.extend(bundle_id_profiles)
if save:
self._save_profiles(profiles)
profiles = profiles
profiles = list(filter(has_certificate, profiles))
certificate_names = ', '.join((c.get_display_info() for c in certificates))
message = f'that contain {SigningCertificate.plural(len(certificates))} {certificate_names}'
self.printer.log_filtered(Profile, profiles, message)
for profile in profiles:
self.logger.info(f'- {profile.get_display_info()}')
profile_ids = {p.id for p in profiles}
bundle_ids_without_profiles = list(filter(missing_profile, bundle_ids))
if bundle_ids_without_profiles and (not create_resource):
missing = ', '.join((f'"{bid.attributes.identifier}" [{bid.id}]' for bid in bundle_ids_without_profiles))
raise AppStoreConnectError(f'Did not find {profile_type} {Profile.s} for {BundleId.s}: {missing}')
if not bundle_ids_without_profiles:
created_profiles = []
if platform is None:
platform = bundle_ids_without_profiles[0].attributes.platform
devices = self.list_devices(platform=platform, device_status=DeviceStatus.ENABLED, should_print=False)
certificates = self.list_certificates(profile_type=profile_type, should_print=False)
certificate_ids = list({c.id for c in chain(certificates, certificates)})
device_ids = [d.id for d in devices if d.attributes.deviceClass.is_compatible(profile_type)]
for bundle_id in bundle_ids_without_profiles:
yield self.create_profile(bundle_id.id, certificate_ids, device_ids, profile_type=profile_type, should_print=False)
profiles.extend(created_profiles)
return profiles
|
cli-tools
|
positive
|
def _HandleLeftClickOrDoubleClick(self, evt):
"""
Handle a left click or left double click on the ListView
"""
evt.Skip()
if evt.m_altDown or evt.m_controlDown or evt.m_shiftDown:
return
if self.cellEditMode == self.CELLEDIT_NONE:
return
if evt.LeftUp() and self.cellEditMode == self.CELLEDIT_DOUBLECLICK:
return
if evt.LeftDClick() and self.cellEditMode == self.CELLEDIT_SINGLECLICK:
return
<DeepExtract>
if wx.Platform == '__WXMSW__':
(rowIndex, flags, subItemIndex) = wx.ListCtrl.HitTestSubItem(self, evt.GetPosition())
(rowIndex, flags) = self.HitTest(evt.GetPosition())
if flags & wx.LIST_HITTEST_ONITEM == 0:
(rowIndex, flags, subItemIndex) = (-1, 0, -1)
if not self.InReportView():
(rowIndex, flags, subItemIndex) = (rowIndex, wx.LIST_HITTEST_ONITEM, 0)
right = 0
scrolledX = self.GetScrollPos(wx.HORIZONTAL) + evt.GetPosition().x
for i in range(self.GetColumnCount()):
left = right
right += self.GetColumnWidth(i)
if scrolledX < right:
if scrolledX - left < self.smallImageList.GetSize(0)[0]:
flag = wx.LIST_HITTEST_ONITEMICON
else:
flag = wx.LIST_HITTEST_ONITEMLABEL
(rowIndex, flags, subItemIndex) = (rowIndex, flag, i)
(rowIndex, flags, subItemIndex) = (rowIndex, 0, -1)
</DeepExtract>
if flags & wx.LIST_HITTEST_ONITEM == 0 or subItemIndex == -1:
return
if subItemIndex == 0 and self.cellEditMode == self.CELLEDIT_SINGLECLICK:
return
<DeepExtract>
if 0 > rowIndex >= self.GetItemCount():
return
if 0 > subItemIndex >= self.GetColumnCount():
return
if self.cellEditMode == self.CELLEDIT_NONE:
return
if not self.columns[subItemIndex].isEditable:
return
if self.GetObjectAt(rowIndex) is None:
return
self.StartCellEdit(rowIndex, subItemIndex)
</DeepExtract>
|
def _HandleLeftClickOrDoubleClick(self, evt):
"""
Handle a left click or left double click on the ListView
"""
evt.Skip()
if evt.m_altDown or evt.m_controlDown or evt.m_shiftDown:
return
if self.cellEditMode == self.CELLEDIT_NONE:
return
if evt.LeftUp() and self.cellEditMode == self.CELLEDIT_DOUBLECLICK:
return
if evt.LeftDClick() and self.cellEditMode == self.CELLEDIT_SINGLECLICK:
return
if wx.Platform == '__WXMSW__':
(rowIndex, flags, subItemIndex) = wx.ListCtrl.HitTestSubItem(self, evt.GetPosition())
(rowIndex, flags) = self.HitTest(evt.GetPosition())
if flags & wx.LIST_HITTEST_ONITEM == 0:
(rowIndex, flags, subItemIndex) = (-1, 0, -1)
if not self.InReportView():
(rowIndex, flags, subItemIndex) = (rowIndex, wx.LIST_HITTEST_ONITEM, 0)
right = 0
scrolledX = self.GetScrollPos(wx.HORIZONTAL) + evt.GetPosition().x
for i in range(self.GetColumnCount()):
left = right
right += self.GetColumnWidth(i)
if scrolledX < right:
if scrolledX - left < self.smallImageList.GetSize(0)[0]:
flag = wx.LIST_HITTEST_ONITEMICON
else:
flag = wx.LIST_HITTEST_ONITEMLABEL
(rowIndex, flags, subItemIndex) = (rowIndex, flag, i)
(rowIndex, flags, subItemIndex) = (rowIndex, 0, -1)
if flags & wx.LIST_HITTEST_ONITEM == 0 or subItemIndex == -1:
return
if subItemIndex == 0 and self.cellEditMode == self.CELLEDIT_SINGLECLICK:
return
if 0 > rowIndex >= self.GetItemCount():
return
if 0 > subItemIndex >= self.GetColumnCount():
return
if self.cellEditMode == self.CELLEDIT_NONE:
return
if not self.columns[subItemIndex].isEditable:
return
if self.GetObjectAt(rowIndex) is None:
return
self.StartCellEdit(rowIndex, subItemIndex)
|
bookhub
|
positive
|
def create_game(main_user, form):
"""
Creates a Game by:
- saving the form
- setting default values
- adding users who can play the game
- creating an avatar for the main user.
- creating the game secret in game manager
:param main_user: The user who requested game creation, and is the game owner.
:param form: The form used to submit the creation of the game.
:param users_to_add_to_game: List of User objects who are able to play this game.
:return: The initialised Game object.
"""
game = form.save(commit=False)
<DeepExtract>
game.auth_token = secrets.token_urlsafe(nbytes=NUM_BYTES_FOR_TOKEN_GENERATOR)[:TOKEN_MAX_LENGTH]
</DeepExtract>
game.generator = 'Main'
game.owner = game.game_class.teacher.new_user
game.main_user = game.game_class.teacher.new_user
game.created_by = main_user.userprofile.teacher
game.save()
create_avatar_for_user(main_user, game.id)
game_manager = GameManager()
game_manager.create_game_secret(game_id=game.id, token=game.auth_token)
return game
|
def create_game(main_user, form):
"""
Creates a Game by:
- saving the form
- setting default values
- adding users who can play the game
- creating an avatar for the main user.
- creating the game secret in game manager
:param main_user: The user who requested game creation, and is the game owner.
:param form: The form used to submit the creation of the game.
:param users_to_add_to_game: List of User objects who are able to play this game.
:return: The initialised Game object.
"""
game = form.save(commit=False)
game.auth_token = secrets.token_urlsafe(nbytes=NUM_BYTES_FOR_TOKEN_GENERATOR)[:TOKEN_MAX_LENGTH]
game.generator = 'Main'
game.owner = game.game_class.teacher.new_user
game.main_user = game.game_class.teacher.new_user
game.created_by = main_user.userprofile.teacher
game.save()
create_avatar_for_user(main_user, game.id)
game_manager = GameManager()
game_manager.create_game_secret(game_id=game.id, token=game.auth_token)
return game
|
aimmo
|
positive
|
def get_is_NNP_WP(arg_clauses, clause_index, parse_dict):
if clause_index == 0:
return 'NONE'
<DeepExtract>
DocID = arg_clauses.DocID
sent_index = arg_clauses.sent_index
curr_clause_indices = arg_clauses.clauses[clause_index][0]
subtrees = []
parse_tree = parse_dict[DocID]['sentences'][sent_index]['parsetree'].strip()
syntax_tree = Syntax_tree(parse_tree)
if syntax_tree.tree != None:
clause_leaves = set([syntax_tree.get_leaf_node_by_token_index(index) for index in curr_clause_indices])
no_need = []
for node in syntax_tree.tree.traverse(strategy='levelorder'):
if node not in no_need:
if set(node.get_leaves()) <= clause_leaves:
subtrees.append(node)
no_need.extend(node.get_descendants())
production_rule = []
for tree in subtrees:
for node in tree.traverse(strategy='levelorder'):
if not node.is_leaf():
rule = node.name + '-->' + ' '.join([child.name for child in node.get_children()])
production_rule.append(rule)
curr_production_rule = production_rule
</DeepExtract>
<DeepExtract>
DocID = arg_clauses.DocID
sent_index = arg_clauses.sent_index
curr_clause_indices = arg_clauses.clauses[clause_index - 1][0]
subtrees = []
parse_tree = parse_dict[DocID]['sentences'][sent_index]['parsetree'].strip()
syntax_tree = Syntax_tree(parse_tree)
if syntax_tree.tree != None:
clause_leaves = set([syntax_tree.get_leaf_node_by_token_index(index) for index in curr_clause_indices])
no_need = []
for node in syntax_tree.tree.traverse(strategy='levelorder'):
if node not in no_need:
if set(node.get_leaves()) <= clause_leaves:
subtrees.append(node)
no_need.extend(node.get_descendants())
production_rule = []
for tree in subtrees:
for node in tree.traverse(strategy='levelorder'):
if not node.is_leaf():
rule = node.name + '-->' + ' '.join([child.name for child in node.get_children()])
production_rule.append(rule)
prev_production_rule = production_rule
</DeepExtract>
flag = 0
for rule in curr_production_rule:
(part1, part2) = rule.split('-->')
if 'WHNP' in part1 and 'WP' in part2:
flag = 1
break
if flag == 1:
for rule in prev_production_rule:
(part1, part2) = rule.split('-->')
if 'NP' in part1 and 'NNP' in part2:
return 'yes'
return 'no'
else:
return 'no'
|
def get_is_NNP_WP(arg_clauses, clause_index, parse_dict):
if clause_index == 0:
return 'NONE'
DocID = arg_clauses.DocID
sent_index = arg_clauses.sent_index
curr_clause_indices = arg_clauses.clauses[clause_index][0]
subtrees = []
parse_tree = parse_dict[DocID]['sentences'][sent_index]['parsetree'].strip()
syntax_tree = Syntax_tree(parse_tree)
if syntax_tree.tree != None:
clause_leaves = set([syntax_tree.get_leaf_node_by_token_index(index) for index in curr_clause_indices])
no_need = []
for node in syntax_tree.tree.traverse(strategy='levelorder'):
if node not in no_need:
if set(node.get_leaves()) <= clause_leaves:
subtrees.append(node)
no_need.extend(node.get_descendants())
production_rule = []
for tree in subtrees:
for node in tree.traverse(strategy='levelorder'):
if not node.is_leaf():
rule = node.name + '-->' + ' '.join([child.name for child in node.get_children()])
production_rule.append(rule)
curr_production_rule = production_rule
DocID = arg_clauses.DocID
sent_index = arg_clauses.sent_index
curr_clause_indices = arg_clauses.clauses[clause_index - 1][0]
subtrees = []
parse_tree = parse_dict[DocID]['sentences'][sent_index]['parsetree'].strip()
syntax_tree = Syntax_tree(parse_tree)
if syntax_tree.tree != None:
clause_leaves = set([syntax_tree.get_leaf_node_by_token_index(index) for index in curr_clause_indices])
no_need = []
for node in syntax_tree.tree.traverse(strategy='levelorder'):
if node not in no_need:
if set(node.get_leaves()) <= clause_leaves:
subtrees.append(node)
no_need.extend(node.get_descendants())
production_rule = []
for tree in subtrees:
for node in tree.traverse(strategy='levelorder'):
if not node.is_leaf():
rule = node.name + '-->' + ' '.join([child.name for child in node.get_children()])
production_rule.append(rule)
prev_production_rule = production_rule
flag = 0
for rule in curr_production_rule:
(part1, part2) = rule.split('-->')
if 'WHNP' in part1 and 'WP' in part2:
flag = 1
break
if flag == 1:
for rule in prev_production_rule:
(part1, part2) = rule.split('-->')
if 'NP' in part1 and 'NNP' in part2:
return 'yes'
return 'no'
else:
return 'no'
|
conll2015_discourse
|
positive
|
def flush_dbs(self, flush_data, flush_utxos, estimate_txs_remaining):
"""Flush out cached state. History is always flushed; UTXOs are
flushed if flush_utxos."""
if flush_data.height == self.db_height:
<DeepExtract>
assert flush_data.tx_count == self.fs_tx_count == self.db_tx_count
assert flush_data.height == self.fs_height == self.db_height
assert flush_data.tip == self.db_tip
assert not flush_data.headers
assert not flush_data.block_tx_hashes
assert not flush_data.adds
assert not flush_data.deletes
assert not flush_data.undo_infos
self.history.assert_flushed()
</DeepExtract>
return
start_time = time.time()
prior_flush = self.last_flush
tx_delta = flush_data.tx_count - self.last_flush_tx_count
<DeepExtract>
prior_tx_count = self.tx_counts[self.fs_height] if self.fs_height >= 0 else 0
assert len(flush_data.block_tx_hashes) == len(flush_data.headers)
assert flush_data.height == self.fs_height + len(flush_data.headers)
assert flush_data.tx_count == (self.tx_counts[-1] if self.tx_counts else 0)
assert len(self.tx_counts) == flush_data.height + 1
hashes = b''.join(flush_data.block_tx_hashes)
flush_data.block_tx_hashes.clear()
assert len(hashes) % 32 == 0
assert len(hashes) // 32 == flush_data.tx_count - prior_tx_count
start_time = time.monotonic()
height_start = self.fs_height + 1
offset = self.header_offset(height_start)
self.headers_file.write(offset, b''.join(flush_data.headers))
self.fs_update_header_offsets(offset, height_start, flush_data.headers)
flush_data.headers.clear()
offset = height_start * self.tx_counts.itemsize
self.tx_counts_file.write(offset, self.tx_counts[height_start:].tobytes())
offset = prior_tx_count * 32
self.hashes_file.write(offset, hashes)
self.fs_height = flush_data.height
self.fs_tx_count = flush_data.tx_count
if self.utxo_db.for_sync:
elapsed = time.monotonic() - start_time
self.logger.info(f'flushed filesystem data in {elapsed:.2f}s')
</DeepExtract>
<DeepExtract>
self.history.flush()
</DeepExtract>
with self.utxo_db.write_batch() as batch:
if flush_utxos:
<DeepExtract>
start_time = time.monotonic()
add_count = len(flush_data.adds)
spend_count = len(flush_data.deletes) // 2
batch_delete = batch.delete
for key in sorted(flush_data.deletes):
batch_delete(key)
flush_data.deletes.clear()
batch_put = batch.put
for (key, value) in flush_data.adds.items():
hashX = value[:HASHX_LEN]
txout_idx = key[-4:]
tx_num = value[HASHX_LEN:HASHX_LEN + TXNUM_LEN]
value_sats = value[-8:]
suffix = txout_idx + tx_num
batch_put(b'h' + key[:COMP_TXID_LEN] + suffix, hashX)
batch_put(b'u' + hashX + suffix, value_sats)
flush_data.adds.clear()
self.flush_undo_infos(batch_put, flush_data.undo_infos)
flush_data.undo_infos.clear()
if self.utxo_db.for_sync:
block_count = flush_data.height - self.db_height
tx_count = flush_data.tx_count - self.db_tx_count
elapsed = time.monotonic() - start_time
self.logger.info(f'flushed {block_count:,d} blocks with {tx_count:,d} txs, {add_count:,d} UTXO adds, {spend_count:,d} spends in {elapsed:.1f}s, committing...')
self.utxo_flush_count = self.history.flush_count
self.db_height = flush_data.height
self.db_tx_count = flush_data.tx_count
self.db_tip = flush_data.tip
</DeepExtract>
<DeepExtract>
now = time.time()
self.wall_time += now - self.last_flush
self.last_flush = now
self.last_flush_tx_count = self.fs_tx_count
self.write_utxo_state(batch)
</DeepExtract>
<DeepExtract>
now = time.time()
self.wall_time += now - self.last_flush
self.last_flush = now
self.last_flush_tx_count = self.fs_tx_count
self.write_utxo_state(self.utxo_db)
</DeepExtract>
elapsed = self.last_flush - start_time
self.logger.info(f'flush #{self.history.flush_count:,d} took {elapsed:.1f}s. Height {flush_data.height:,d} txs: {flush_data.tx_count:,d} ({tx_delta:+,d})')
if self.utxo_db.for_sync:
flush_interval = self.last_flush - prior_flush
tx_per_sec_gen = int(flush_data.tx_count / self.wall_time)
tx_per_sec_last = 1 + int(tx_delta / flush_interval)
eta = estimate_txs_remaining() / tx_per_sec_last
self.logger.info(f'tx/sec since genesis: {tx_per_sec_gen:,d}, since last flush: {tx_per_sec_last:,d}')
self.logger.info(f'sync time: {formatted_time(self.wall_time)} ETA: {formatted_time(eta)}')
|
def flush_dbs(self, flush_data, flush_utxos, estimate_txs_remaining):
"""Flush out cached state. History is always flushed; UTXOs are
flushed if flush_utxos."""
if flush_data.height == self.db_height:
assert flush_data.tx_count == self.fs_tx_count == self.db_tx_count
assert flush_data.height == self.fs_height == self.db_height
assert flush_data.tip == self.db_tip
assert not flush_data.headers
assert not flush_data.block_tx_hashes
assert not flush_data.adds
assert not flush_data.deletes
assert not flush_data.undo_infos
self.history.assert_flushed()
return
start_time = time.time()
prior_flush = self.last_flush
tx_delta = flush_data.tx_count - self.last_flush_tx_count
prior_tx_count = self.tx_counts[self.fs_height] if self.fs_height >= 0 else 0
assert len(flush_data.block_tx_hashes) == len(flush_data.headers)
assert flush_data.height == self.fs_height + len(flush_data.headers)
assert flush_data.tx_count == (self.tx_counts[-1] if self.tx_counts else 0)
assert len(self.tx_counts) == flush_data.height + 1
hashes = b''.join(flush_data.block_tx_hashes)
flush_data.block_tx_hashes.clear()
assert len(hashes) % 32 == 0
assert len(hashes) // 32 == flush_data.tx_count - prior_tx_count
start_time = time.monotonic()
height_start = self.fs_height + 1
offset = self.header_offset(height_start)
self.headers_file.write(offset, b''.join(flush_data.headers))
self.fs_update_header_offsets(offset, height_start, flush_data.headers)
flush_data.headers.clear()
offset = height_start * self.tx_counts.itemsize
self.tx_counts_file.write(offset, self.tx_counts[height_start:].tobytes())
offset = prior_tx_count * 32
self.hashes_file.write(offset, hashes)
self.fs_height = flush_data.height
self.fs_tx_count = flush_data.tx_count
if self.utxo_db.for_sync:
elapsed = time.monotonic() - start_time
self.logger.info(f'flushed filesystem data in {elapsed:.2f}s')
self.history.flush()
with self.utxo_db.write_batch() as batch:
if flush_utxos:
start_time = time.monotonic()
add_count = len(flush_data.adds)
spend_count = len(flush_data.deletes) // 2
batch_delete = batch.delete
for key in sorted(flush_data.deletes):
batch_delete(key)
flush_data.deletes.clear()
batch_put = batch.put
for (key, value) in flush_data.adds.items():
hashX = value[:HASHX_LEN]
txout_idx = key[-4:]
tx_num = value[HASHX_LEN:HASHX_LEN + TXNUM_LEN]
value_sats = value[-8:]
suffix = txout_idx + tx_num
batch_put(b'h' + key[:COMP_TXID_LEN] + suffix, hashX)
batch_put(b'u' + hashX + suffix, value_sats)
flush_data.adds.clear()
self.flush_undo_infos(batch_put, flush_data.undo_infos)
flush_data.undo_infos.clear()
if self.utxo_db.for_sync:
block_count = flush_data.height - self.db_height
tx_count = flush_data.tx_count - self.db_tx_count
elapsed = time.monotonic() - start_time
self.logger.info(f'flushed {block_count:,d} blocks with {tx_count:,d} txs, {add_count:,d} UTXO adds, {spend_count:,d} spends in {elapsed:.1f}s, committing...')
self.utxo_flush_count = self.history.flush_count
self.db_height = flush_data.height
self.db_tx_count = flush_data.tx_count
self.db_tip = flush_data.tip
now = time.time()
self.wall_time += now - self.last_flush
self.last_flush = now
self.last_flush_tx_count = self.fs_tx_count
self.write_utxo_state(batch)
now = time.time()
self.wall_time += now - self.last_flush
self.last_flush = now
self.last_flush_tx_count = self.fs_tx_count
self.write_utxo_state(self.utxo_db)
elapsed = self.last_flush - start_time
self.logger.info(f'flush #{self.history.flush_count:,d} took {elapsed:.1f}s. Height {flush_data.height:,d} txs: {flush_data.tx_count:,d} ({tx_delta:+,d})')
if self.utxo_db.for_sync:
flush_interval = self.last_flush - prior_flush
tx_per_sec_gen = int(flush_data.tx_count / self.wall_time)
tx_per_sec_last = 1 + int(tx_delta / flush_interval)
eta = estimate_txs_remaining() / tx_per_sec_last
self.logger.info(f'tx/sec since genesis: {tx_per_sec_gen:,d}, since last flush: {tx_per_sec_last:,d}')
self.logger.info(f'sync time: {formatted_time(self.wall_time)} ETA: {formatted_time(eta)}')
|
electrumx
|
positive
|
def _get_mimetree(self):
if self._mimetree is None:
tree = self._message.get_mime_tree()
<DeepExtract>
att = settings.get_theming_attribute('thread', 'body')
att_focus = settings.get_theming_attribute('thread', 'body_focus')
mimepart = tree[1] if isinstance(tree[1], (email.message.EmailMessage, Attachment)) else None
(label, subtrees) = tree
label = ANSIText(label, att, att_focus, ANSI_BACKGROUND, mimepart=mimepart)
if subtrees is None or mimepart:
tree = (label, None)
else:
tree = (label, [self._text_tree_to_widget_tree(s) for s in subtrees])
</DeepExtract>
tree = SimpleTree([tree])
self._mimetree = ArrowTree(tree)
return self._mimetree
|
def _get_mimetree(self):
if self._mimetree is None:
tree = self._message.get_mime_tree()
att = settings.get_theming_attribute('thread', 'body')
att_focus = settings.get_theming_attribute('thread', 'body_focus')
mimepart = tree[1] if isinstance(tree[1], (email.message.EmailMessage, Attachment)) else None
(label, subtrees) = tree
label = ANSIText(label, att, att_focus, ANSI_BACKGROUND, mimepart=mimepart)
if subtrees is None or mimepart:
tree = (label, None)
else:
tree = (label, [self._text_tree_to_widget_tree(s) for s in subtrees])
tree = SimpleTree([tree])
self._mimetree = ArrowTree(tree)
return self._mimetree
|
alot
|
positive
|
def predict(self, user_ids, log_seqs, item_indices):
"""Predict scores for input item sequential.
Args:
user_ids ([type]): [description]
log_seqs ([type]): [description]
item_indices ([type]): [description]
Returns:
[type]: [logits]
"""
<DeepExtract>
seqs = self.item_emb(torch.as_tensor(log_seqs, dtype=torch.long).to(self.device))
seqs *= self.item_emb.embedding_dim ** 0.5
positions = np.tile(np.array(range(log_seqs.shape[1])), [log_seqs.shape[0], 1])
seqs += self.pos_emb(torch.as_tensor(positions, dtype=torch.long).to(self.device))
seqs = self.emb_dropout(seqs)
timeline_mask = torch.BoolTensor(log_seqs == 0).to(self.device)
seqs *= ~timeline_mask.unsqueeze(-1)
tl = seqs.shape[1]
attention_mask = ~torch.tril(torch.ones((tl, tl), dtype=torch.bool, device=self.device))
for i in range(len(self.attention_layers)):
seqs = torch.transpose(seqs, 0, 1)
Q = self.attention_layernorms[i](seqs)
(mha_outputs, _) = self.attention_layers[i](Q, seqs, seqs, attn_mask=attention_mask)
seqs = Q + mha_outputs
seqs = torch.transpose(seqs, 0, 1)
seqs = self.forward_layernorms[i](seqs)
seqs = self.forward_layers[i](seqs)
seqs *= ~timeline_mask.unsqueeze(-1)
log_feats = self.last_layernorm(seqs)
log_feats = log_feats
</DeepExtract>
final_feat = log_feats[:, -1, :]
item_embs = self.item_emb(torch.as_tensor(item_indices, dtype=torch.long, device=self.device))
logits = item_embs.matmul(final_feat.unsqueeze(-1)).squeeze(-1)
return logits
|
def predict(self, user_ids, log_seqs, item_indices):
"""Predict scores for input item sequential.
Args:
user_ids ([type]): [description]
log_seqs ([type]): [description]
item_indices ([type]): [description]
Returns:
[type]: [logits]
"""
seqs = self.item_emb(torch.as_tensor(log_seqs, dtype=torch.long).to(self.device))
seqs *= self.item_emb.embedding_dim ** 0.5
positions = np.tile(np.array(range(log_seqs.shape[1])), [log_seqs.shape[0], 1])
seqs += self.pos_emb(torch.as_tensor(positions, dtype=torch.long).to(self.device))
seqs = self.emb_dropout(seqs)
timeline_mask = torch.BoolTensor(log_seqs == 0).to(self.device)
seqs *= ~timeline_mask.unsqueeze(-1)
tl = seqs.shape[1]
attention_mask = ~torch.tril(torch.ones((tl, tl), dtype=torch.bool, device=self.device))
for i in range(len(self.attention_layers)):
seqs = torch.transpose(seqs, 0, 1)
Q = self.attention_layernorms[i](seqs)
(mha_outputs, _) = self.attention_layers[i](Q, seqs, seqs, attn_mask=attention_mask)
seqs = Q + mha_outputs
seqs = torch.transpose(seqs, 0, 1)
seqs = self.forward_layernorms[i](seqs)
seqs = self.forward_layers[i](seqs)
seqs *= ~timeline_mask.unsqueeze(-1)
log_feats = self.last_layernorm(seqs)
log_feats = log_feats
final_feat = log_feats[:, -1, :]
item_embs = self.item_emb(torch.as_tensor(item_indices, dtype=torch.long, device=self.device))
logits = item_embs.matmul(final_feat.unsqueeze(-1)).squeeze(-1)
return logits
|
beta-recsys
|
positive
|
def restore_CF(self, use_QR=True, normalize=True):
if self.canonical_form == 'right':
<DeepExtract>
if use_QR:
G0 = tm.restore_RCF_r_seq(self.A, self.r, sanity_checks=self.sanity_checks, sc_data='restore_RCF_r')
if not self._are_bond_dims_synced():
log.info('Bond dimension changed during restore_RCF.')
A = copy.copy(self.A)
r = copy.copy(self.r)
self.set_state_from_tensors(A, do_update=False)
self.r = r
else:
G_n_i = sp.eye(self.D[self.N], dtype=self.typ)
for n in range(self.N, 0, -1):
(self.r[n - 1], _, G_n_i) = tm.restore_RCF_r(self.A[n], self.r[n], G_n_i, sc_data=('site', n), zero_tol=self.zero_tol, sanity_checks=self.sanity_checks)
G0 = G_n_i
if normalize:
self.A[1] *= G0[0, 0] / abs(G0[0, 0])
norm = 1.0
else:
self.A[1] *= G0[0, 0]
self.r[0] *= abs(G0[0, 0]) ** 2
norm = abs(G0[0, 0])
if self.sanity_checks:
if not sp.allclose(self.r[0].A, norm ** 2, atol=1e-12, rtol=1e-12):
log.warning('Sanity Fail in restore_RCF!: r_0 is bad / norm failure: %g vs. %g', self.r[0].A.real, norm ** 2)
if diag_l:
G = tm.restore_RCF_l_seq(self.A, self.l, sanity_checks=self.sanity_checks, sc_data='restore_RCF_l')
if self.sanity_checks:
if not sp.allclose(self.l[self.N].A, norm ** 2, atol=1e-12, rtol=1e-12):
log.warning('Sanity Fail in restore_RCF!: l_N is bad / norm failure: %g vs. %g', self.l[self.N].A.real, norm ** 2)
for n in range(1, self.N + 1):
r_nm1 = tm.eps_r_noop(self.r[n], self.A[n], self.A[n])
if not sp.allclose(r_nm1, self.r[n - 1], atol=1e-11, rtol=1e-11):
log.warning('Sanity Fail in restore_RCF!: r_%u is bad (off by %g)', n, la.norm(r_nm1 - self.r[n - 1]))
elif update_l:
self.calc_l()
</DeepExtract>
else:
<DeepExtract>
if use_QR:
GN = tm.restore_LCF_l_seq(self.A, self.l, sanity_checks=self.sanity_checks, sc_data='restore_LCF_l')
if not self._are_bond_dims_synced():
log.info('Bond dimension changed during restore_LCF.')
A = copy.copy(self.A)
l = copy.copy(self.l)
self.set_state_from_tensors(A, do_update=False)
self.l = l
else:
G = sp.eye(self.D[0], dtype=self.typ)
for n in range(1, self.N + 1):
(self.l[n], G, _) = tm.restore_LCF_l(self.A[n], self.l[n - 1], G, zero_tol=self.zero_tol, sanity_checks=self.sanity_checks)
GN = G
if normalize:
self.A[self.N] *= GN[0, 0] / abs(GN[0, 0])
norm = 1.0
else:
self.A[self.N] *= GN[0, 0]
self.l[self.N] *= abs(GN[0, 0]) ** 2
norm = abs(GN[0, 0])
if self.sanity_checks:
lN = tm.eps_l_noop(self.l[self.N - 1], self.A[self.N], self.A[self.N])
if not sp.allclose(lN, norm ** 2, atol=1e-12, rtol=1e-12):
log.warning('Sanity Fail in restore_LCF!: l_N is bad / norm failure')
if diag_r:
tm.restore_LCF_r_seq(self.A, self.r, sanity_checks=self.sanity_checks, sc_data='restore_LCF_r')
if self.sanity_checks:
if not sp.allclose(self.r[0].A, norm ** 2, atol=1e-12, rtol=1e-12):
log.warning('Sanity Fail in restore_LCF!: r_0 is bad / norm failure')
log.warning('r_0 = %s', self.r[0].squeeze().real)
for n in range(1, self.N + 1):
l = tm.eps_l_noop(self.l[n - 1], self.A[n], self.A[n])
if not sp.allclose(l, self.l[n], atol=1e-11, rtol=1e-11):
log.warning('Sanity Fail in restore_LCF!: l_%u is bad (off by %g)', n, la.norm(l - self.l[n]))
elif update_r:
self.calc_r()
</DeepExtract>
|
def restore_CF(self, use_QR=True, normalize=True):
if self.canonical_form == 'right':
if use_QR:
G0 = tm.restore_RCF_r_seq(self.A, self.r, sanity_checks=self.sanity_checks, sc_data='restore_RCF_r')
if not self._are_bond_dims_synced():
log.info('Bond dimension changed during restore_RCF.')
A = copy.copy(self.A)
r = copy.copy(self.r)
self.set_state_from_tensors(A, do_update=False)
self.r = r
else:
G_n_i = sp.eye(self.D[self.N], dtype=self.typ)
for n in range(self.N, 0, -1):
(self.r[n - 1], _, G_n_i) = tm.restore_RCF_r(self.A[n], self.r[n], G_n_i, sc_data=('site', n), zero_tol=self.zero_tol, sanity_checks=self.sanity_checks)
G0 = G_n_i
if normalize:
self.A[1] *= G0[0, 0] / abs(G0[0, 0])
norm = 1.0
else:
self.A[1] *= G0[0, 0]
self.r[0] *= abs(G0[0, 0]) ** 2
norm = abs(G0[0, 0])
if self.sanity_checks:
if not sp.allclose(self.r[0].A, norm ** 2, atol=1e-12, rtol=1e-12):
log.warning('Sanity Fail in restore_RCF!: r_0 is bad / norm failure: %g vs. %g', self.r[0].A.real, norm ** 2)
if diag_l:
G = tm.restore_RCF_l_seq(self.A, self.l, sanity_checks=self.sanity_checks, sc_data='restore_RCF_l')
if self.sanity_checks:
if not sp.allclose(self.l[self.N].A, norm ** 2, atol=1e-12, rtol=1e-12):
log.warning('Sanity Fail in restore_RCF!: l_N is bad / norm failure: %g vs. %g', self.l[self.N].A.real, norm ** 2)
for n in range(1, self.N + 1):
r_nm1 = tm.eps_r_noop(self.r[n], self.A[n], self.A[n])
if not sp.allclose(r_nm1, self.r[n - 1], atol=1e-11, rtol=1e-11):
log.warning('Sanity Fail in restore_RCF!: r_%u is bad (off by %g)', n, la.norm(r_nm1 - self.r[n - 1]))
elif update_l:
self.calc_l()
else:
if use_QR:
GN = tm.restore_LCF_l_seq(self.A, self.l, sanity_checks=self.sanity_checks, sc_data='restore_LCF_l')
if not self._are_bond_dims_synced():
log.info('Bond dimension changed during restore_LCF.')
A = copy.copy(self.A)
l = copy.copy(self.l)
self.set_state_from_tensors(A, do_update=False)
self.l = l
else:
G = sp.eye(self.D[0], dtype=self.typ)
for n in range(1, self.N + 1):
(self.l[n], G, _) = tm.restore_LCF_l(self.A[n], self.l[n - 1], G, zero_tol=self.zero_tol, sanity_checks=self.sanity_checks)
GN = G
if normalize:
self.A[self.N] *= GN[0, 0] / abs(GN[0, 0])
norm = 1.0
else:
self.A[self.N] *= GN[0, 0]
self.l[self.N] *= abs(GN[0, 0]) ** 2
norm = abs(GN[0, 0])
if self.sanity_checks:
lN = tm.eps_l_noop(self.l[self.N - 1], self.A[self.N], self.A[self.N])
if not sp.allclose(lN, norm ** 2, atol=1e-12, rtol=1e-12):
log.warning('Sanity Fail in restore_LCF!: l_N is bad / norm failure')
if diag_r:
tm.restore_LCF_r_seq(self.A, self.r, sanity_checks=self.sanity_checks, sc_data='restore_LCF_r')
if self.sanity_checks:
if not sp.allclose(self.r[0].A, norm ** 2, atol=1e-12, rtol=1e-12):
log.warning('Sanity Fail in restore_LCF!: r_0 is bad / norm failure')
log.warning('r_0 = %s', self.r[0].squeeze().real)
for n in range(1, self.N + 1):
l = tm.eps_l_noop(self.l[n - 1], self.A[n], self.A[n])
if not sp.allclose(l, self.l[n], atol=1e-11, rtol=1e-11):
log.warning('Sanity Fail in restore_LCF!: l_%u is bad (off by %g)', n, la.norm(l - self.l[n]))
elif update_r:
self.calc_r()
|
evoMPS
|
positive
|
def get_all_credentials(self):
auth_data = self.auths.copy()
if self.creds_store:
<DeepExtract>
if self.creds_store not in self._stores:
self._stores[self.creds_store] = Store(self.creds_store, environment=self._credstore_env)
store = self._stores[self.creds_store]
</DeepExtract>
for k in store.list().keys():
<DeepExtract>
if not k or k == INDEX_NAME:
k = INDEX_URL
log.debug('Looking for auth entry for %s', repr(k))
store = self._get_store_instance(self.creds_store)
try:
data = store.get(k)
res = {'ServerAddress': k}
if data['Username'] == TOKEN_USERNAME:
res['IdentityToken'] = data['Secret']
else:
res.update({'Username': data['Username'], 'Password': data['Secret']})
auth_data[k] = res
except CredentialsNotFound:
log.debug('No entry found')
auth_data[k] = None
except StoreError as e:
raise errors.DockerException('Credentials store error: {0}'.format(repr(e)))
</DeepExtract>
auth_data[convert_to_hostname(k)] = auth_data[k]
for (reg, store_name) in self.cred_helpers.items():
<DeepExtract>
if not reg or reg == INDEX_NAME:
reg = INDEX_URL
log.debug('Looking for auth entry for %s', repr(reg))
store = self._get_store_instance(store_name)
try:
data = store.get(reg)
res = {'ServerAddress': reg}
if data['Username'] == TOKEN_USERNAME:
res['IdentityToken'] = data['Secret']
else:
res.update({'Username': data['Username'], 'Password': data['Secret']})
auth_data[reg] = res
except CredentialsNotFound:
log.debug('No entry found')
auth_data[reg] = None
except StoreError as e:
raise errors.DockerException('Credentials store error: {0}'.format(repr(e)))
</DeepExtract>
auth_data[convert_to_hostname(reg)] = auth_data[reg]
return auth_data
|
def get_all_credentials(self):
auth_data = self.auths.copy()
if self.creds_store:
if self.creds_store not in self._stores:
self._stores[self.creds_store] = Store(self.creds_store, environment=self._credstore_env)
store = self._stores[self.creds_store]
for k in store.list().keys():
if not k or k == INDEX_NAME:
k = INDEX_URL
log.debug('Looking for auth entry for %s', repr(k))
store = self._get_store_instance(self.creds_store)
try:
data = store.get(k)
res = {'ServerAddress': k}
if data['Username'] == TOKEN_USERNAME:
res['IdentityToken'] = data['Secret']
else:
res.update({'Username': data['Username'], 'Password': data['Secret']})
auth_data[k] = res
except CredentialsNotFound:
log.debug('No entry found')
auth_data[k] = None
except StoreError as e:
raise errors.DockerException('Credentials store error: {0}'.format(repr(e)))
auth_data[convert_to_hostname(k)] = auth_data[k]
for (reg, store_name) in self.cred_helpers.items():
if not reg or reg == INDEX_NAME:
reg = INDEX_URL
log.debug('Looking for auth entry for %s', repr(reg))
store = self._get_store_instance(store_name)
try:
data = store.get(reg)
res = {'ServerAddress': reg}
if data['Username'] == TOKEN_USERNAME:
res['IdentityToken'] = data['Secret']
else:
res.update({'Username': data['Username'], 'Password': data['Secret']})
auth_data[reg] = res
except CredentialsNotFound:
log.debug('No entry found')
auth_data[reg] = None
except StoreError as e:
raise errors.DockerException('Credentials store error: {0}'.format(repr(e)))
auth_data[convert_to_hostname(reg)] = auth_data[reg]
return auth_data
|
community.docker
|
positive
|
def test_select_with_zmq_error(self):
s = self.SELECTOR()
self.addCleanup(s.close)
<DeepExtract>
(rd, wr) = socketpair()
self.addCleanup(rd.close)
self.addCleanup(wr.close)
(rd, wr) = (rd, wr)
</DeepExtract>
s.register(rd, EVENT_READ)
m = mock.Mock()
m.side_effect = zmq.ZMQError(errno.EFAULT, 'not a socket')
s._poller.poll = m
with self.assertRaises(OSError) as ctx:
s.select()
self.assertEqual(errno.EFAULT, ctx.exception.errno)
|
def test_select_with_zmq_error(self):
s = self.SELECTOR()
self.addCleanup(s.close)
(rd, wr) = socketpair()
self.addCleanup(rd.close)
self.addCleanup(wr.close)
(rd, wr) = (rd, wr)
s.register(rd, EVENT_READ)
m = mock.Mock()
m.side_effect = zmq.ZMQError(errno.EFAULT, 'not a socket')
s._poller.poll = m
with self.assertRaises(OSError) as ctx:
s.select()
self.assertEqual(errno.EFAULT, ctx.exception.errno)
|
aiozmq
|
positive
|
def _check_package_selection_parameters(args, pkg_names):
<DeepExtract>
extensions = instantiate_extensions(__name__)
for (name, extension) in extensions.items():
extension.PACKAGE_SELECTION_NAME = name
package_selection_extensions = order_extensions_by_priority(extensions)
</DeepExtract>
for extension in package_selection_extensions.values():
try:
retval = extension.check_parameters(args=args, pkg_names=pkg_names)
assert retval is None, 'check_parameters() should return None'
except Exception as e:
exc = traceback.format_exc()
logger.error(f"Exception in package selection extension '{extension.PACKAGE_SELECTION_NAME}': {e}\n{exc}")
|
def _check_package_selection_parameters(args, pkg_names):
extensions = instantiate_extensions(__name__)
for (name, extension) in extensions.items():
extension.PACKAGE_SELECTION_NAME = name
package_selection_extensions = order_extensions_by_priority(extensions)
for extension in package_selection_extensions.values():
try:
retval = extension.check_parameters(args=args, pkg_names=pkg_names)
assert retval is None, 'check_parameters() should return None'
except Exception as e:
exc = traceback.format_exc()
logger.error(f"Exception in package selection extension '{extension.PACKAGE_SELECTION_NAME}': {e}\n{exc}")
|
colcon-core
|
positive
|
def testClassificationShapes(self):
global_pool = True
num_classes = 10
<DeepExtract>
if None in [2, 224, 224, 3]:
inputs = tf.placeholder(tf.float32, (2, 224, 224, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(224), [224, 1]) + np.reshape(np.arange(224), [1, 224]), [1, 224, 224, 1]), [2, 1, 1, 3]))
</DeepExtract>
with slim.arg_scope(xception.xception_arg_scope()):
<DeepExtract>
block = xception.xception_block
blocks = [block('entry_flow/block1', depth_list=[1, 1, 1], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), block('entry_flow/block2', depth_list=[2, 2, 2], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), block('entry_flow/block3', depth_list=[4, 4, 4], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=1), block('entry_flow/block4', depth_list=[4, 4, 4], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), block('middle_flow/block1', depth_list=[4, 4, 4], skip_connection_type='sum', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=2, stride=1), block('exit_flow/block1', depth_list=[8, 8, 8], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), block('exit_flow/block2', depth_list=[16, 16, 16], skip_connection_type='none', activation_fn_in_separable_conv=True, regularize_depthwise=regularize_depthwise, num_units=1, stride=1)]
(_, end_points) = xception.xception(inputs, blocks=blocks, num_classes=num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, reuse=reuse, scope='xception')
</DeepExtract>
endpoint_to_shape = {'xception/entry_flow/conv1_1': [2, 112, 112, 32], 'xception/entry_flow/block1': [2, 56, 56, 1], 'xception/entry_flow/block2': [2, 28, 28, 2], 'xception/entry_flow/block4': [2, 14, 14, 4], 'xception/middle_flow/block1': [2, 14, 14, 4], 'xception/exit_flow/block1': [2, 7, 7, 8], 'xception/exit_flow/block2': [2, 7, 7, 16]}
for (endpoint, shape) in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
|
def testClassificationShapes(self):
global_pool = True
num_classes = 10
if None in [2, 224, 224, 3]:
inputs = tf.placeholder(tf.float32, (2, 224, 224, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(224), [224, 1]) + np.reshape(np.arange(224), [1, 224]), [1, 224, 224, 1]), [2, 1, 1, 3]))
with slim.arg_scope(xception.xception_arg_scope()):
block = xception.xception_block
blocks = [block('entry_flow/block1', depth_list=[1, 1, 1], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), block('entry_flow/block2', depth_list=[2, 2, 2], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), block('entry_flow/block3', depth_list=[4, 4, 4], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=1), block('entry_flow/block4', depth_list=[4, 4, 4], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), block('middle_flow/block1', depth_list=[4, 4, 4], skip_connection_type='sum', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=2, stride=1), block('exit_flow/block1', depth_list=[8, 8, 8], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), block('exit_flow/block2', depth_list=[16, 16, 16], skip_connection_type='none', activation_fn_in_separable_conv=True, regularize_depthwise=regularize_depthwise, num_units=1, stride=1)]
(_, end_points) = xception.xception(inputs, blocks=blocks, num_classes=num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, reuse=reuse, scope='xception')
endpoint_to_shape = {'xception/entry_flow/conv1_1': [2, 112, 112, 32], 'xception/entry_flow/block1': [2, 56, 56, 1], 'xception/entry_flow/block2': [2, 28, 28, 2], 'xception/entry_flow/block4': [2, 14, 14, 4], 'xception/middle_flow/block1': [2, 14, 14, 4], 'xception/exit_flow/block1': [2, 7, 7, 8], 'xception/exit_flow/block2': [2, 7, 7, 16]}
for (endpoint, shape) in six.iteritems(endpoint_to_shape):
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
|
data-science-bowl-2018
|
positive
|
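A quick way to sanity-check the endpoint shapes asserted in the test above is to walk the stride schedule by hand. The sketch below is illustrative only: the stage list and the 224x224 input size come from the test, while the loop and variable names are assumptions.

# Illustrative stride walk: each stride-2 stage halves the spatial size,
# which is where the 112/56/28/14/7 endpoint sizes in the test come from.
size = 224
for name, stride in [('entry_flow/conv1_1', 2), ('entry_flow/block1', 2),
                     ('entry_flow/block2', 2), ('entry_flow/block3', 1),
                     ('entry_flow/block4', 2), ('middle_flow/block1', 1),
                     ('exit_flow/block1', 2), ('exit_flow/block2', 1)]:
    size //= stride
    print(name, size)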
def plot_history(history, metric, targets):
if isinstance(targets, str):
fig = plt.figure(figsize=(15, 6))
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
ax1 = plt.subplot(1, 2, 1)
ax1.set_title('Model Training vs Validation Loss')
<DeepExtract>
train_metrics = history.history['loss']
val_metrics = history.history['val_' + 'loss']
epochs = range(1, len(train_metrics) + 1)
ax1.plot(epochs, train_metrics)
ax1.plot(epochs, val_metrics)
ax1.set_xlabel('Epochs')
ax1.set_ylabel('loss')
ax1.legend(['train_' + 'loss', 'val_' + 'loss'])
</DeepExtract>
ax2 = plt.subplot(1, 2, 2)
ax2.set_title('Model Training vs Validation %s' % metric)
<DeepExtract>
train_metrics = history.history[metric]
val_metrics = history.history['val_' + metric]
epochs = range(1, len(train_metrics) + 1)
ax2.plot(epochs, train_metrics)
ax2.plot(epochs, val_metrics)
ax2.set_xlabel('Epochs')
ax2.set_ylabel(metric)
ax2.legend(['train_' + metric, 'val_' + metric])
</DeepExtract>
else:
for each_target in targets:
fig = plt.figure(figsize=(15, 6))
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
ax1 = plt.subplot(1, 2, 1)
ax1.set_title('Model Training vs Validation Loss')
<DeepExtract>
train_metrics = history.history[each_target + '_loss']
val_metrics = history.history['val_' + each_target + '_loss']
epochs = range(1, len(train_metrics) + 1)
ax1.plot(epochs, train_metrics)
ax1.plot(epochs, val_metrics)
ax1.set_xlabel('Epochs')
ax1.set_ylabel(each_target + '_loss')
ax1.legend(['train_' + each_target + '_loss', 'val_' + each_target + '_loss'])
</DeepExtract>
ax2 = plt.subplot(1, 2, 2)
metric1 = [x for x in hist.columns.tolist() if (each_target in x) & ('loss' not in x)]
metric2 = metric1[0]
ax2.set_title('Model Training vs Validation %s' % metric2)
<DeepExtract>
train_metrics = history.history[metric2]
val_metrics = history.history['val_' + metric2]
epochs = range(1, len(train_metrics) + 1)
ax2.plot(epochs, train_metrics)
ax2.plot(epochs, val_metrics)
ax2.set_xlabel('Epochs')
ax2.set_ylabel(metric2)
ax2.legend(['train_' + metric2, 'val_' + metric2])
</DeepExtract>
plt.show()
|
def plot_history(history, metric, targets):
if isinstance(targets, str):
fig = plt.figure(figsize=(15, 6))
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
ax1 = plt.subplot(1, 2, 1)
ax1.set_title('Model Training vs Validation Loss')
train_metrics = history.history['loss']
val_metrics = history.history['val_' + 'loss']
epochs = range(1, len(train_metrics) + 1)
ax1.plot(epochs, train_metrics)
ax1.plot(epochs, val_metrics)
ax1.set_xlabel('Epochs')
ax1.set_ylabel('loss')
ax1.legend(['train_' + 'loss', 'val_' + 'loss'])
ax2 = plt.subplot(1, 2, 2)
ax2.set_title('Model Training vs Validation %s' % metric)
train_metrics = history.history[metric]
val_metrics = history.history['val_' + metric]
epochs = range(1, len(train_metrics) + 1)
ax2.plot(epochs, train_metrics)
ax2.plot(epochs, val_metrics)
ax2.set_xlabel('Epochs')
ax2.set_ylabel(metric)
ax2.legend(['train_' + metric, 'val_' + metric])
else:
for each_target in targets:
fig = plt.figure(figsize=(15, 6))
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
ax1 = plt.subplot(1, 2, 1)
ax1.set_title('Model Training vs Validation Loss')
train_metrics = history.history[each_target + '_loss']
val_metrics = history.history['val_' + each_target + '_loss']
epochs = range(1, len(train_metrics) + 1)
ax1.plot(epochs, train_metrics)
ax1.plot(epochs, val_metrics)
ax1.set_xlabel('Epochs')
ax1.set_ylabel(each_target + '_loss')
ax1.legend(['train_' + each_target + '_loss', 'val_' + each_target + '_loss'])
ax2 = plt.subplot(1, 2, 2)
metric1 = [x for x in hist.columns.tolist() if (each_target in x) & ('loss' not in x)]
metric2 = metric1[0]
ax2.set_title('Model Training vs Validation %s' % metric2)
train_metrics = history.history[metric2]
val_metrics = history.history['val_' + metric2]
epochs = range(1, len(train_metrics) + 1)
ax2.plot(epochs, train_metrics)
ax2.plot(epochs, val_metrics)
ax2.set_xlabel('Epochs')
ax2.set_ylabel(metric2)
ax2.legend(['train_' + metric2, 'val_' + metric2])
plt.show()
|
deep_autoviml
|
positive
|
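As a hedged usage sketch for a plot_history-style helper: the snippet below builds a tiny stand-in for a Keras History object (the attribute layout mirrors the history.history and history.epoch fields read above; the metric values are made up) and draws the same train-vs-validation pair of curves for one metric.

from types import SimpleNamespace
import matplotlib.pyplot as plt

# Made-up history values; only the attribute layout matters here.
fake_history = SimpleNamespace(
    history={'loss': [0.9, 0.6, 0.4], 'val_loss': [1.0, 0.7, 0.5]},
    epoch=[0, 1, 2],
)

epochs = range(1, len(fake_history.history['loss']) + 1)
plt.plot(epochs, fake_history.history['loss'], label='train_loss')
plt.plot(epochs, fake_history.history['val_loss'], label='val_loss')
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend()
plt.show()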
def delete_last(self, num_to_delete=1):
"""Deletes the last N datapoints from the dataset.
Parameters
----------
num_to_delete : int
the number of datapoints to remove from the end of the dataset
"""
if self._access_mode == READ_ONLY_ACCESS:
raise ValueError('Cannot delete datapoints with read-only access')
if num_to_delete > self._num_datapoints:
raise ValueError('Cannot remove more than the number of datapoints in the dataset')
last_datapoint_ind = self._num_datapoints - 1
last_tensor_ind = last_datapoint_ind // self._datapoints_per_file
new_last_datapoint_ind = self._num_datapoints - 1 - num_to_delete
new_num_datapoints = new_last_datapoint_ind + 1
new_last_datapoint_ind = max(new_last_datapoint_ind, 0)
new_last_tensor_ind = new_last_datapoint_ind // self._datapoints_per_file
delete_tensor_ind = range(new_last_tensor_ind + 1, last_tensor_ind + 1)
for tensor_ind in delete_tensor_ind:
for field_name in self.field_names:
<DeepExtract>
file_ext = TENSOR_EXT
if compressed:
file_ext = COMPRESSED_TENSOR_EXT
filename = os.path.join(self.filename, 'tensors', ('%s_%0' + str(self._filename_numeric_label_place) + 'd%s') % (field_name, tensor_ind, file_ext))
filename = filename
</DeepExtract>
os.remove(filename)
dataset_empty = False
target_tensor_size = new_num_datapoints % self._datapoints_per_file
if target_tensor_size == 0:
if new_num_datapoints > 0:
target_tensor_size = self._datapoints_per_file
else:
dataset_empty = True
for field_name in self.field_names:
<DeepExtract>
if new_last_tensor_ind == self._tensor_cache_file_num[field_name]:
new_last_tensor = self._tensors[field_name]
filename = self.generate_tensor_filename(field_name, new_last_tensor_ind, compressed=True)
Tensor.load(filename, compressed=True, prealloc=self._tensors[field_name])
self._tensor_cache_file_num[field_name] = new_last_tensor_ind
new_last_tensor = self._tensors[field_name]
</DeepExtract>
while new_last_tensor.size > target_tensor_size:
new_last_tensor.delete_last()
<DeepExtract>
file_ext = TENSOR_EXT
if compressed:
file_ext = COMPRESSED_TENSOR_EXT
filename = os.path.join(self.filename, 'tensors', ('%s_%0' + str(self._filename_numeric_label_place) + 'd%s') % (field_name, new_last_tensor_ind, file_ext))
filename = filename
</DeepExtract>
new_last_tensor.save(filename, compressed=True)
if not new_last_tensor.has_data:
os.remove(filename)
new_last_tensor.reset()
if self._num_datapoints - 1 - num_to_delete >= 0:
self._num_datapoints = new_num_datapoints
else:
self._num_datapoints = 0
self._num_tensors = new_last_tensor_ind + 1
if dataset_empty:
self._num_tensors = 0
|
def delete_last(self, num_to_delete=1):
"""Deletes the last N datapoints from the dataset.
Parameters
----------
num_to_delete : int
the number of datapoints to remove from the end of the dataset
"""
if self._access_mode == READ_ONLY_ACCESS:
raise ValueError('Cannot delete datapoints with read-only access')
if num_to_delete > self._num_datapoints:
raise ValueError('Cannot remove more than the number of datapoints in the dataset')
last_datapoint_ind = self._num_datapoints - 1
last_tensor_ind = last_datapoint_ind // self._datapoints_per_file
new_last_datapoint_ind = self._num_datapoints - 1 - num_to_delete
new_num_datapoints = new_last_datapoint_ind + 1
new_last_datapoint_ind = max(new_last_datapoint_ind, 0)
new_last_tensor_ind = new_last_datapoint_ind // self._datapoints_per_file
delete_tensor_ind = range(new_last_tensor_ind + 1, last_tensor_ind + 1)
for tensor_ind in delete_tensor_ind:
for field_name in self.field_names:
file_ext = TENSOR_EXT
if compressed:
file_ext = COMPRESSED_TENSOR_EXT
filename = os.path.join(self.filename, 'tensors', ('%s_%0' + str(self._filename_numeric_label_place) + 'd%s') % (field_name, tensor_ind, file_ext))
filename = filename
os.remove(filename)
dataset_empty = False
target_tensor_size = new_num_datapoints % self._datapoints_per_file
if target_tensor_size == 0:
if new_num_datapoints > 0:
target_tensor_size = self._datapoints_per_file
else:
dataset_empty = True
for field_name in self.field_names:
if new_last_tensor_ind == self._tensor_cache_file_num[field_name]:
new_last_tensor = self._tensors[field_name]
filename = self.generate_tensor_filename(field_name, new_last_tensor_ind, compressed=True)
Tensor.load(filename, compressed=True, prealloc=self._tensors[field_name])
self._tensor_cache_file_num[field_name] = new_last_tensor_ind
new_last_tensor = self._tensors[field_name]
while new_last_tensor.size > target_tensor_size:
new_last_tensor.delete_last()
file_ext = TENSOR_EXT
if compressed:
file_ext = COMPRESSED_TENSOR_EXT
filename = os.path.join(self.filename, 'tensors', ('%s_%0' + str(self._filename_numeric_label_place) + 'd%s') % (field_name, new_last_tensor_ind, file_ext))
filename = filename
new_last_tensor.save(filename, compressed=True)
if not new_last_tensor.has_data:
os.remove(filename)
new_last_tensor.reset()
if self._num_datapoints - 1 - num_to_delete >= 0:
self._num_datapoints = new_num_datapoints
else:
self._num_datapoints = 0
self._num_tensors = new_last_tensor_ind + 1
if dataset_empty:
self._num_tensors = 0
|
autolab_core
|
positive
|
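The bookkeeping in delete_last reduces to integer arithmetic over file-sized chunks. Below is a standalone sketch with made-up numbers (datapoints_per_file, num_datapoints and num_to_delete are assumptions) showing which tensor file becomes the last one and how many datapoints it should keep.

datapoints_per_file = 100
num_datapoints = 250
num_to_delete = 60

last_tensor_ind = (num_datapoints - 1) // datapoints_per_file            # 2
new_last_datapoint_ind = max(num_datapoints - 1 - num_to_delete, 0)      # 189
new_last_tensor_ind = new_last_datapoint_ind // datapoints_per_file      # 1
target_tensor_size = (new_last_datapoint_ind + 1) % datapoints_per_file  # 90

# Files after index new_last_tensor_ind are deleted outright; the new last
# tensor is trimmed until it holds target_tensor_size datapoints.
print(last_tensor_ind, new_last_tensor_ind, target_tensor_size)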
def _fit_implementation(self, x, y, verbose, callbacks, sample_weight, validation_data, validation_steps, steps_per_epoch, class_weight) -> tf.keras.callbacks.History:
"""Train the model.
This method performs operations that resemble Keras' fit function.
Details:
1. Load the training dataset in Yggdrasil (locally or remotely).
2. Load the validation dataset in Yggdrasil (locally or ignored).
3. Train the Yggdrasil model. Depending on the learner, this might also
evaluate the model (on the validation dataset or with out-of-bag).
4. Evaluate the model using the Keras metrics.
We recommend training / validating models with finite datasets. For
backward compatibility with the early version of TF-DF, we also support
infinite dataset with specified number of steps (`steps_per_epoch` and
`validation_steps` arguments). Using a number of steps currently raises
a warning, and will later raise an error (and the logic be removed).
"""
if not isinstance(callbacks, tf.keras.callbacks.CallbackList):
callbacks = tf.keras.callbacks.CallbackList(callbacks, model=self, add_history=False)
history = tf.keras.callbacks.History()
history.model = self
history.on_train_begin()
history.on_epoch_begin(0)
callbacks.on_train_begin()
callbacks.on_epoch_begin(0)
distribution_config = tf_core.get_distribution_configuration(self.distribute_strategy)
validation_data_handler = None
if validation_data:
(val_x, val_y, val_sample_weight) = tf.keras.utils.unpack_x_y_sample_weight(validation_data)
if distribution_config is None or not self.support_distributed_training():
validation_data_handler = get_data_handler(x=val_x, y=val_y, sample_weight=val_sample_weight, model=self, steps_per_epoch=validation_steps, class_weight=class_weight)
time_begin_train_dataset_reading = datetime.now()
if self._verbose >= 1:
tf_logging.info('Reading training dataset...')
coordinator = None
if distribution_config is None or not self.support_distributed_training():
data_handler = get_data_handler(x=x, y=y, sample_weight=sample_weight, model=self, class_weight=class_weight)
iterator = iter(data_handler._dataset)
if steps_per_epoch is None:
<DeepExtract>
num_examples = 0
for data in iterator:
num_examples += self.train_step(data)
self._num_training_examples = num_examples
</DeepExtract>
else:
tf_logging.warning('You are using non-distributed training with steps_per_epoch. This solution will lead to a sub-optimal model. Instead, use a finite training dataset (e.g. a dataset without repeat operation) and remove the `steps_per_epoch` argument. This warning will be turned into an error in the future.')
def consumes_one_training_batch(iterator):
data = next(iterator)
return self.train_step(data)
tf_consumes_one_training_batch = tf.function(consumes_one_training_batch, reduce_retracing=True)
num_examples = 0
try:
for _ in range(steps_per_epoch):
num_examples += tf_consumes_one_training_batch(iterator)
except tf.errors.OutOfRangeError:
pass
self._num_training_examples = num_examples
else:
if class_weight is not None:
raise ValueError('class_weight not support for distributed training. Feed the example weights through the dataset.')
if not self.capabilities().support_partial_cache_dataset_format:
raise ValueError(f'The model {type(self)} does not support training with a TF Distribution strategy (i.e. model.capabilities().support_partial_cache_dataset_format == False). If the dataset is small, simply remove the distribution strategy scope (i.e. `with strategy.scope():` around the model construction). If the dataset is large, use a distributed version of the model. For example, use DistributedGradientBoostedTreesModel instead of GradientBoostedTreesModel.')
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(self.distribute_strategy)
with self.distribute_strategy.scope():
per_worker_dataset = coordinator.create_per_worker_dataset(x)
per_worker_iter = iter(per_worker_dataset)
if steps_per_epoch is not None:
tf_logging.warning('You are using distributed training with steps_per_epoch. This solution will lead to a sub-optimal model. Instead, use a finite training dataset (e.g. a dataset without repeat operation) and remove the `steps_per_epoch` argument.')
def remote_consumes_one_training_batch(iterator):
data = next(iterator)
<DeepExtract>
return self.collect_data_step(data, is_training_example=True)
</DeepExtract>
tf_remote_consumes_one_training_batch = tf.function(remote_consumes_one_training_batch, reduce_retracing=True)
tf_logging.info('Scheduling of %d steps', steps_per_epoch)
for _ in range(steps_per_epoch):
coordinator.schedule(tf_remote_consumes_one_training_batch, args=(per_worker_iter,))
tf_logging.info('Waiting for scheduled steps to complete')
coordinator.join()
self._num_training_examples = -1
else:
def remote_consumes_training_examples_until_eof(iterator):
return self._consumes_training_examples_until_eof(iterator)
tf_remote_consumes_training_examples_until_eof = tf.function(remote_consumes_training_examples_until_eof, reduce_retracing=True)
self._num_training_examples = tf_core.execute_function_on_each_worker(coordinator, tf_remote_consumes_training_examples_until_eof, (per_worker_iter,))
if self._verbose >= 1:
t = datetime.now() - time_begin_train_dataset_reading
tf_logging.info(f'Training dataset read in {t}. Found {self._num_training_examples} examples.')
if validation_data is not None:
if coordinator is not None and self.support_distributed_training():
assert validation_data_handler is None
tf_logging.warning('With distributed training, the validation dataset is not (yet) used for early stopping.')
self._num_validation_examples = None
else:
assert validation_data_handler is not None
time_begin_valid_dataset_reading = datetime.now()
if self._verbose >= 1:
tf_logging.info('Reading validation dataset...')
iterator = iter(validation_data_handler._dataset)
if validation_steps is None:
<DeepExtract>
num_examples = 0
for data in iterator:
num_examples += self.valid_step(data)
self._num_validation_examples = num_examples
</DeepExtract>
else:
tf_logging.warning('You are using non-distributed validation with steps_per_epoch. This solution will lead to a sub-optimal model. Instead, use a finite validation dataset (e.g. a dataset without repeat operation) and remove the `validation_steps` argument. This warning will be turned into an error in the future.')
def consumes_one_valid_batch(iterator):
data = next(iterator)
return self.valid_step(data)
tf_consumes_one_valid_batch = tf.function(consumes_one_valid_batch, reduce_retracing=True)
num_examples = 0
try:
for _ in range(validation_steps):
num_examples += tf_consumes_one_valid_batch(iterator)
except tf.errors.OutOfRangeError:
pass
self._num_validation_examples = num_examples
tf_logging.info('Num validation examples: %s', self.num_validation_examples)
if self._verbose >= 1:
t = datetime.now() - time_begin_valid_dataset_reading
tf_logging.info(f'Validation dataset read in {t}. Found {self._num_validation_examples} examples.')
time_begin_training_model = datetime.now()
if self._verbose >= 1:
tf_logging.info('Training model...')
<DeepExtract>
if self._normalized_input_feature_keys is None:
raise Exception('The training graph was not built.')
train_model_path = self._temp_directory
model_path = os.path.join(train_model_path, 'model')
guide = self._build_guide()
training_config = copy.deepcopy(self._advanced_arguments.yggdrasil_training_config)
if self._weighted_training:
training_config.weight_definition.attribute = WEIGHTS
training_config.weight_definition.numerical.SetInParent()
for feature_key in self._normalized_input_feature_keys:
feature_regex = tf_core.normalize_inputs_regexp(feature_key, False)
training_config.features.append(feature_regex)
deployment_config = copy.deepcopy(self._advanced_arguments.yggdrasil_deployment_config)
if not deployment_config.HasField('num_threads'):
deployment_config.num_threads = self._num_threads
distribution_config = tf_core.get_distribution_configuration(self.distribute_strategy)
(resource_ids, feature_names) = tf_core.column_keys_to_resource_ids(self._normalized_column_keys, self._training_model_id, True)
with cc_logging.capture_cpp_log_context(verbose=self._verbose >= 2):
if distribution_config is None or not self.support_distributed_training():
tf_core.train(resource_ids=resource_ids, model_id=self._training_model_id, model_dir=train_model_path, generic_hparms=tf_core.hparams_dict_to_generic_proto(self._learner_params), keep_model_in_resource=True, guide=guide, training_config=training_config, deployment_config=deployment_config, try_resume_training=self._try_resume_training, has_validation_dataset=self._has_validation_dataset, node_format=self._advanced_arguments.node_format)
else:
tf_core.finalize_distributed_dataset_collection(cluster_coordinator=coordinator, resource_ids=resource_ids, feature_names=feature_names, dataset_path=self._distributed_partial_dataset_cache_path())
tf_core.train_on_file_dataset(train_dataset_path='partial_dataset_cache:' + self._distributed_partial_dataset_cache_path(), valid_dataset_path=None, model_id=self._training_model_id, model_dir=train_model_path, generic_hparms=tf_core.hparams_dict_to_generic_proto(self._learner_params), keep_model_in_resource=True, guide=guide, training_config=training_config, deployment_config=deployment_config, working_cache_path=os.path.join(self._temp_directory, 'working_cache'), distribution_config=distribution_config, try_resume_training=self._try_resume_training, cluster_coordinator=coordinator, node_format=self._advanced_arguments.node_format, force_ydf_port=self._advanced_arguments.force_ydf_port)
self._description = training_op.SimpleMLShowModel(model_identifier=self._training_model_id).numpy().decode('utf-8')
training_op.SimpleMLUnloadModel(model_identifier=self._training_model_id)
self._is_trained.assign(True)
if self._is_multitask:
self._models = []
for task_idx in range(len(self._multitask)):
self._models.append(tf_op.ModelV2(model_path=model_path, verbose=False, file_prefix=f'{self._training_model_id}_{task_idx}', allow_slow_inference=self._advanced_arguments.allow_slow_inference))
else:
self._models = [tf_op.ModelV2(model_path=model_path, verbose=False, file_prefix=self._training_model_id, allow_slow_inference=self._advanced_arguments.allow_slow_inference)]
</DeepExtract>
if self._verbose >= 1:
tf_logging.info('Model trained in %s', datetime.now() - time_begin_training_model)
self._build(x)
if validation_data is not None and (not self._advanced_arguments.populate_history_with_yggdrasil_logs):
val_logs = self.evaluate(x=val_x, y=val_y, verbose='auto' if self._verbose > 0 else 0, sample_weight=val_sample_weight, return_dict=True, steps=validation_steps, callbacks=callbacks)
val_logs = {'val_' + name: val for (name, val) in val_logs.items()}
callbacks.on_epoch_end(0, val_logs)
history.on_epoch_end(0, val_logs)
else:
<DeepExtract>
if inspector is None:
inspector = self.make_inspector()
training_logs = inspector.training_logs()
last_logs = None
if training_logs is not None:
for src_logs in training_logs:
if src_logs.evaluation is not None:
last_logs = src_logs.evaluation.to_dict()
history.on_batch_end(src_logs.num_trees, last_logs)
last_logs = last_logs
</DeepExtract>
callbacks.on_epoch_end(0, last_logs)
history.on_epoch_end(0, last_logs)
callbacks.on_train_end()
self.history = history
return self.history
|
def _fit_implementation(self, x, y, verbose, callbacks, sample_weight, validation_data, validation_steps, steps_per_epoch, class_weight) -> tf.keras.callbacks.History:
"""Train the model.
This method performs operations that resemble Keras' fit function.
Details:
1. Load the training dataset in Yggdrasil (locally or remotely).
2. Load the validation dataset in Yggdrasil (locally or ignored).
3. Train the Yggdrasil model. Depending on the learner, this might also
evaluate the model (on the validation dataset or with out-of-bag).
4. Evaluate the model using the Keras metrics.
We recommend training / validating models with finite datasets. For
backward compatibility with the early version of TF-DF, we also support
infinite dataset with specified number of steps (`steps_per_epoch` and
`validation_steps` arguments). Using a number of steps currently raises
a warning, and will later raise an error (and the logic be removed).
"""
if not isinstance(callbacks, tf.keras.callbacks.CallbackList):
callbacks = tf.keras.callbacks.CallbackList(callbacks, model=self, add_history=False)
history = tf.keras.callbacks.History()
history.model = self
history.on_train_begin()
history.on_epoch_begin(0)
callbacks.on_train_begin()
callbacks.on_epoch_begin(0)
distribution_config = tf_core.get_distribution_configuration(self.distribute_strategy)
validation_data_handler = None
if validation_data:
(val_x, val_y, val_sample_weight) = tf.keras.utils.unpack_x_y_sample_weight(validation_data)
if distribution_config is None or not self.support_distributed_training():
validation_data_handler = get_data_handler(x=val_x, y=val_y, sample_weight=val_sample_weight, model=self, steps_per_epoch=validation_steps, class_weight=class_weight)
time_begin_train_dataset_reading = datetime.now()
if self._verbose >= 1:
tf_logging.info('Reading training dataset...')
coordinator = None
if distribution_config is None or not self.support_distributed_training():
data_handler = get_data_handler(x=x, y=y, sample_weight=sample_weight, model=self, class_weight=class_weight)
iterator = iter(data_handler._dataset)
if steps_per_epoch is None:
num_examples = 0
for data in iterator:
num_examples += self.train_step(data)
self._num_training_examples = num_examples
else:
tf_logging.warning('You are using non-distributed training with steps_per_epoch. This solution will lead to a sub-optimal model. Instead, use a finite training dataset (e.g. a dataset without repeat operation) and remove the `steps_per_epoch` argument. This warning will be turned into an error in the future.')
def consumes_one_training_batch(iterator):
data = next(iterator)
return self.train_step(data)
tf_consumes_one_training_batch = tf.function(consumes_one_training_batch, reduce_retracing=True)
num_examples = 0
try:
for _ in range(steps_per_epoch):
num_examples += tf_consumes_one_training_batch(iterator)
except tf.errors.OutOfRangeError:
pass
self._num_training_examples = num_examples
else:
if class_weight is not None:
raise ValueError('class_weight not support for distributed training. Feed the example weights through the dataset.')
if not self.capabilities().support_partial_cache_dataset_format:
raise ValueError(f'The model {type(self)} does not support training with a TF Distribution strategy (i.e. model.capabilities().support_partial_cache_dataset_format == False). If the dataset is small, simply remove the distribution strategy scope (i.e. `with strategy.scope():` around the model construction). If the dataset is large, use a distributed version of the model. For example, use DistributedGradientBoostedTreesModel instead of GradientBoostedTreesModel.')
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(self.distribute_strategy)
with self.distribute_strategy.scope():
per_worker_dataset = coordinator.create_per_worker_dataset(x)
per_worker_iter = iter(per_worker_dataset)
if steps_per_epoch is not None:
tf_logging.warning('You are using distributed training with steps_per_epoch. This solution will lead to a sub-optimal model. Instead, use a finite training dataset (e.g. a dataset without repeat operation) and remove the `steps_per_epoch` argument.')
def remote_consumes_one_training_batch(iterator):
data = next(iterator)
return self.collect_data_step(data, is_training_example=True)
tf_remote_consumes_one_training_batch = tf.function(remote_consumes_one_training_batch, reduce_retracing=True)
tf_logging.info('Scheduling of %d steps', steps_per_epoch)
for _ in range(steps_per_epoch):
coordinator.schedule(tf_remote_consumes_one_training_batch, args=(per_worker_iter,))
tf_logging.info('Waiting for scheduled steps to complete')
coordinator.join()
self._num_training_examples = -1
else:
def remote_consumes_training_examples_until_eof(iterator):
return self._consumes_training_examples_until_eof(iterator)
tf_remote_consumes_training_examples_until_eof = tf.function(remote_consumes_training_examples_until_eof, reduce_retracing=True)
self._num_training_examples = tf_core.execute_function_on_each_worker(coordinator, tf_remote_consumes_training_examples_until_eof, (per_worker_iter,))
if self._verbose >= 1:
t = datetime.now() - time_begin_train_dataset_reading
tf_logging.info(f'Training dataset read in {t}. Found {self._num_training_examples} examples.')
if validation_data is not None:
if coordinator is not None and self.support_distributed_training():
assert validation_data_handler is None
tf_logging.warning('With distributed training, the validation dataset is not (yet) used for early stopping.')
self._num_validation_examples = None
else:
assert validation_data_handler is not None
time_begin_valid_dataset_reading = datetime.now()
if self._verbose >= 1:
tf_logging.info('Reading validation dataset...')
iterator = iter(validation_data_handler._dataset)
if validation_steps is None:
num_examples = 0
for data in iterator:
num_examples += self.valid_step(data)
self._num_validation_examples = num_examples
else:
tf_logging.warning('You are using non-distributed validation with steps_per_epoch. This solution will lead to a sub-optimal model. Instead, use a finite validation dataset (e.g. a dataset without repeat operation) and remove the `validation_steps` argument. This warning will be turned into an error in the future.')
def consumes_one_valid_batch(iterator):
data = next(iterator)
return self.valid_step(data)
tf_consumes_one_valid_batch = tf.function(consumes_one_valid_batch, reduce_retracing=True)
num_examples = 0
try:
for _ in range(validation_steps):
num_examples += tf_consumes_one_valid_batch(iterator)
except tf.errors.OutOfRangeError:
pass
self._num_validation_examples = num_examples
tf_logging.info('Num validation examples: %s', self.num_validation_examples)
if self._verbose >= 1:
t = datetime.now() - time_begin_valid_dataset_reading
tf_logging.info(f'Validation dataset read in {t}. Found {self._num_validation_examples} examples.')
time_begin_training_model = datetime.now()
if self._verbose >= 1:
tf_logging.info('Training model...')
if self._normalized_input_feature_keys is None:
raise Exception('The training graph was not built.')
train_model_path = self._temp_directory
model_path = os.path.join(train_model_path, 'model')
guide = self._build_guide()
training_config = copy.deepcopy(self._advanced_arguments.yggdrasil_training_config)
if self._weighted_training:
training_config.weight_definition.attribute = WEIGHTS
training_config.weight_definition.numerical.SetInParent()
for feature_key in self._normalized_input_feature_keys:
feature_regex = tf_core.normalize_inputs_regexp(feature_key, False)
training_config.features.append(feature_regex)
deployment_config = copy.deepcopy(self._advanced_arguments.yggdrasil_deployment_config)
if not deployment_config.HasField('num_threads'):
deployment_config.num_threads = self._num_threads
distribution_config = tf_core.get_distribution_configuration(self.distribute_strategy)
(resource_ids, feature_names) = tf_core.column_keys_to_resource_ids(self._normalized_column_keys, self._training_model_id, True)
with cc_logging.capture_cpp_log_context(verbose=self._verbose >= 2):
if distribution_config is None or not self.support_distributed_training():
tf_core.train(resource_ids=resource_ids, model_id=self._training_model_id, model_dir=train_model_path, generic_hparms=tf_core.hparams_dict_to_generic_proto(self._learner_params), keep_model_in_resource=True, guide=guide, training_config=training_config, deployment_config=deployment_config, try_resume_training=self._try_resume_training, has_validation_dataset=self._has_validation_dataset, node_format=self._advanced_arguments.node_format)
else:
tf_core.finalize_distributed_dataset_collection(cluster_coordinator=coordinator, resource_ids=resource_ids, feature_names=feature_names, dataset_path=self._distributed_partial_dataset_cache_path())
tf_core.train_on_file_dataset(train_dataset_path='partial_dataset_cache:' + self._distributed_partial_dataset_cache_path(), valid_dataset_path=None, model_id=self._training_model_id, model_dir=train_model_path, generic_hparms=tf_core.hparams_dict_to_generic_proto(self._learner_params), keep_model_in_resource=True, guide=guide, training_config=training_config, deployment_config=deployment_config, working_cache_path=os.path.join(self._temp_directory, 'working_cache'), distribution_config=distribution_config, try_resume_training=self._try_resume_training, cluster_coordinator=coordinator, node_format=self._advanced_arguments.node_format, force_ydf_port=self._advanced_arguments.force_ydf_port)
self._description = training_op.SimpleMLShowModel(model_identifier=self._training_model_id).numpy().decode('utf-8')
training_op.SimpleMLUnloadModel(model_identifier=self._training_model_id)
self._is_trained.assign(True)
if self._is_multitask:
self._models = []
for task_idx in range(len(self._multitask)):
self._models.append(tf_op.ModelV2(model_path=model_path, verbose=False, file_prefix=f'{self._training_model_id}_{task_idx}', allow_slow_inference=self._advanced_arguments.allow_slow_inference))
else:
self._models = [tf_op.ModelV2(model_path=model_path, verbose=False, file_prefix=self._training_model_id, allow_slow_inference=self._advanced_arguments.allow_slow_inference)]
if self._verbose >= 1:
tf_logging.info('Model trained in %s', datetime.now() - time_begin_training_model)
self._build(x)
if validation_data is not None and (not self._advanced_arguments.populate_history_with_yggdrasil_logs):
val_logs = self.evaluate(x=val_x, y=val_y, verbose='auto' if self._verbose > 0 else 0, sample_weight=val_sample_weight, return_dict=True, steps=validation_steps, callbacks=callbacks)
val_logs = {'val_' + name: val for (name, val) in val_logs.items()}
callbacks.on_epoch_end(0, val_logs)
history.on_epoch_end(0, val_logs)
else:
if inspector is None:
inspector = self.make_inspector()
training_logs = inspector.training_logs()
last_logs = None
if training_logs is not None:
for src_logs in training_logs:
if src_logs.evaluation is not None:
last_logs = src_logs.evaluation.to_dict()
history.on_batch_end(src_logs.num_trees, last_logs)
last_logs = last_logs
callbacks.on_epoch_end(0, last_logs)
history.on_epoch_end(0, last_logs)
callbacks.on_train_end()
self.history = history
return self.history
|
decision-forests
|
positive
|
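For context, the code path above is normally reached through an ordinary Keras-style fit() call on a finite dataset. The sketch below assumes the public tensorflow_decision_forests API (tfdf.keras.pd_dataframe_to_tf_dataset and GradientBoostedTreesModel); the DataFrame columns and values are invented for illustration.

import pandas as pd
import tensorflow_decision_forests as tfdf

# Tiny made-up dataset; only the shape of the call matters here.
df = pd.DataFrame({
    'feature_a': [1.0, 2.0, 3.0, 4.0],
    'feature_b': ['x', 'y', 'x', 'y'],
    'label': [0, 1, 0, 1],
})
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(df, label='label')

model = tfdf.keras.GradientBoostedTreesModel(verbose=1)
model.fit(train_ds)  # a finite dataset, so no steps_per_epoch warning path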
def runOrPrompt() -> None:
if self.cmdpending:
<DeepExtract>
pp = None
def runOrPrompt() -> None:
if self.cmdpending:
self.runCommand()
else:
self.showPrompt()
def parse_arguments(arguments: list[str]) -> list[str]:
parsed_arguments = []
for arg in arguments:
parsed_arguments.append(arg)
return parsed_arguments
def parse_file_arguments(arguments: str) -> list[str]:
"""
Look up arguments in the file system
"""
parsed_arguments = []
for arg in arguments:
matches = self.protocol.fs.resolve_path_wc(arg, self.protocol.cwd)
if matches:
parsed_arguments.extend(matches)
else:
parsed_arguments.append(arg)
return parsed_arguments
if not self.cmdpending:
if self.protocol.pp.next_command is None:
if self.interactive:
self.showPrompt()
elif len(self.protocol.cmdstack) == 1:
ret = failure.Failure(error.ProcessDone(status=''))
self.protocol.terminal.transport.processEnded(ret)
else:
return
else:
pass
return
cmdAndArgs = self.cmdpending.pop(0)
cmd2 = copy.copy(cmdAndArgs)
environ = copy.copy(self.environ)
cmd_array = []
cmd: dict[str, Any] = {}
while cmdAndArgs:
piece = cmdAndArgs.pop(0)
if piece.count('='):
(key, val) = piece.split('=', 1)
environ[key] = val
continue
cmd['command'] = piece
cmd['rargs'] = []
break
if 'command' not in cmd or not cmd['command']:
runOrPrompt()
return
pipe_indices = [i for (i, x) in enumerate(cmdAndArgs) if x == '|']
multipleCmdArgs: list[list[str]] = []
pipe_indices.append(len(cmdAndArgs))
start = 0
for (_index, pipe_indice) in enumerate(pipe_indices):
multipleCmdArgs.append(cmdAndArgs[start:pipe_indice])
start = pipe_indice + 1
cmd['rargs'] = parse_arguments(multipleCmdArgs.pop(0))
cmd_array.append(cmd)
cmd = {}
for value in multipleCmdArgs:
cmd['command'] = value.pop(0)
cmd['rargs'] = parse_arguments(value)
cmd_array.append(cmd)
cmd = {}
lastpp = None
for (index, cmd) in reversed(list(enumerate(cmd_array))):
cmdclass = self.protocol.getCommand(cmd['command'], environ['PATH'].split(':'))
if cmdclass:
log.msg(input=cmd['command'] + ' ' + ' '.join(cmd['rargs']), format='Command found: %(input)s')
if index == len(cmd_array) - 1:
lastpp = StdOutStdErrEmulationProtocol(self.protocol, cmdclass, cmd['rargs'], None, None, self.redirect)
pp = lastpp
else:
pp = StdOutStdErrEmulationProtocol(self.protocol, cmdclass, cmd['rargs'], None, lastpp, self.redirect)
lastpp = pp
else:
log.msg(eventid='cowrie.command.failed', input=' '.join(cmd2), format='Command not found: %(input)s')
self.protocol.terminal.write('-bash: {}: command not found\n'.format(cmd['command']).encode('utf8'))
if not self.interactive:
stat = failure.Failure(error.ProcessDone(status=''))
self.protocol.terminal.transport.processEnded(stat)
runOrPrompt()
pp = None
break
if pp:
self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
</DeepExtract>
else:
<DeepExtract>
if not self.interactive:
return
prompt = ''
if CowrieConfig.has_option('honeypot', 'prompt'):
prompt = CowrieConfig.get('honeypot', 'prompt')
prompt += ' '
else:
cwd = self.protocol.cwd
homelen = len(self.protocol.user.avatar.home)
if cwd == self.protocol.user.avatar.home:
cwd = '~'
elif len(cwd) > homelen + 1 and cwd[:homelen + 1] == self.protocol.user.avatar.home + '/':
cwd = '~' + cwd[homelen:]
prompt = f'{self.protocol.user.username}@{self.protocol.hostname}:{cwd}'
if not self.protocol.user.uid:
prompt += '# '
else:
prompt += '$ '
self.protocol.terminal.write(prompt.encode('ascii'))
self.protocol.ps = (prompt.encode('ascii'), b'> ')
</DeepExtract>
|
def runOrPrompt() -> None:
if self.cmdpending:
pp = None
def runOrPrompt() -> None:
if self.cmdpending:
self.runCommand()
else:
self.showPrompt()
def parse_arguments(arguments: list[str]) -> list[str]:
parsed_arguments = []
for arg in arguments:
parsed_arguments.append(arg)
return parsed_arguments
def parse_file_arguments(arguments: str) -> list[str]:
"""
Look up arguments in the file system
"""
parsed_arguments = []
for arg in arguments:
matches = self.protocol.fs.resolve_path_wc(arg, self.protocol.cwd)
if matches:
parsed_arguments.extend(matches)
else:
parsed_arguments.append(arg)
return parsed_arguments
if not self.cmdpending:
if self.protocol.pp.next_command is None:
if self.interactive:
self.showPrompt()
elif len(self.protocol.cmdstack) == 1:
ret = failure.Failure(error.ProcessDone(status=''))
self.protocol.terminal.transport.processEnded(ret)
else:
return
else:
pass
return
cmdAndArgs = self.cmdpending.pop(0)
cmd2 = copy.copy(cmdAndArgs)
environ = copy.copy(self.environ)
cmd_array = []
cmd: dict[str, Any] = {}
while cmdAndArgs:
piece = cmdAndArgs.pop(0)
if piece.count('='):
(key, val) = piece.split('=', 1)
environ[key] = val
continue
cmd['command'] = piece
cmd['rargs'] = []
break
if 'command' not in cmd or not cmd['command']:
runOrPrompt()
return
pipe_indices = [i for (i, x) in enumerate(cmdAndArgs) if x == '|']
multipleCmdArgs: list[list[str]] = []
pipe_indices.append(len(cmdAndArgs))
start = 0
for (_index, pipe_indice) in enumerate(pipe_indices):
multipleCmdArgs.append(cmdAndArgs[start:pipe_indice])
start = pipe_indice + 1
cmd['rargs'] = parse_arguments(multipleCmdArgs.pop(0))
cmd_array.append(cmd)
cmd = {}
for value in multipleCmdArgs:
cmd['command'] = value.pop(0)
cmd['rargs'] = parse_arguments(value)
cmd_array.append(cmd)
cmd = {}
lastpp = None
for (index, cmd) in reversed(list(enumerate(cmd_array))):
cmdclass = self.protocol.getCommand(cmd['command'], environ['PATH'].split(':'))
if cmdclass:
log.msg(input=cmd['command'] + ' ' + ' '.join(cmd['rargs']), format='Command found: %(input)s')
if index == len(cmd_array) - 1:
lastpp = StdOutStdErrEmulationProtocol(self.protocol, cmdclass, cmd['rargs'], None, None, self.redirect)
pp = lastpp
else:
pp = StdOutStdErrEmulationProtocol(self.protocol, cmdclass, cmd['rargs'], None, lastpp, self.redirect)
lastpp = pp
else:
log.msg(eventid='cowrie.command.failed', input=' '.join(cmd2), format='Command not found: %(input)s')
self.protocol.terminal.write('-bash: {}: command not found\n'.format(cmd['command']).encode('utf8'))
if not self.interactive:
stat = failure.Failure(error.ProcessDone(status=''))
self.protocol.terminal.transport.processEnded(stat)
runOrPrompt()
pp = None
break
if pp:
self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
else:
if not self.interactive:
return
prompt = ''
if CowrieConfig.has_option('honeypot', 'prompt'):
prompt = CowrieConfig.get('honeypot', 'prompt')
prompt += ' '
else:
cwd = self.protocol.cwd
homelen = len(self.protocol.user.avatar.home)
if cwd == self.protocol.user.avatar.home:
cwd = '~'
elif len(cwd) > homelen + 1 and cwd[:homelen + 1] == self.protocol.user.avatar.home + '/':
cwd = '~' + cwd[homelen:]
prompt = f'{self.protocol.user.username}@{self.protocol.hostname}:{cwd}'
if not self.protocol.user.uid:
prompt += '# '
else:
prompt += '$ '
self.protocol.terminal.write(prompt.encode('ascii'))
self.protocol.ps = (prompt.encode('ascii'), b'> ')
|
cowrie
|
positive
|
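The pipe handling above splits the token list on '|' before building the per-command protocols. A minimal standalone sketch of that splitting step, on a made-up token list:

cmdAndArgs = ['cat', '/etc/passwd', '|', 'grep', 'root', '|', 'wc', '-l']
pipe_indices = [i for i, x in enumerate(cmdAndArgs) if x == '|']
pipe_indices.append(len(cmdAndArgs))

segments, start = [], 0
for idx in pipe_indices:
    segments.append(cmdAndArgs[start:idx])
    start = idx + 1

print(segments)  # [['cat', '/etc/passwd'], ['grep', 'root'], ['wc', '-l']]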
def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0.0, ids2=None):
im = np.ascontiguousarray(np.copy(image))
(im_h, im_w) = im.shape[:2]
top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255
text_scale = max(1, image.shape[1] / 1600.0)
text_thickness = 1 if text_scale > 1.1 else 1
line_thickness = max(1, int(image.shape[1] / 500.0))
radius = max(5, int(im_w / 140.0))
cv2.putText(im, 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)), (0, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255), thickness=2)
for (i, tlwh) in enumerate(tlwhs):
(x1, y1, w, h) = tlwh
intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
obj_id = int(obj_ids[i])
id_text = '{}'.format(int(obj_id))
if ids2 is not None:
id_text = id_text + ', {}'.format(int(ids2[i]))
_line_thickness = 1 if obj_id <= 0 else line_thickness
<DeepExtract>
obj_id_scaled = abs(obj_id) * 3
color = (37 * obj_id_scaled % 255, 17 * obj_id_scaled % 255, 29 * obj_id_scaled % 255)
</DeepExtract>
cv2.rectangle(im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)
cv2.putText(im, id_text, (intbox[0], intbox[1] + 30), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255), thickness=text_thickness)
return im
|
def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0.0, ids2=None):
im = np.ascontiguousarray(np.copy(image))
(im_h, im_w) = im.shape[:2]
top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255
text_scale = max(1, image.shape[1] / 1600.0)
text_thickness = 1 if text_scale > 1.1 else 1
line_thickness = max(1, int(image.shape[1] / 500.0))
radius = max(5, int(im_w / 140.0))
cv2.putText(im, 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)), (0, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255), thickness=2)
for (i, tlwh) in enumerate(tlwhs):
(x1, y1, w, h) = tlwh
intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
obj_id = int(obj_ids[i])
id_text = '{}'.format(int(obj_id))
if ids2 is not None:
id_text = id_text + ', {}'.format(int(ids2[i]))
_line_thickness = 1 if obj_id <= 0 else line_thickness
obj_id_scaled = abs(obj_id) * 3
color = (37 * obj_id_scaled % 255, 17 * obj_id_scaled % 255, 29 * obj_id_scaled % 255)
cv2.rectangle(im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)
cv2.putText(im, id_text, (intbox[0], intbox[1] + 30), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255), thickness=text_thickness)
return im
|
AlphAction
|
positive
|
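The id-to-color mapping used in plot_tracking is deterministic, so each track keeps its colour across frames. A small hedged sketch of that mapping as a standalone helper (the function name is an assumption; the multipliers come from the code above):

def id_to_color(obj_id):
    idx = abs(int(obj_id)) * 3
    return (37 * idx % 255, 17 * idx % 255, 29 * idx % 255)

print(id_to_color(1))   # (111, 51, 87)
print(id_to_color(42))  # (72, 102, 84)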
def ancestor(self, nid, level=None):
"""
For a given id, get ancestor node object at a given level.
If no level is provided, the parent node is returned.
"""
if not self.contains(nid):
raise NodeIDAbsentError("Node '%s' is not in the tree" % nid)
descendant = self[nid]
ascendant = self[nid].bpointer
<DeepExtract>
ascendant_level = len([n for n in self.rsearch(ascendant, filter_fn)]) - 1
</DeepExtract>
if level is None:
return ascendant
elif nid == self.root:
return self[nid]
elif level >= self.level(descendant.identifier):
raise AttributeError("Descendant level (level %s) must be greater than its ancestor's level (level %s)" % (str(self.level(descendant.identifier)), level))
while ascendant is not None:
if ascendant_level == level:
return self[ascendant]
else:
descendant = ascendant
ascendant = self[descendant].bpointer
<DeepExtract>
ascendant_level = len([n for n in self.rsearch(ascendant, filter_fn)]) - 1
</DeepExtract>
return None
|
def ancestor(self, nid, level=None):
"""
For a given id, get ancestor node object at a given level.
If no level is provided, the parent node is returned.
"""
if not self.contains(nid):
raise NodeIDAbsentError("Node '%s' is not in the tree" % nid)
descendant = self[nid]
ascendant = self[nid].bpointer
ascendant_level = len([n for n in self.rsearch(ascendant, filter_fn)]) - 1
if level is None:
return ascendant
elif nid == self.root:
return self[nid]
elif level >= self.level(descendant.identifier):
raise AttributeError("Descendant level (level %s) must be greater than its ancestor's level (level %s)" % (str(self.level(descendant.identifier)), level))
while ascendant is not None:
if ascendant_level == level:
return self[ascendant]
else:
descendant = ascendant
ascendant = self[descendant].bpointer
ascendant_level = len([n for n in self.rsearch(ascendant, filter_fn)]) - 1
return None
|
cloudkeeper
|
positive
|
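Conceptually, ancestor() is an upward walk over parent pointers until the requested level is reached. The sketch below reproduces that walk with a plain dict instead of the Tree class above; all names and the toy hierarchy are made up.

parents = {'grandchild': 'child', 'child': 'root', 'root': None}

def level(nid):
    hops = 0
    while parents[nid] is not None:
        nid = parents[nid]
        hops += 1
    return hops

def ancestor(nid, target_level):
    while nid is not None and level(nid) > target_level:
        nid = parents[nid]
    return nid

print(ancestor('grandchild', 0))  # 'root'
print(ancestor('grandchild', 1))  # 'child'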
def test_crop(self):
box = [400, 250, 500, 300]
<DeepExtract>
diff = self.M.crop(box).get_mask_tensor() - self.P.crop(box).get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff = diff
</DeepExtract>
self.assertTrue(diff <= 1.0)
|
def test_crop(self):
box = [400, 250, 500, 300]
diff = self.M.crop(box).get_mask_tensor() - self.P.crop(box).get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff = diff
self.assertTrue(diff <= 1.0)
|
EmbedMask
|
positive
|
def test_jinja_variable_def(self):
expected_message = 'Jinja2 variable definitions are suggested to take a ``{{%<one space>set<one space><variable name><one space>=<one space><expression><one space>%}}`` form. See lines {}'.format([2])
with tmp_directory() as recipe_dir:
def assert_jinja(jinja_var, is_good=True):
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format(jinja_var))
(lints, hints) = linter.lintify({}, recipe_dir)
if is_good:
message = "Found lints when there shouldn't have been a lint for '{}'.".format(jinja_var)
else:
message = "Expecting lints for '{}', but didn't get any.".format(jinja_var)
self.assertEqual(not is_good, any((lint.startswith(expected_message) for lint in lints)), message)
<DeepExtract>
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{% set version = "0.27.3" %}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if True:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{% set version = "0.27.3" %}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{% set version = "0.27.3" %}')
self.assertEqual(not True, any((lint.startswith(expected_message) for lint in lints)), message)
</DeepExtract>
<DeepExtract>
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{% set version="0.27.3" %}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if False:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{% set version="0.27.3" %}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{% set version="0.27.3" %}')
self.assertEqual(not False, any((lint.startswith(expected_message) for lint in lints)), message)
</DeepExtract>
<DeepExtract>
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{%set version = "0.27.3" %}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if False:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{%set version = "0.27.3" %}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{%set version = "0.27.3" %}')
self.assertEqual(not False, any((lint.startswith(expected_message) for lint in lints)), message)
</DeepExtract>
<DeepExtract>
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{% set version = "0.27.3"%}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if False:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{% set version = "0.27.3"%}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{% set version = "0.27.3"%}')
self.assertEqual(not False, any((lint.startswith(expected_message) for lint in lints)), message)
</DeepExtract>
<DeepExtract>
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{% set version= "0.27.3"%}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if False:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{% set version= "0.27.3"%}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{% set version= "0.27.3"%}')
self.assertEqual(not False, any((lint.startswith(expected_message) for lint in lints)), message)
</DeepExtract>
|
def test_jinja_variable_def(self):
expected_message = 'Jinja2 variable definitions are suggested to take a ``{{%<one space>set<one space><variable name><one space>=<one space><expression><one space>%}}`` form. See lines {}'.format([2])
with tmp_directory() as recipe_dir:
def assert_jinja(jinja_var, is_good=True):
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format(jinja_var))
(lints, hints) = linter.lintify({}, recipe_dir)
if is_good:
message = "Found lints when there shouldn't have been a lint for '{}'.".format(jinja_var)
else:
message = "Expecting lints for '{}', but didn't get any.".format(jinja_var)
self.assertEqual(not is_good, any((lint.startswith(expected_message) for lint in lints)), message)
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{% set version = "0.27.3" %}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if True:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{% set version = "0.27.3" %}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{% set version = "0.27.3" %}')
self.assertEqual(not True, any((lint.startswith(expected_message) for lint in lints)), message)
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{% set version="0.27.3" %}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if False:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{% set version="0.27.3" %}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{% set version="0.27.3" %}')
self.assertEqual(not False, any((lint.startswith(expected_message) for lint in lints)), message)
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{%set version = "0.27.3" %}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if False:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{%set version = "0.27.3" %}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{%set version = "0.27.3" %}')
self.assertEqual(not False, any((lint.startswith(expected_message) for lint in lints)), message)
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{% set version = "0.27.3"%}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if False:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{% set version = "0.27.3"%}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{% set version = "0.27.3"%}')
self.assertEqual(not False, any((lint.startswith(expected_message) for lint in lints)), message)
with io.open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fh:
fh.write('\n {{% set name = "conda-smithy" %}}\n {}\n '.format('{% set version= "0.27.3"%}'))
(lints, hints) = linter.lintify({}, recipe_dir)
if False:
message = "Found lints when there shouldn't have been a lint for '{}'.".format('{% set version= "0.27.3"%}')
else:
message = "Expecting lints for '{}', but didn't get any.".format('{% set version= "0.27.3"%}')
self.assertEqual(not False, any((lint.startswith(expected_message) for lint in lints)), message)
|
conda-smithy
|
positive
|
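The lint being exercised boils down to a spacing rule around `set`, the variable name, and `=`. A hedged illustration with a regex that is looser than conda-smithy's actual check (the pattern here is an assumption, not the linter's implementation):

import re

good = re.compile(r'{%\s+set\s+\w+\s+=\s+.+\s+%}')
for candidate in ['{% set version = "0.27.3" %}',
                  '{% set version="0.27.3" %}',
                  '{%set version = "0.27.3" %}']:
    print(candidate, bool(good.match(candidate)))
# Only the first form passes; the others are the kinds of definitions the
# lint above flags.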
def __check(options, default):
if not isinstance(default, dict):
return options
for key in default.keys():
if not key in options:
options[key] = default[key]
if not key == 'transform':
<DeepExtract>
if not isinstance(default[key], dict):
options[key][key] = options[key]
for key in default[key].keys():
if not key in options[key]:
options[key][key] = default[key][key]
if not key == 'transform':
options[key][key] = __check(options[key][key], default[key][key])
options[key][key] = options[key]
</DeepExtract>
return options
|
def __check(options, default):
if not isinstance(default, dict):
return options
for key in default.keys():
if not key in options:
options[key] = default[key]
if not key == 'transform':
if not isinstance(default[key], dict):
options[key][key] = options[key]
for key in default[key].keys():
if not key in options[key]:
options[key][key] = default[key][key]
if not key == 'transform':
options[key][key] = __check(options[key][key], default[key][key])
options[key][key] = options[key]
return options
|
aerial_wildlife_detection
|
positive
|
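__check is essentially a recursive defaults merge. The sketch below shows the same idea on made-up dicts, without the special-casing of the 'transform' key; names and values are invented.

def merge_defaults(options, defaults):
    if not isinstance(defaults, dict):
        return options
    for key, value in defaults.items():
        if key not in options:
            options[key] = value
        elif isinstance(value, dict):
            options[key] = merge_defaults(options[key], value)
    return options

opts = {'model': {'lr': 0.01}}
defaults = {'model': {'lr': 0.001, 'epochs': 10}, 'verbose': True}
print(merge_defaults(opts, defaults))
# {'model': {'lr': 0.01, 'epochs': 10}, 'verbose': True}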
def main():
argument_spec = dict(stack_name=dict(), all_facts=dict(required=False, default=False, type='bool'), stack_policy=dict(required=False, default=False, type='bool'), stack_events=dict(required=False, default=False, type='bool'), stack_resources=dict(required=False, default=False, type='bool'), stack_template=dict(required=False, default=False, type='bool'), stack_change_sets=dict(required=False, default=False, type='bool'))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
service_mgr = CloudFormationServiceManager(module)
result = {'cloudformation': {}}
for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
facts = {'stack_description': stack_description}
stack_name = stack_description.get('StackName')
if facts['stack_description']:
<DeepExtract>
if facts['stack_description'].get('Outputs'):
facts['stack_outputs'] = dict(zip([i.get('OutputKey') for i in facts['stack_description'].get('Outputs')], [i.get('OutputValue') for i in facts['stack_description'].get('Outputs')]))
else:
facts['stack_outputs'] = dict()
</DeepExtract>
<DeepExtract>
if facts['stack_description'].get('Parameters'):
facts['stack_parameters'] = dict(zip([i.get('ParameterKey') for i in facts['stack_description'].get('Parameters')], [i.get('ParameterValue') for i in facts['stack_description'].get('Parameters')]))
else:
facts['stack_parameters'] = dict()
</DeepExtract>
facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
all_facts = module.params.get('all_facts')
if all_facts or module.params.get('stack_resources'):
facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
<DeepExtract>
if facts.get('stack_resource_list'):
facts['stack_resources'] = dict(zip([i.get('LogicalResourceId') for i in facts.get('stack_resource_list')], [i.get('PhysicalResourceId') for i in facts.get('stack_resource_list')]))
else:
facts['stack_resources'] = dict()
</DeepExtract>
if all_facts or module.params.get('stack_template'):
facts['stack_template'] = service_mgr.get_template(stack_name)
if all_facts or module.params.get('stack_policy'):
facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
if all_facts or module.params.get('stack_events'):
facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
if all_facts or module.params.get('stack_change_sets'):
facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs', 'stack_parameters', 'stack_policy', 'stack_resources', 'stack_tags', 'stack_template'))
module.exit_json(changed=False, **result)
|
def main():
argument_spec = dict(stack_name=dict(), all_facts=dict(required=False, default=False, type='bool'), stack_policy=dict(required=False, default=False, type='bool'), stack_events=dict(required=False, default=False, type='bool'), stack_resources=dict(required=False, default=False, type='bool'), stack_template=dict(required=False, default=False, type='bool'), stack_change_sets=dict(required=False, default=False, type='bool'))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
service_mgr = CloudFormationServiceManager(module)
result = {'cloudformation': {}}
for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
facts = {'stack_description': stack_description}
stack_name = stack_description.get('StackName')
if facts['stack_description']:
if facts['stack_description'].get('Outputs'):
facts['stack_outputs'] = dict(zip([i.get('OutputKey') for i in facts['stack_description'].get('Outputs')], [i.get('OutputValue') for i in facts['stack_description'].get('Outputs')]))
else:
facts['stack_outputs'] = dict()
if facts['stack_description'].get('Parameters'):
facts['stack_parameters'] = dict(zip([i.get('ParameterKey') for i in facts['stack_description'].get('Parameters')], [i.get('ParameterValue') for i in facts['stack_description'].get('Parameters')]))
else:
facts['stack_parameters'] = dict()
facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
all_facts = module.params.get('all_facts')
if all_facts or module.params.get('stack_resources'):
facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
if facts.get('stack_resource_list'):
facts['stack_resources'] = dict(zip([i.get('LogicalResourceId') for i in facts.get('stack_resource_list')], [i.get('PhysicalResourceId') for i in facts.get('stack_resource_list')]))
else:
facts['stack_resources'] = dict()
if all_facts or module.params.get('stack_template'):
facts['stack_template'] = service_mgr.get_template(stack_name)
if all_facts or module.params.get('stack_policy'):
facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
if all_facts or module.params.get('stack_events'):
facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
if all_facts or module.params.get('stack_change_sets'):
facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs', 'stack_parameters', 'stack_policy', 'stack_resources', 'stack_tags', 'stack_template'))
module.exit_json(changed=False, **result)
|
amazon.aws
|
positive
|
def write_build_prepend(self):
"""Write out any custom supplied commands at the start of the %build section and every build type."""
if self.config.build_prepend:
<DeepExtract>
self.specfile.write_strip('## build_prepend content')
</DeepExtract>
for line in self.config.build_prepend:
<DeepExtract>
self.specfile.write_strip('{}\n'.format(line))
</DeepExtract>
<DeepExtract>
self.specfile.write_strip('## build_prepend end')
</DeepExtract>
|
def write_build_prepend(self):
"""Write out any custom supplied commands at the start of the %build section and every build type."""
if self.config.build_prepend:
self.specfile.write_strip('## build_prepend content')
for line in self.config.build_prepend:
self.specfile.write_strip('{}\n'.format(line))
self.specfile.write_strip('## build_prepend end')
|
autospec
|
positive
|
def files_destination_settings(fdict):
"""Get global resources files destination
:param dict fdict: configuration block
:rtype: DestinationSettings
:return: destination settings
"""
conf = fdict['destination']
try:
shared = conf['shared_data_volume']
except KeyError:
shared = None
try:
storage = conf['storage_account_settings']
except KeyError:
storage = None
try:
rdp = conf['relative_destination_path']
if rdp is not None:
rdp = rdp.lstrip('/').rstrip('/')
if len(rdp) == 0:
rdp = None
except KeyError:
rdp = None
try:
method = conf['data_transfer']['method'].lower()
except KeyError:
if storage is None:
raise RuntimeError('no transfer method specified for data transfer of source: {} to {} rdp={}'.format(files_source_settings(fdict).path, shared, rdp))
else:
method = None
try:
ssh_eo = conf['data_transfer']['scp_ssh_extra_options']
if ssh_eo is None:
raise KeyError()
except KeyError:
ssh_eo = ''
try:
rsync_eo = conf['data_transfer']['rsync_extra_options']
if rsync_eo is None:
raise KeyError()
except KeyError:
rsync_eo = ''
try:
mpt = conf['data_transfer']['max_parallel_transfers_per_node']
if mpt is not None and mpt <= 0:
raise KeyError()
except KeyError:
mpt = None
if mpt is None:
mpt = 1
try:
split = conf['data_transfer']['split_files_megabytes']
if split is not None and split <= 0:
raise KeyError()
if split is not None:
split <<= 20
except KeyError:
split = None
<DeepExtract>
try:
ret = conf['data_transfer']['ssh_private_key']
if util.is_none_or_empty(ret):
raise KeyError()
except KeyError:
ret = default
ssh_private_key = ret
</DeepExtract>
if util.is_not_empty(ssh_private_key):
ssh_private_key = pathlib.Path(ssh_private_key)
try:
container = conf['data_transfer']['container']
if util.is_none_or_empty(container):
raise KeyError()
except KeyError:
container = None
try:
fshare = conf['data_transfer']['file_share']
if util.is_none_or_empty(fshare):
raise KeyError()
except KeyError:
fshare = None
try:
bx_eo = conf['data_transfer']['blobxfer_extra_options']
if bx_eo is None:
bx_eo = ''
except KeyError:
bx_eo = ''
return DestinationSettings(storage_account_settings=storage, shared_data_volume=shared, relative_destination_path=rdp, data_transfer=DataTransferSettings(container=container, file_share=fshare, blobxfer_extra_options=bx_eo, method=method, ssh_private_key=ssh_private_key, scp_ssh_extra_options=ssh_eo, rsync_extra_options=rsync_eo, split_files_megabytes=split, max_parallel_transfers_per_node=mpt))
|
def files_destination_settings(fdict):
"""Get global resources files destination
:param dict fdict: configuration block
:rtype: DestinationSettings
:return: destination settings
"""
conf = fdict['destination']
try:
shared = conf['shared_data_volume']
except KeyError:
shared = None
try:
storage = conf['storage_account_settings']
except KeyError:
storage = None
try:
rdp = conf['relative_destination_path']
if rdp is not None:
rdp = rdp.lstrip('/').rstrip('/')
if len(rdp) == 0:
rdp = None
except KeyError:
rdp = None
try:
method = conf['data_transfer']['method'].lower()
except KeyError:
if storage is None:
raise RuntimeError('no transfer method specified for data transfer of source: {} to {} rdp={}'.format(files_source_settings(fdict).path, shared, rdp))
else:
method = None
try:
ssh_eo = conf['data_transfer']['scp_ssh_extra_options']
if ssh_eo is None:
raise KeyError()
except KeyError:
ssh_eo = ''
try:
rsync_eo = conf['data_transfer']['rsync_extra_options']
if rsync_eo is None:
raise KeyError()
except KeyError:
rsync_eo = ''
try:
mpt = conf['data_transfer']['max_parallel_transfers_per_node']
if mpt is not None and mpt <= 0:
raise KeyError()
except KeyError:
mpt = None
if mpt is None:
mpt = 1
try:
split = conf['data_transfer']['split_files_megabytes']
if split is not None and split <= 0:
raise KeyError()
if split is not None:
split <<= 20
except KeyError:
split = None
try:
ret = conf['data_transfer']['ssh_private_key']
if util.is_none_or_empty(ret):
raise KeyError()
except KeyError:
ret = default
ssh_private_key = ret
if util.is_not_empty(ssh_private_key):
ssh_private_key = pathlib.Path(ssh_private_key)
try:
container = conf['data_transfer']['container']
if util.is_none_or_empty(container):
raise KeyError()
except KeyError:
container = None
try:
fshare = conf['data_transfer']['file_share']
if util.is_none_or_empty(fshare):
raise KeyError()
except KeyError:
fshare = None
try:
bx_eo = conf['data_transfer']['blobxfer_extra_options']
if bx_eo is None:
bx_eo = ''
except KeyError:
bx_eo = ''
return DestinationSettings(storage_account_settings=storage, shared_data_volume=shared, relative_destination_path=rdp, data_transfer=DataTransferSettings(container=container, file_share=fshare, blobxfer_extra_options=bx_eo, method=method, ssh_private_key=ssh_private_key, scp_ssh_extra_options=ssh_eo, rsync_extra_options=rsync_eo, split_files_megabytes=split, max_parallel_transfers_per_node=mpt))
|
cortana-intelligence-inventory-optimization
|
positive
|
def evaluateStack(self, s):
op = s.pop()
if op == 'unary -':
return -self.evaluateStack(s)
if op in '+-x/^':
<DeepExtract>
op = s.pop()
if op == 'unary -':
op2 = -self.evaluateStack(s)
if op in '+-x/^':
op2 = self.evaluateStack(s)
op1 = self.evaluateStack(s)
op2 = self.opn[op](op1, op2)
elif op == 'PI':
op2 = math.pi
elif op == 'E':
op2 = math.e
elif op in self.fn:
op2 = self.fn[op](self.evaluateStack(s))
elif op[0].isalpha():
op2 = 0
else:
op2 = float(op)
</DeepExtract>
<DeepExtract>
op = s.pop()
if op == 'unary -':
op1 = -self.evaluateStack(s)
if op in '+-x/^':
op2 = self.evaluateStack(s)
op1 = self.evaluateStack(s)
op1 = self.opn[op](op1, op2)
elif op == 'PI':
op1 = math.pi
elif op == 'E':
op1 = math.e
elif op in self.fn:
op1 = self.fn[op](self.evaluateStack(s))
elif op[0].isalpha():
op1 = 0
else:
op1 = float(op)
</DeepExtract>
return self.opn[op](op1, op2)
elif op == 'PI':
return math.pi
elif op == 'E':
return math.e
elif op in self.fn:
return self.fn[op](self.evaluateStack(s))
elif op[0].isalpha():
return 0
else:
return float(op)
|
def evaluateStack(self, s):
op = s.pop()
if op == 'unary -':
return -self.evaluateStack(s)
if op in '+-x/^':
op = s.pop()
if op == 'unary -':
op2 = -self.evaluateStack(s)
if op in '+-x/^':
op2 = self.evaluateStack(s)
op1 = self.evaluateStack(s)
op2 = self.opn[op](op1, op2)
elif op == 'PI':
op2 = math.pi
elif op == 'E':
op2 = math.e
elif op in self.fn:
op2 = self.fn[op](self.evaluateStack(s))
elif op[0].isalpha():
op2 = 0
else:
op2 = float(op)
op = s.pop()
if op == 'unary -':
op1 = -self.evaluateStack(s)
if op in '+-x/^':
op2 = self.evaluateStack(s)
op1 = self.evaluateStack(s)
op1 = self.opn[op](op1, op2)
elif op == 'PI':
op1 = math.pi
elif op == 'E':
op1 = math.e
elif op in self.fn:
op1 = self.fn[op](self.evaluateStack(s))
elif op[0].isalpha():
op1 = 0
else:
op1 = float(op)
return self.opn[op](op1, op2)
elif op == 'PI':
return math.pi
elif op == 'E':
return math.e
elif op in self.fn:
return self.fn[op](self.evaluateStack(s))
elif op[0].isalpha():
return 0
else:
return float(op)
|
CorpBot.py
|
positive
|
def main(args):
<DeepExtract>
gold_docs = get_documents(args.documents_path, args.gold_path)
pred_docs = get_documents(args.documents_path, args.pred_path)
evaluator = evaluate_documents(gold_docs, pred_docs, language=args.language)
</DeepExtract>
print()
print(evaluator.entity_level())
print()
print(evaluator.token_level())
print()
print(evaluator.token_level_blind())
|
def main(args):
gold_docs = get_documents(args.documents_path, args.gold_path)
pred_docs = get_documents(args.documents_path, args.pred_path)
evaluator = evaluate_documents(gold_docs, pred_docs, language=args.language)
print()
print(evaluator.entity_level())
print()
print(evaluator.token_level())
print()
print(evaluator.token_level_blind())
|
deidentify
|
positive
|
def create_object_for_package_header(stix_package_header, env, type_of_obj):
<DeepExtract>
instance = {'type': type_of_obj}
if get_option_value('spec_version') == '2.1':
instance['spec_version'] = '2.1'
instance['id'] = generate_stix2x_id(type_of_obj, None.id_ if None and hasattr(None, 'id_') and None.id_ else parent_id, id_used)
if None:
timestamp = convert_timestamp_of_stix_object(None, env.timestamp, True)
else:
timestamp = strftime_with_appropriate_fractional_seconds(env.timestamp, True)
instance['created'] = timestamp
if type_of_obj != 'marking-definition':
instance['modified'] = timestamp
instance['description'] = ''
if False:
instance['external_references'] = []
sdo_instance = instance
</DeepExtract>
if hasattr(stix_package_header, 'title') and stix_package_header.title is not None:
sdo_instance['name'] = stix_package_header.title
<DeepExtract>
if hasattr(stix_package_header, 'descriptions') and len(stix_package_header.descriptions) != 0:
description_as_text = str(process_structured_text_list(stix_package_header.descriptions))
if description_as_text:
if parent_info and sdo_instance['description']:
sdo_instance['description'] += '\nPARENT_DESCRIPTION: \n' + description_as_text
else:
sdo_instance['description'] += description_as_text
elif hasattr(stix_package_header, 'description') and stix_package_header.description is not None:
sdo_instance['description'] += str(stix_package_header.description.value)
if hasattr(stix_package_header, 'short_descriptions') and len(stix_package_header.short_descriptions) != 0:
if sdo_instance['description']:
sdo_instance['description'] += '\n'
process_short_description(sdo_instance, process_structured_text_list(stix_package_header.short_descriptions), 'short_descriptions', parent_info)
elif hasattr(stix_package_header, 'short_description') and stix_package_header.short_description is not None:
if sdo_instance['description']:
sdo_instance['description'] += '\n'
process_short_description(sdo_instance, stix_package_header.short_description, 'short_description', parent_info)
</DeepExtract>
if env.created_by_ref:
sdo_instance['created_by_ref'] = env.created_by_ref
if type_of_obj == 'report':
spec_version = get_option_value('spec_version')
convert_controlled_vocabs_to_open_vocabs(sdo_instance, 'labels' if spec_version == '2.0' else 'report_types', stix_package_header.package_intents, REPORT_LABELS_MAP, False, required=spec_version == '2.0')
else:
sdo_instance['context'] = 'header_information'
<DeepExtract>
(container, extension_definition_id) = determine_container_for_missing_properties(type_of_obj, sdo_instance)
if container is not None:
if stix_package_header.package_intents:
handle_missing_string_property(container, 'package_intents', stix_package_header.package_intents, sdo_instance['id'], True)
fill_in_extension_properties(sdo_instance, container, extension_definition_id)
</DeepExtract>
sdo_instance['object_refs'] = [x['id'] for x in env.bundle_instance['objects']]
if type_of_obj == 'report':
sdo_instance['published'] = strftime_with_appropriate_fractional_seconds(datetime.now(), True)
if 'description' in sdo_instance and sdo_instance['description'] == '':
del sdo_instance['description']
info('%s was created to store the extra STIX package header information', 220, sdo_instance['id'])
return sdo_instance
|
def create_object_for_package_header(stix_package_header, env, type_of_obj):
instance = {'type': type_of_obj}
if get_option_value('spec_version') == '2.1':
instance['spec_version'] = '2.1'
instance['id'] = generate_stix2x_id(type_of_obj, None.id_ if None and hasattr(None, 'id_') and None.id_ else parent_id, id_used)
if None:
timestamp = convert_timestamp_of_stix_object(None, env.timestamp, True)
else:
timestamp = strftime_with_appropriate_fractional_seconds(env.timestamp, True)
instance['created'] = timestamp
if type_of_obj != 'marking-definition':
instance['modified'] = timestamp
instance['description'] = ''
if False:
instance['external_references'] = []
sdo_instance = instance
if hasattr(stix_package_header, 'title') and stix_package_header.title is not None:
sdo_instance['name'] = stix_package_header.title
if hasattr(stix_package_header, 'descriptions') and len(stix_package_header.descriptions) != 0:
description_as_text = str(process_structured_text_list(stix_package_header.descriptions))
if description_as_text:
if parent_info and sdo_instance['description']:
sdo_instance['description'] += '\nPARENT_DESCRIPTION: \n' + description_as_text
else:
sdo_instance['description'] += description_as_text
elif hasattr(stix_package_header, 'description') and stix_package_header.description is not None:
sdo_instance['description'] += str(stix_package_header.description.value)
if hasattr(stix_package_header, 'short_descriptions') and len(stix_package_header.short_descriptions) != 0:
if sdo_instance['description']:
sdo_instance['description'] += '\n'
process_short_description(sdo_instance, process_structured_text_list(stix_package_header.short_descriptions), 'short_descriptions', parent_info)
elif hasattr(stix_package_header, 'short_description') and stix_package_header.short_description is not None:
if sdo_instance['description']:
sdo_instance['description'] += '\n'
process_short_description(sdo_instance, stix_package_header.short_description, 'short_description', parent_info)
if env.created_by_ref:
sdo_instance['created_by_ref'] = env.created_by_ref
if type_of_obj == 'report':
spec_version = get_option_value('spec_version')
convert_controlled_vocabs_to_open_vocabs(sdo_instance, 'labels' if spec_version == '2.0' else 'report_types', stix_package_header.package_intents, REPORT_LABELS_MAP, False, required=spec_version == '2.0')
else:
sdo_instance['context'] = 'header_information'
(container, extension_definition_id) = determine_container_for_missing_properties(type_of_obj, sdo_instance)
if container is not None:
if stix_package_header.package_intents:
handle_missing_string_property(container, 'package_intents', stix_package_header.package_intents, sdo_instance['id'], True)
fill_in_extension_properties(sdo_instance, container, extension_definition_id)
sdo_instance['object_refs'] = [x['id'] for x in env.bundle_instance['objects']]
if type_of_obj == 'report':
sdo_instance['published'] = strftime_with_appropriate_fractional_seconds(datetime.now(), True)
if 'description' in sdo_instance and sdo_instance['description'] == '':
del sdo_instance['description']
info('%s was created to store the extra STIX package header information', 220, sdo_instance['id'])
return sdo_instance
|
cti-stix-elevator
|
positive
|
def revert_sources(self):
"""Revert all sources to the revisions specified in :attr:`sources`.
"""
for (target, desc) in self.sources.items():
if desc[0] in ('local', 'downloadable'):
continue
(vcs_type, vcs_spec, options) = desc
local_dir = self.odoo_dir if target is main_software else target
<DeepExtract>
if os.path.isabs(local_dir):
local_dir = local_dir
local_dir = join(self.buildout_dir, local_dir)
</DeepExtract>
repo = vcs.repo(vcs_type, local_dir, vcs_spec[0], **options)
try:
repo.revert(vcs_spec[1])
except NotImplementedError:
logger.warn('vcs-revert: not implemented for %s repository at %s', vcs_type, local_dir)
else:
logger.info('Reverted %s repository at %s', vcs_type, local_dir)
|
def revert_sources(self):
"""Revert all sources to the revisions specified in :attr:`sources`.
"""
for (target, desc) in self.sources.items():
if desc[0] in ('local', 'downloadable'):
continue
(vcs_type, vcs_spec, options) = desc
local_dir = self.odoo_dir if target is main_software else target
if os.path.isabs(local_dir):
local_dir = local_dir
local_dir = join(self.buildout_dir, local_dir)
repo = vcs.repo(vcs_type, local_dir, vcs_spec[0], **options)
try:
repo.revert(vcs_spec[1])
except NotImplementedError:
logger.warn('vcs-revert: not implemented for %s repository at %s', vcs_type, local_dir)
else:
logger.info('Reverted %s repository at %s', vcs_type, local_dir)
|
anybox.recipe.odoo
|
positive
|
@config.on_load
def on_load():
if config.LOG_NOT_FOUND:
global notfound_logger
<DeepExtract>
logger = logging.getLogger('notfound')
logger.setLevel(logging.DEBUG)
filename = Path(config.LOG_DIR).joinpath('{}.log'.format('notfound'))
try:
handler = logging.handlers.TimedRotatingFileHandler(str(filename), when='midnight')
except FileNotFoundError:
print('Unable to write to {}'.format(filename))
else:
logger.addHandler(handler)
notfound_logger = logger
</DeepExtract>
if config.LOG_QUERIES:
global query_logger
<DeepExtract>
logger = logging.getLogger('queries')
logger.setLevel(logging.DEBUG)
filename = Path(config.LOG_DIR).joinpath('{}.log'.format('queries'))
try:
handler = logging.handlers.TimedRotatingFileHandler(str(filename), when='midnight')
except FileNotFoundError:
print('Unable to write to {}'.format(filename))
else:
logger.addHandler(handler)
query_logger = logger
</DeepExtract>
if config.SLOW_QUERIES:
global slow_query_logger
<DeepExtract>
logger = logging.getLogger('slow_queries')
logger.setLevel(logging.DEBUG)
filename = Path(config.LOG_DIR).joinpath('{}.log'.format('slow_queries'))
try:
handler = logging.handlers.TimedRotatingFileHandler(str(filename), when='midnight')
except FileNotFoundError:
print('Unable to write to {}'.format(filename))
else:
logger.addHandler(handler)
slow_query_logger = logger
</DeepExtract>
|
@config.on_load
def on_load():
if config.LOG_NOT_FOUND:
global notfound_logger
logger = logging.getLogger('notfound')
logger.setLevel(logging.DEBUG)
filename = Path(config.LOG_DIR).joinpath('{}.log'.format('notfound'))
try:
handler = logging.handlers.TimedRotatingFileHandler(str(filename), when='midnight')
except FileNotFoundError:
print('Unable to write to {}'.format(filename))
else:
logger.addHandler(handler)
notfound_logger = logger
if config.LOG_QUERIES:
global query_logger
logger = logging.getLogger('queries')
logger.setLevel(logging.DEBUG)
filename = Path(config.LOG_DIR).joinpath('{}.log'.format('queries'))
try:
handler = logging.handlers.TimedRotatingFileHandler(str(filename), when='midnight')
except FileNotFoundError:
print('Unable to write to {}'.format(filename))
else:
logger.addHandler(handler)
query_logger = logger
if config.SLOW_QUERIES:
global slow_query_logger
logger = logging.getLogger('slow_queries')
logger.setLevel(logging.DEBUG)
filename = Path(config.LOG_DIR).joinpath('{}.log'.format('slow_queries'))
try:
handler = logging.handlers.TimedRotatingFileHandler(str(filename), when='midnight')
except FileNotFoundError:
print('Unable to write to {}'.format(filename))
else:
logger.addHandler(handler)
slow_query_logger = logger
|
addok
|
positive
|
@cached(3600)
def get_dialcode(destination_number, dialcode):
"""
Retrieve the correct dialcode for a destination_number
"""
if dialcode and len(dialcode) > 0:
return dialcode
else:
<DeepExtract>
settings.PREFIX_TO_IGNORE = settings.PREFIX_TO_IGNORE.strip(' \t\n\r')
if settings.PREFIX_TO_IGNORE and len(settings.PREFIX_TO_IGNORE) > 0:
settings.PREFIX_TO_IGNORE = settings.PREFIX_TO_IGNORE.split(',')
settings.PREFIX_TO_IGNORE = sorted(settings.PREFIX_TO_IGNORE, key=len, reverse=True)
for rprefix in settings.PREFIX_TO_IGNORE:
rprefix = rprefix.strip(' \t\n\r')
rprefix = re.sub('\\+', '\\\\+', rprefix)
if rprefix and len(rprefix) > 0:
destination_number = re.sub('^%s' % rprefix, '', destination_number)
sanitized_destination = destination_number
</DeepExtract>
<DeepExtract>
sanitized_destination = str(sanitized_destination)
if len(sanitized_destination) > 0 and sanitized_destination[0] == '+':
sanitized_destination = sanitized_destination[1:]
m = re.search('(\\d*)', sanitized_destination)
sanitized_destination = m.group(0)
try:
int(sanitized_destination)
except ValueError:
prefix_list = False
prefix_range = range(settings.PREFIX_LIMIT_MIN, settings.PREFIX_LIMIT_MAX + 1)
prefix_range.reverse()
destination_prefix_list = ''
for i in prefix_range:
if i == settings.PREFIX_LIMIT_MIN:
destination_prefix_list = destination_prefix_list + sanitized_destination[0:i]
else:
destination_prefix_list = destination_prefix_list + sanitized_destination[0:i] + ', '
prefix_list = str(destination_prefix_list)
</DeepExtract>
if prefix_list and len(sanitized_destination) > settings.PN_MAX_DIGITS and (not sanitized_destination[:1].isalpha()):
<DeepExtract>
country_id = None
prefix = None
if not prefix_list:
(country_id, prefix_id) = (country_id, prefix)
try:
prefix_obj = Prefix.objects.filter(prefix__in=eval(prefix_list)).order_by('prefix')
for i in xrange(0, len(prefix_obj)):
if prefix_obj[i].country_id:
country_id = prefix_obj[i].country_id.id
prefix = prefix_obj[i].prefix
(country_id, prefix_id) = (country_id, prefix)
except:
(country_id, prefix_id) = (country_id, prefix)
</DeepExtract>
dialcode = prefix_id
else:
dialcode = ''
return dialcode
|
@cached(3600)
def get_dialcode(destination_number, dialcode):
"""
Retrieve the correct dialcode for a destination_number
"""
if dialcode and len(dialcode) > 0:
return dialcode
else:
settings.PREFIX_TO_IGNORE = settings.PREFIX_TO_IGNORE.strip(' \t\n\r')
if settings.PREFIX_TO_IGNORE and len(settings.PREFIX_TO_IGNORE) > 0:
settings.PREFIX_TO_IGNORE = settings.PREFIX_TO_IGNORE.split(',')
settings.PREFIX_TO_IGNORE = sorted(settings.PREFIX_TO_IGNORE, key=len, reverse=True)
for rprefix in settings.PREFIX_TO_IGNORE:
rprefix = rprefix.strip(' \t\n\r')
rprefix = re.sub('\\+', '\\\\+', rprefix)
if rprefix and len(rprefix) > 0:
destination_number = re.sub('^%s' % rprefix, '', destination_number)
sanitized_destination = destination_number
sanitized_destination = str(sanitized_destination)
if len(sanitized_destination) > 0 and sanitized_destination[0] == '+':
sanitized_destination = sanitized_destination[1:]
m = re.search('(\\d*)', sanitized_destination)
sanitized_destination = m.group(0)
try:
int(sanitized_destination)
except ValueError:
prefix_list = False
prefix_range = range(settings.PREFIX_LIMIT_MIN, settings.PREFIX_LIMIT_MAX + 1)
prefix_range.reverse()
destination_prefix_list = ''
for i in prefix_range:
if i == settings.PREFIX_LIMIT_MIN:
destination_prefix_list = destination_prefix_list + sanitized_destination[0:i]
else:
destination_prefix_list = destination_prefix_list + sanitized_destination[0:i] + ', '
prefix_list = str(destination_prefix_list)
if prefix_list and len(sanitized_destination) > settings.PN_MAX_DIGITS and (not sanitized_destination[:1].isalpha()):
country_id = None
prefix = None
if not prefix_list:
(country_id, prefix_id) = (country_id, prefix)
try:
prefix_obj = Prefix.objects.filter(prefix__in=eval(prefix_list)).order_by('prefix')
for i in xrange(0, len(prefix_obj)):
if prefix_obj[i].country_id:
country_id = prefix_obj[i].country_id.id
prefix = prefix_obj[i].prefix
(country_id, prefix_id) = (country_id, prefix)
except:
(country_id, prefix_id) = (country_id, prefix)
dialcode = prefix_id
else:
dialcode = ''
return dialcode
|
cdr-stats
|
positive
|
def test_invalid_method_result(self):
"""Test a method with an invalid result."""
<DeepExtract>
self.object = Mock(__dbus_xml__=dedent('\n <node>\n <interface name="Interface">\n <method name="Method">\n <arg direction="out" name="return" type="t"/>\n </method>\n </interface>\n </node>\n '))
del self.object.Get
del self.object.Set
del self.object.GetAll
self.object.Signal1 = Signal()
self.object.Signal2 = Signal()
self.object.PropertiesChanged = Signal()
self.handler = ServerObjectHandler(self.message_bus, self.object_path, self.object, error_mapper=self.error_mapper)
self.handler.connect_object()
</DeepExtract>
self.object.Method.return_value = -1
<DeepExtract>
invocation = Mock()
with self.assertLogs(level='WARN'):
self.handler._method_callback(invocation, 'Interface', 'Method', parameters)
invocation.return_dbus_error.assert_called_once()
invocation.return_value.assert_not_called()
((name, msg), kwargs) = invocation.return_dbus_error.call_args
self.assertEqual(kwargs, {})
self.assertEqual(name, 'not.known.Error.OverflowError', 'Unexpected error name.')
if error_message is not None:
self.assertEqual(msg, error_message, 'Unexpected error message.')
</DeepExtract>
|
def test_invalid_method_result(self):
"""Test a method with an invalid result."""
self.object = Mock(__dbus_xml__=dedent('\n <node>\n <interface name="Interface">\n <method name="Method">\n <arg direction="out" name="return" type="t"/>\n </method>\n </interface>\n </node>\n '))
del self.object.Get
del self.object.Set
del self.object.GetAll
self.object.Signal1 = Signal()
self.object.Signal2 = Signal()
self.object.PropertiesChanged = Signal()
self.handler = ServerObjectHandler(self.message_bus, self.object_path, self.object, error_mapper=self.error_mapper)
self.handler.connect_object()
self.object.Method.return_value = -1
invocation = Mock()
with self.assertLogs(level='WARN'):
self.handler._method_callback(invocation, 'Interface', 'Method', parameters)
invocation.return_dbus_error.assert_called_once()
invocation.return_value.assert_not_called()
((name, msg), kwargs) = invocation.return_dbus_error.call_args
self.assertEqual(kwargs, {})
self.assertEqual(name, 'not.known.Error.OverflowError', 'Unexpected error name.')
if error_message is not None:
self.assertEqual(msg, error_message, 'Unexpected error message.')
|
dasbus
|
positive
|
def __init__(self, env_args, lut_holder, is_evaluating):
"""
Args:
env_args: Depending on game type an instance of PokerEnvArgs or DiscretePokerEnvArgs
lut_holder: Depending on game type. An instance of a subclass of LutHolder. It is not checked
whether the correct one is passed, so be sure to pass the right one!
lut_holder could theoretically be created encapsulated by an instance of this class,
but for optimization (i.e. only one per machine, not per env), we pass it.
is_evaluating (bool): Whether the environment shall be spawned in evaluation mode (i.e. no randomization)
or not.
"""
assert env_args.n_seats >= 2
self._args = copy.deepcopy(env_args)
self.lut_holder = lut_holder
self.IS_EVALUATING = is_evaluating
self.deck = DeckOfCards(num_suits=self.N_SUITS, num_ranks=self.N_RANKS)
self.BTN_POS = NotImplementedError
self.SB_POS = NotImplementedError
self.BB_POS = NotImplementedError
self._USE_SIMPLE_HU_OBS = NotImplementedError
self.RETURN_PRE_TRANSITION_STATE_IN_INFO = NotImplementedError
self.N_SEATS = NotImplementedError
self.MAX_CHIPS = NotImplementedError
self.STACK_RANDOMIZATION_RANGE = NotImplementedError
self.REWARD_SCALAR = NotImplementedError
self.seats = NotImplementedError
<DeepExtract>
a = copy.deepcopy(env_args)
if a.n_seats == 2:
self.BTN_POS = 0
self.SB_POS = 0
self.BB_POS = 1
else:
self.BTN_POS = 0
self.SB_POS = 1
self.BB_POS = 2
self._USE_SIMPLE_HU_OBS = a.use_simplified_headsup_obs
self.RETURN_PRE_TRANSITION_STATE_IN_INFO = a.RETURN_PRE_TRANSITION_STATE_IN_INFO
self.N_SEATS = int(a.n_seats)
try:
self.MAX_CHIPS = sum(a.starting_stack_sizes_list) + a.stack_randomization_range[1] * a.n_seats + 1
except TypeError:
self.MAX_CHIPS = a.n_seats * (self.DEFAULT_STACK_SIZE + a.stack_randomization_range[1]) + 1
self.STACK_RANDOMIZATION_RANGE = a.stack_randomization_range
if a.scale_rewards:
try:
self.REWARD_SCALAR = float(sum(a.starting_stack_sizes_list)) / float(a.n_seats) / 5
except TypeError:
self.REWARD_SCALAR = self.DEFAULT_STACK_SIZE / 5.0
else:
self.REWARD_SCALAR = 1.0
self.seats = [PokerPlayer(seat_id=i, poker_env=self, is_evaluating=is_evaluating, starting_stack=a.starting_stack_sizes_list[i] if a.starting_stack_sizes_list[i] is not None else self.DEFAULT_STACK_SIZE, stack_randomization_range=a.stack_randomization_range) for i in range(a.n_seats)]
</DeepExtract>
<DeepExtract>
obs_idx_dict = {}
obs_parts_idxs_dict = {'board': [], 'players': [[] for _ in range(self.N_SEATS)], 'table_state': []}
next_idx = [0]
def get_discrete(size, name, _curr_idx):
obs_idx_dict[name] = _curr_idx[0]
_curr_idx[0] += 1
(self.observation_space, self.obs_idx_dict, self.obs_parts_idxs_dict) = spaces.Discrete(size)
def get_new_box(name, _curr_idx, high, low=0):
obs_idx_dict[name] = _curr_idx[0]
_curr_idx[0] += 1
(self.observation_space, self.obs_idx_dict, self.obs_parts_idxs_dict) = spaces.Box(low=low, high=high, shape=(1,), dtype=np.float32)
if self.N_SEATS == 2 and self._USE_SIMPLE_HU_OBS:
_k = next_idx[0]
_table_space = [get_new_box('ante', next_idx, self.N_SEATS), get_new_box('small_blind', next_idx, self.N_SEATS), get_new_box('big_blind', next_idx, self.N_SEATS), get_new_box('min_raise', next_idx, self.N_SEATS), get_new_box('pot_amt', next_idx, self.N_SEATS), get_new_box('total_to_call', next_idx, self.N_SEATS), get_new_box('last_action_how_much', next_idx, self.N_SEATS)]
for i in range(3):
_table_space.append(get_discrete(1, 'last_action_what_' + str(i), next_idx))
for i in range(self.N_SEATS):
_table_space.append(get_discrete(1, 'last_action_who_' + str(i), next_idx))
for i in range(self.N_SEATS):
_table_space.append(get_discrete(1, 'p' + str(i) + '_acts_next', next_idx))
for i in range(max(self.ALL_ROUNDS_LIST) + 1):
(_table_space.append(get_discrete(1, 'round_' + Poker.INT2STRING_ROUND[i], next_idx)),)
obs_parts_idxs_dict['table_state'] += list(range(_k, next_idx[0]))
_player_space = []
for i in range(self.N_SEATS):
_k = next_idx[0]
_player_space += [get_new_box('stack_p' + str(i), next_idx, self.N_SEATS), get_new_box('curr_bet_p' + str(i), next_idx, self.N_SEATS), get_discrete(1, 'is_allin_p' + str(i), next_idx)]
obs_parts_idxs_dict['players'][i] += list(range(_k, next_idx[0]))
_board_space = []
_k = next_idx[0]
for i in range(self.N_TOTAL_BOARD_CARDS):
_board_space.append(get_discrete(1, str(i) + 'th_board_card_rank', next_idx))
_board_space.append(get_discrete(1, str(i) + 'th_board_card_suit', next_idx))
_board_space.append(get_discrete(1, str(i) + 'th_board_card_card', next_idx))
obs_parts_idxs_dict['board'] += list(range(_k, next_idx[0]))
_observation_space = spaces.Tuple(_table_space + _player_space + _board_space)
_observation_space.shape = [len(_observation_space.spaces)]
else:
raise NotImplementedError
(self.observation_space, self.obs_idx_dict, self.obs_parts_idxs_dict) = (_observation_space, obs_idx_dict, obs_parts_idxs_dict)
</DeepExtract>
self.hole_card_space_shape = [self.N_HOLE_CARDS, 2]
self.current_round = None
self.side_pots = None
self.main_pot = None
self.board = None
self.last_action = None
self.capped_raise = CappedRaise()
self.current_player = None
self.last_raiser = None
self.n_actions_this_episode = None
self.n_raises_this_round = NotImplementedError
|
def __init__(self, env_args, lut_holder, is_evaluating):
"""
Args:
env_args: Depending on game type an instance of PokerEnvArgs or DiscretePokerEnvArgs
lut_holder: Depending on game type. An instance of a subclass of LutHolder. It is not checked
whether the correct one is passed, so be sure to pass the right one!
lut_holder could theoretically be created encapsulated by an instance of this class,
but for optimization (i.e. only one per machine, not per env), we pass it.
is_evaluating (bool): Whether the environment shall be spawned in evaluation mode (i.e. no randomization)
or not.
"""
assert env_args.n_seats >= 2
self._args = copy.deepcopy(env_args)
self.lut_holder = lut_holder
self.IS_EVALUATING = is_evaluating
self.deck = DeckOfCards(num_suits=self.N_SUITS, num_ranks=self.N_RANKS)
self.BTN_POS = NotImplementedError
self.SB_POS = NotImplementedError
self.BB_POS = NotImplementedError
self._USE_SIMPLE_HU_OBS = NotImplementedError
self.RETURN_PRE_TRANSITION_STATE_IN_INFO = NotImplementedError
self.N_SEATS = NotImplementedError
self.MAX_CHIPS = NotImplementedError
self.STACK_RANDOMIZATION_RANGE = NotImplementedError
self.REWARD_SCALAR = NotImplementedError
self.seats = NotImplementedError
a = copy.deepcopy(env_args)
if a.n_seats == 2:
self.BTN_POS = 0
self.SB_POS = 0
self.BB_POS = 1
else:
self.BTN_POS = 0
self.SB_POS = 1
self.BB_POS = 2
self._USE_SIMPLE_HU_OBS = a.use_simplified_headsup_obs
self.RETURN_PRE_TRANSITION_STATE_IN_INFO = a.RETURN_PRE_TRANSITION_STATE_IN_INFO
self.N_SEATS = int(a.n_seats)
try:
self.MAX_CHIPS = sum(a.starting_stack_sizes_list) + a.stack_randomization_range[1] * a.n_seats + 1
except TypeError:
self.MAX_CHIPS = a.n_seats * (self.DEFAULT_STACK_SIZE + a.stack_randomization_range[1]) + 1
self.STACK_RANDOMIZATION_RANGE = a.stack_randomization_range
if a.scale_rewards:
try:
self.REWARD_SCALAR = float(sum(a.starting_stack_sizes_list)) / float(a.n_seats) / 5
except TypeError:
self.REWARD_SCALAR = self.DEFAULT_STACK_SIZE / 5.0
else:
self.REWARD_SCALAR = 1.0
self.seats = [PokerPlayer(seat_id=i, poker_env=self, is_evaluating=is_evaluating, starting_stack=a.starting_stack_sizes_list[i] if a.starting_stack_sizes_list[i] is not None else self.DEFAULT_STACK_SIZE, stack_randomization_range=a.stack_randomization_range) for i in range(a.n_seats)]
obs_idx_dict = {}
obs_parts_idxs_dict = {'board': [], 'players': [[] for _ in range(self.N_SEATS)], 'table_state': []}
next_idx = [0]
def get_discrete(size, name, _curr_idx):
obs_idx_dict[name] = _curr_idx[0]
_curr_idx[0] += 1
(self.observation_space, self.obs_idx_dict, self.obs_parts_idxs_dict) = spaces.Discrete(size)
def get_new_box(name, _curr_idx, high, low=0):
obs_idx_dict[name] = _curr_idx[0]
_curr_idx[0] += 1
(self.observation_space, self.obs_idx_dict, self.obs_parts_idxs_dict) = spaces.Box(low=low, high=high, shape=(1,), dtype=np.float32)
if self.N_SEATS == 2 and self._USE_SIMPLE_HU_OBS:
_k = next_idx[0]
_table_space = [get_new_box('ante', next_idx, self.N_SEATS), get_new_box('small_blind', next_idx, self.N_SEATS), get_new_box('big_blind', next_idx, self.N_SEATS), get_new_box('min_raise', next_idx, self.N_SEATS), get_new_box('pot_amt', next_idx, self.N_SEATS), get_new_box('total_to_call', next_idx, self.N_SEATS), get_new_box('last_action_how_much', next_idx, self.N_SEATS)]
for i in range(3):
_table_space.append(get_discrete(1, 'last_action_what_' + str(i), next_idx))
for i in range(self.N_SEATS):
_table_space.append(get_discrete(1, 'last_action_who_' + str(i), next_idx))
for i in range(self.N_SEATS):
_table_space.append(get_discrete(1, 'p' + str(i) + '_acts_next', next_idx))
for i in range(max(self.ALL_ROUNDS_LIST) + 1):
(_table_space.append(get_discrete(1, 'round_' + Poker.INT2STRING_ROUND[i], next_idx)),)
obs_parts_idxs_dict['table_state'] += list(range(_k, next_idx[0]))
_player_space = []
for i in range(self.N_SEATS):
_k = next_idx[0]
_player_space += [get_new_box('stack_p' + str(i), next_idx, self.N_SEATS), get_new_box('curr_bet_p' + str(i), next_idx, self.N_SEATS), get_discrete(1, 'is_allin_p' + str(i), next_idx)]
obs_parts_idxs_dict['players'][i] += list(range(_k, next_idx[0]))
_board_space = []
_k = next_idx[0]
for i in range(self.N_TOTAL_BOARD_CARDS):
_board_space.append(get_discrete(1, str(i) + 'th_board_card_rank', next_idx))
_board_space.append(get_discrete(1, str(i) + 'th_board_card_suit', next_idx))
_board_space.append(get_discrete(1, str(i) + 'th_board_card_card', next_idx))
obs_parts_idxs_dict['board'] += list(range(_k, next_idx[0]))
_observation_space = spaces.Tuple(_table_space + _player_space + _board_space)
_observation_space.shape = [len(_observation_space.spaces)]
else:
raise NotImplementedError
(self.observation_space, self.obs_idx_dict, self.obs_parts_idxs_dict) = (_observation_space, obs_idx_dict, obs_parts_idxs_dict)
self.hole_card_space_shape = [self.N_HOLE_CARDS, 2]
self.current_round = None
self.side_pots = None
self.main_pot = None
self.board = None
self.last_action = None
self.capped_raise = CappedRaise()
self.current_player = None
self.last_raiser = None
self.n_actions_this_episode = None
self.n_raises_this_round = NotImplementedError
|
DREAM
|
positive
|
def test_unblacklist_multiple_channels(self):
name = 'commands'
channels = ['#channel1', '#channel2']
<DeepExtract>
plugin_manager = plugin_manager or self.plugin_manager
failed_plugins = plugin_manager.load(name)
if not isinstance(name, list):
name = [name]
assert failed_plugins == []
assert len(list(plugin_manager.plugins.keys())) == len(name)
for name in name:
class_ = 'Test'
name_pieces = name.split('_')
for name_piece in name_pieces:
class_ += name_piece[0].upper() + name_piece[1:].lower()
class_ += 'Plugin'
assert name in list(plugin_manager.plugins.keys())
assert plugin_manager.plugins[name]['name'] == name
assert isinstance(plugin_manager.plugins[name]['instance'], object)
if False:
assert plugin_manager.plugins[name]['commands'] == []
if assert_callbacks_is_empty:
assert plugin_manager.plugins[name]['callbacks'] == []
assert plugin_manager.plugins[name]['callback_ids'] == {}
if assert_config_is_none:
assert plugin_manager.plugins[name]['config'] is None
if assert_blacklist_is_empty:
assert plugin_manager.plugins[name]['blacklist'] == []
</DeepExtract>
assert self.plugin_manager.blacklist(name, channels) is True
assert self.plugin_manager.plugins[name]['blacklist'] == channels
assert self.plugin_manager.unblacklist(name, channels) == []
assert self.plugin_manager.plugins[name]['blacklist'] == []
|
def test_unblacklist_multiple_channels(self):
name = 'commands'
channels = ['#channel1', '#channel2']
plugin_manager = plugin_manager or self.plugin_manager
failed_plugins = plugin_manager.load(name)
if not isinstance(name, list):
name = [name]
assert failed_plugins == []
assert len(list(plugin_manager.plugins.keys())) == len(name)
for name in name:
class_ = 'Test'
name_pieces = name.split('_')
for name_piece in name_pieces:
class_ += name_piece[0].upper() + name_piece[1:].lower()
class_ += 'Plugin'
assert name in list(plugin_manager.plugins.keys())
assert plugin_manager.plugins[name]['name'] == name
assert isinstance(plugin_manager.plugins[name]['instance'], object)
if False:
assert plugin_manager.plugins[name]['commands'] == []
if assert_callbacks_is_empty:
assert plugin_manager.plugins[name]['callbacks'] == []
assert plugin_manager.plugins[name]['callback_ids'] == {}
if assert_config_is_none:
assert plugin_manager.plugins[name]['config'] is None
if assert_blacklist_is_empty:
assert plugin_manager.plugins[name]['blacklist'] == []
assert self.plugin_manager.blacklist(name, channels) is True
assert self.plugin_manager.plugins[name]['blacklist'] == channels
assert self.plugin_manager.unblacklist(name, channels) == []
assert self.plugin_manager.plugins[name]['blacklist'] == []
|
Cardinal
|
positive
|
def _reset(self):
self.curr_step = 0
<DeepExtract>
if not self.moon:
return
self.world.contactListener = None
self._clean_particles(True)
self.world.DestroyBody(self.moon)
self.moon = None
self.world.DestroyBody(self.lander)
self.lander = None
self.world.DestroyBody(self.legs[0])
self.world.DestroyBody(self.legs[1])
</DeepExtract>
self.world.contactListener_keepref = ContactDetector(self)
self.world.contactListener = self.world.contactListener_keepref
self.game_over = False
self.prev_shaping = None
W = VIEWPORT_W / SCALE
H = VIEWPORT_H / SCALE
CHUNKS = 11
height = self.np_random.uniform(0, H / 2, size=(CHUNKS + 1,))
chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)]
helipad_chunk = np.random.choice(range(1, CHUNKS - 1))
self.helipad_x1 = chunk_x[helipad_chunk - 1]
self.helipad_x2 = chunk_x[helipad_chunk + 1]
self.helipad_y = H / 4
height[helipad_chunk - 2] = self.helipad_y
height[helipad_chunk - 1] = self.helipad_y
height[helipad_chunk + 0] = self.helipad_y
height[helipad_chunk + 1] = self.helipad_y
height[helipad_chunk + 2] = self.helipad_y
smooth_y = [0.33 * (height[i - 1] + height[i + 0] + height[i + 1]) for i in range(CHUNKS)]
self.moon = self.world.CreateStaticBody(shapes=edgeShape(vertices=[(0, 0), (W, 0)]))
self.sky_polys = []
for i in range(CHUNKS - 1):
p1 = (chunk_x[i], smooth_y[i])
p2 = (chunk_x[i + 1], smooth_y[i + 1])
self.moon.CreateEdgeFixture(vertices=[p1, p2], density=0, friction=0.1)
self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])
self.moon.color1 = (0.0, 0.0, 0.0)
self.moon.color2 = (0.0, 0.0, 0.0)
initial_y = VIEWPORT_H / SCALE
self.lander = self.world.CreateDynamicBody(position=(VIEWPORT_W / SCALE / 2, initial_y), angle=0.0, fixtures=fixtureDef(shape=polygonShape(vertices=[(x / SCALE, y / SCALE) for (x, y) in LANDER_POLY]), density=5.0, friction=0.1, categoryBits=16, maskBits=1, restitution=0.0))
self.lander.color1 = (0.5, 0.4, 0.9)
self.lander.color2 = (0.3, 0.3, 0.5)
self.lander.ApplyForceToCenter((self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM)), True)
self.legs = []
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(position=(VIEWPORT_W / SCALE / 2 - i * LEG_AWAY / SCALE, initial_y), angle=i * 0.05, fixtures=fixtureDef(shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)), density=1.0, restitution=0.0, categoryBits=32, maskBits=1))
leg.ground_contact = False
leg.color1 = (0.5, 0.4, 0.9)
leg.color2 = (0.3, 0.3, 0.5)
rjd = revoluteJointDef(bodyA=self.lander, bodyB=leg, localAnchorA=(0, 0), localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE), enableMotor=True, enableLimit=True, maxMotorTorque=LEG_SPRING_TORQUE, motorSpeed=+0.3 * i)
if i == -1:
rjd.lowerAngle = +0.9 - 0.5
rjd.upperAngle = +0.9
else:
rjd.lowerAngle = -0.9
rjd.upperAngle = -0.9 + 0.5
leg.joint = self.world.CreateJoint(rjd)
self.legs.append(leg)
self.drawlist = [self.lander] + self.legs
return self._step(NOOP)[0]
|
def _reset(self):
self.curr_step = 0
if not self.moon:
return
self.world.contactListener = None
self._clean_particles(True)
self.world.DestroyBody(self.moon)
self.moon = None
self.world.DestroyBody(self.lander)
self.lander = None
self.world.DestroyBody(self.legs[0])
self.world.DestroyBody(self.legs[1])
self.world.contactListener_keepref = ContactDetector(self)
self.world.contactListener = self.world.contactListener_keepref
self.game_over = False
self.prev_shaping = None
W = VIEWPORT_W / SCALE
H = VIEWPORT_H / SCALE
CHUNKS = 11
height = self.np_random.uniform(0, H / 2, size=(CHUNKS + 1,))
chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)]
helipad_chunk = np.random.choice(range(1, CHUNKS - 1))
self.helipad_x1 = chunk_x[helipad_chunk - 1]
self.helipad_x2 = chunk_x[helipad_chunk + 1]
self.helipad_y = H / 4
height[helipad_chunk - 2] = self.helipad_y
height[helipad_chunk - 1] = self.helipad_y
height[helipad_chunk + 0] = self.helipad_y
height[helipad_chunk + 1] = self.helipad_y
height[helipad_chunk + 2] = self.helipad_y
smooth_y = [0.33 * (height[i - 1] + height[i + 0] + height[i + 1]) for i in range(CHUNKS)]
self.moon = self.world.CreateStaticBody(shapes=edgeShape(vertices=[(0, 0), (W, 0)]))
self.sky_polys = []
for i in range(CHUNKS - 1):
p1 = (chunk_x[i], smooth_y[i])
p2 = (chunk_x[i + 1], smooth_y[i + 1])
self.moon.CreateEdgeFixture(vertices=[p1, p2], density=0, friction=0.1)
self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])
self.moon.color1 = (0.0, 0.0, 0.0)
self.moon.color2 = (0.0, 0.0, 0.0)
initial_y = VIEWPORT_H / SCALE
self.lander = self.world.CreateDynamicBody(position=(VIEWPORT_W / SCALE / 2, initial_y), angle=0.0, fixtures=fixtureDef(shape=polygonShape(vertices=[(x / SCALE, y / SCALE) for (x, y) in LANDER_POLY]), density=5.0, friction=0.1, categoryBits=16, maskBits=1, restitution=0.0))
self.lander.color1 = (0.5, 0.4, 0.9)
self.lander.color2 = (0.3, 0.3, 0.5)
self.lander.ApplyForceToCenter((self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM)), True)
self.legs = []
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(position=(VIEWPORT_W / SCALE / 2 - i * LEG_AWAY / SCALE, initial_y), angle=i * 0.05, fixtures=fixtureDef(shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)), density=1.0, restitution=0.0, categoryBits=32, maskBits=1))
leg.ground_contact = False
leg.color1 = (0.5, 0.4, 0.9)
leg.color2 = (0.3, 0.3, 0.5)
rjd = revoluteJointDef(bodyA=self.lander, bodyB=leg, localAnchorA=(0, 0), localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE), enableMotor=True, enableLimit=True, maxMotorTorque=LEG_SPRING_TORQUE, motorSpeed=+0.3 * i)
if i == -1:
rjd.lowerAngle = +0.9 - 0.5
rjd.upperAngle = +0.9
else:
rjd.lowerAngle = -0.9
rjd.upperAngle = -0.9 + 0.5
leg.joint = self.world.CreateJoint(rjd)
self.legs.append(leg)
self.drawlist = [self.lander] + self.legs
return self._step(NOOP)[0]
|
cs294-112_hws
|
positive
|
def test_delete_quiz_with_submissions_and_correct_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
<DeepExtract>
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
</DeepExtract>
response = client.post('/course/1/quiz/1/submit_quiz', {}, **kwargs)
self.assertEqual(response.status_code, 200)
response = client.post('/course/1/quiz_delete', {'quiz_id': 1}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['status'], 'success')
self.assertEqual(array['message'], 'deleted')
|
def test_delete_quiz_with_submissions_and_correct_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
response = client.post('/course/1/quiz/1/submit_quiz', {}, **kwargs)
self.assertEqual(response.status_code, 200)
response = client.post('/course/1/quiz_delete', {'quiz_id': 1}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['status'], 'success')
self.assertEqual(array['message'], 'deleted')
|
academicstoday-django
|
positive
|
def mark_endpoint_unavailable(unavailable_endpoint, unavailable_operation_type):
unavailablility_info = self.location_unavailability_info_by_endpoint[unavailable_endpoint] if unavailable_endpoint in self.location_unavailability_info_by_endpoint else None
<DeepExtract>
current_time = int(round(time.time() * 1000))
</DeepExtract>
if not unavailablility_info:
self.location_unavailability_info_by_endpoint[unavailable_endpoint] = {'lastUnavailabilityCheckTimeStamp': current_time, 'operationType': set([unavailable_operation_type])}
else:
unavailable_operations = set([unavailable_operation_type]).union(unavailablility_info['operationType'])
self.location_unavailability_info_by_endpoint[unavailable_endpoint] = {'lastUnavailabilityCheckTimeStamp': current_time, 'operationType': unavailable_operations}
<DeepExtract>
if enable_multiple_writable_locations:
self.enable_multiple_writable_locations = enable_multiple_writable_locations
self.clear_stale_endpoint_unavailability_info()
if self.enable_endpoint_discovery:
if read_locations:
(self.available_read_endpoint_by_locations, self.available_read_locations) = self.get_endpoint_by_location(read_locations)
if write_locations:
(self.available_write_endpoint_by_locations, self.available_write_locations) = self.get_endpoint_by_location(write_locations)
self.write_endpoints = self.get_preferred_available_endpoints(self.available_write_endpoint_by_locations, self.available_write_locations, EndpointOperationType.WriteType, self.default_endpoint)
self.read_endpoints = self.get_preferred_available_endpoints(self.available_read_endpoint_by_locations, self.available_read_locations, EndpointOperationType.ReadType, self.write_endpoints[0])
self.last_cache_update_timestamp = self.current_time_millis()
</DeepExtract>
|
def mark_endpoint_unavailable(unavailable_endpoint, unavailable_operation_type):
unavailablility_info = self.location_unavailability_info_by_endpoint[unavailable_endpoint] if unavailable_endpoint in self.location_unavailability_info_by_endpoint else None
current_time = int(round(time.time() * 1000))
if not unavailablility_info:
self.location_unavailability_info_by_endpoint[unavailable_endpoint] = {'lastUnavailabilityCheckTimeStamp': current_time, 'operationType': set([unavailable_operation_type])}
else:
unavailable_operations = set([unavailable_operation_type]).union(unavailablility_info['operationType'])
self.location_unavailability_info_by_endpoint[unavailable_endpoint] = {'lastUnavailabilityCheckTimeStamp': current_time, 'operationType': unavailable_operations}
if enable_multiple_writable_locations:
self.enable_multiple_writable_locations = enable_multiple_writable_locations
self.clear_stale_endpoint_unavailability_info()
if self.enable_endpoint_discovery:
if read_locations:
(self.available_read_endpoint_by_locations, self.available_read_locations) = self.get_endpoint_by_location(read_locations)
if write_locations:
(self.available_write_endpoint_by_locations, self.available_write_locations) = self.get_endpoint_by_location(write_locations)
self.write_endpoints = self.get_preferred_available_endpoints(self.available_write_endpoint_by_locations, self.available_write_locations, EndpointOperationType.WriteType, self.default_endpoint)
self.read_endpoints = self.get_preferred_available_endpoints(self.available_read_endpoint_by_locations, self.available_read_locations, EndpointOperationType.ReadType, self.write_endpoints[0])
self.last_cache_update_timestamp = self.current_time_millis()
|
azure-cosmos-python
|
positive
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
d_d = args[2]
n = args[3]
result = d_d + (d_a * (d_b & 65535) << n.value >> 16)
c = 0
v = overflow(result)
av = advanced_overflow(result)
<DeepExtract>
psw = self.get('psw', Type.int_32)
</DeepExtract>
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
return result
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
d_d = args[2]
n = args[3]
result = d_d + (d_a * (d_b & 65535) << n.value >> 16)
c = 0
v = overflow(result)
av = advanced_overflow(result)
psw = self.get('psw', Type.int_32)
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
return result
|
angr-platforms
|
positive
|
def save_layer(self, layer_name: str, directory: Path, layer_key: Optional[str]=None, verbose=True) -> (Path, dict):
<DeepExtract>
populated_techniques = []
layer = self.get_layer(layer_name, layer_key)
base_name = f'{layer_name}-{layer_key}' if layer_key else layer_name
base_name = base_name.replace('*', 'WILDCARD')
name = f'Elastic-detection-rules-{base_name}'
for (tactic, techniques) in layer.items():
tactic_normalized = '-'.join(tactic.lower().split())
for (technique_id, rules_data) in techniques.items():
rules_data.update(tactic=tactic_normalized, techniqueID=technique_id)
techniques = Techniques.from_dict(rules_data)
populated_techniques.append(techniques.to_dict())
base_nav_obj = {'name': name, 'techniques': populated_techniques, 'versions': {'attack': CURRENT_ATTACK_VERSION}}
navigator = Navigator.from_dict(base_nav_obj)
built = navigator
</DeepExtract>
return (self._save(built, directory, verbose), built)
|
def save_layer(self, layer_name: str, directory: Path, layer_key: Optional[str]=None, verbose=True) -> (Path, dict):
populated_techniques = []
layer = self.get_layer(layer_name, layer_key)
base_name = f'{layer_name}-{layer_key}' if layer_key else layer_name
base_name = base_name.replace('*', 'WILDCARD')
name = f'Elastic-detection-rules-{base_name}'
for (tactic, techniques) in layer.items():
tactic_normalized = '-'.join(tactic.lower().split())
for (technique_id, rules_data) in techniques.items():
rules_data.update(tactic=tactic_normalized, techniqueID=technique_id)
techniques = Techniques.from_dict(rules_data)
populated_techniques.append(techniques.to_dict())
base_nav_obj = {'name': name, 'techniques': populated_techniques, 'versions': {'attack': CURRENT_ATTACK_VERSION}}
navigator = Navigator.from_dict(base_nav_obj)
built = navigator
return (self._save(built, directory, verbose), built)
|
detection-rules
|
positive
|
def min_str(s, t):
if s == '' and t == '':
return ''
counter = Counter(t)
missing = set(counter.keys())
min_start = 0
min_end = float('inf')
cursor_end = 0
s_len = len(s)
for (cursor_start, char) in enumerate(s):
if missing:
try:
<DeepExtract>
for cursor_end in range(cursor_end, s_len):
end_char = s[cursor_end]
if end_char in counter:
counter[end_char] -= 1
if counter[end_char] == 0:
missing.remove(end_char)
if not missing:
cursor_end = cursor_end + 1
raise StopIteration()
</DeepExtract>
except StopIteration:
break
if not missing:
if min_end - min_start > cursor_end - cursor_start:
min_start = cursor_start
min_end = cursor_end
if char in counter:
counter[char] += 1
if counter[char] == 1:
missing.add(char)
if math.isfinite(min_end):
return s[min_start:min_end]
raise Exception()
|
def min_str(s, t):
if s == '' and t == '':
return ''
counter = Counter(t)
missing = set(counter.keys())
min_start = 0
min_end = float('inf')
cursor_end = 0
s_len = len(s)
for (cursor_start, char) in enumerate(s):
if missing:
try:
for cursor_end in range(cursor_end, s_len):
end_char = s[cursor_end]
if end_char in counter:
counter[end_char] -= 1
if counter[end_char] == 0:
missing.remove(end_char)
if not missing:
cursor_end = cursor_end + 1
raise StopIteration()
except StopIteration:
break
if not missing:
if min_end - min_start > cursor_end - cursor_start:
min_start = cursor_start
min_end = cursor_end
if char in counter:
counter[char] += 1
if counter[char] == 1:
missing.add(char)
if math.isfinite(min_end):
return s[min_start:min_end]
raise Exception()
|
code_interview_training
|
positive
|
def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name='token_type_embeddings', use_position_embeddings=True, position_embedding_name='position_embeddings', initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
<DeepExtract>
if name is None:
name = input_tensor.name
if 3 is not None:
assert_rank(input_tensor, 3, name)
shape = input_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
input_shape = shape
dyn_shape = tf.shape(input_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
input_shape = shape
</DeepExtract>
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError('`token_type_ids` must be specified if`use_token_type` is True.')
token_type_table = tf.get_variable(name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range))
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range))
position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1])
num_dims = len(output.shape.as_list())
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape)
output += position_embeddings
<DeepExtract>
output_tensor = layer_norm(output, name)
output_tensor = dropout(output_tensor, dropout_prob)
output = output_tensor
</DeepExtract>
return output
|
def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name='token_type_embeddings', use_position_embeddings=True, position_embedding_name='position_embeddings', initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
if name is None:
name = input_tensor.name
if 3 is not None:
assert_rank(input_tensor, 3, name)
shape = input_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
input_shape = shape
dyn_shape = tf.shape(input_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
input_shape = shape
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError('`token_type_ids` must be specified if`use_token_type` is True.')
token_type_table = tf.get_variable(name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range))
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range))
position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1])
num_dims = len(output.shape.as_list())
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape)
output += position_embeddings
output_tensor = layer_norm(output, name)
output_tensor = dropout(output_tensor, dropout_prob)
output = output_tensor
return output
|
BERT4doc-Classification
|
positive
|