| before (stringlengths 0–955k) | after (stringlengths 0–877k) | repo (stringlengths 1–74) | type (stringclasses: 1 value) |
|---|---|---|---|
def _initialize_pytorch(self):
if self.layer_config_file:
<DeepExtract>
if isinstance(self.layer_config_file, list):
self.layers = self.layer_config_file
</DeepExtract>
self.nneurons = None
self.activations = None
else:
self.layers = []
self.nneurons = [self.nfeatures] + self.nneurons + [self.noutputs]
self.activations = ['None'] + self.activations + ['None']
for i in range(len(self.nneurons)):
self.layers.append(('Linear', {'units': self.nneurons[i], 'activation': self.activations[i]}))
self.model = pytorch_Net(self.layers, self.layer_config_file).base_model
if self.loss == 'mean_squared_error':
self.loss = nn.MSELoss()
if isinstance(self.opt_config, list) or isinstance(self.opt_config, tuple):
<DeepExtract>
(opt_name, opt_params) = (self.opt_config[0], self.opt_config[1])
if self.engine == 'pytorch':
opt_params['params'] = self.model.parameters()
opt_params['weight_decay'] = self.alpha
pytorch_opt_module = import_module('torch.optim')
try:
opt = getattr(pytorch_opt_module, opt_name)(**opt_params)
self.opt = opt
except:
raise ValueError('incorrect optimizer name or parameter for opt_config')
elif self.engine == 'tensorflow':
keras_opt_module = import_module('tensorflow.keras.optimizers')
try:
opt = getattr(keras_opt_module, opt_name)(**opt_params)
self.opt = opt
except:
raise ValueError('incorrect optimizer name or parameter for opt_config')
</DeepExtract>
elif self.opt_config.lower() == 'sgd':
self.opt = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=0.9, weight_decay=self.alpha)
elif self.opt_config.lower() == 'adam':
self.opt = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=self.alpha)
else:
raise TypeError('opt_config should be a list/tuple or a str. If str, should either be "sgd" or "adam". If list, should provide exact configurations and parameters corresponding to the respective engines')
|
def _initialize_pytorch(self):
if self.layer_config_file:
if isinstance(self.layer_config_file, list):
self.layers = self.layer_config_file
self.nneurons = None
self.activations = None
else:
self.layers = []
self.nneurons = [self.nfeatures] + self.nneurons + [self.noutputs]
self.activations = ['None'] + self.activations + ['None']
for i in range(len(self.nneurons)):
self.layers.append(('Linear', {'units': self.nneurons[i], 'activation': self.activations[i]}))
self.model = pytorch_Net(self.layers, self.layer_config_file).base_model
if self.loss == 'mean_squared_error':
self.loss = nn.MSELoss()
if isinstance(self.opt_config, list) or isinstance(self.opt_config, tuple):
(opt_name, opt_params) = (self.opt_config[0], self.opt_config[1])
if self.engine == 'pytorch':
opt_params['params'] = self.model.parameters()
opt_params['weight_decay'] = self.alpha
pytorch_opt_module = import_module('torch.optim')
try:
opt = getattr(pytorch_opt_module, opt_name)(**opt_params)
self.opt = opt
except:
raise ValueError('incorrect optimizer name or parameter for opt_config')
elif self.engine == 'tensorflow':
keras_opt_module = import_module('tensorflow.keras.optimizers')
try:
opt = getattr(keras_opt_module, opt_name)(**opt_params)
self.opt = opt
except:
raise ValueError('incorrect optimizer name or parameter for opt_config')
elif self.opt_config.lower() == 'sgd':
self.opt = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=0.9, weight_decay=self.alpha)
elif self.opt_config.lower() == 'adam':
self.opt = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=self.alpha)
else:
raise TypeError('opt_config should be a list/tuple or a str. If str, should either be "sgd" or "adam". If list, should provide exact configurations and parameters corresponding to the respective engines')
|
chemml
|
positive
|
def application(environ, start_response):
if environ['PATH_INFO'] not in URL_2_HANDLERS:
response = json.dumps({'ec': 0, 'error': 'invalid uri'})
start_response('200 OK', [('Content-Type', 'application/json')])
return [response.encode()]
handler = URL_2_HANDLERS[environ['PATH_INFO']]
<DeepExtract>
post_data = {}
content_type = environ['CONTENT_TYPE'] if 'CONTENT_TYPE' in environ else None
if content_type is not None:
(mimetype, options) = cgi.parse_header(content_type)
if mimetype == 'application/json' and environ['REQUEST_METHOD'] == 'POST':
storage = environ['wsgi.input'].read()
if storage:
post_data = json.loads(storage)
storage = FieldStorage(environ['wsgi.input'], environ=environ, keep_blank_values=True)
if not False:
for k in storage.keys():
post_data[k] = storage.getvalue(k)
post_data = post_data
</DeepExtract>
response = handler(post_data)
start_response('200 OK', [('Content-Type', 'application/json')])
return [str(response).encode()]
|
def application(environ, start_response):
if environ['PATH_INFO'] not in URL_2_HANDLERS:
response = json.dumps({'ec': 0, 'error': 'invalid uri'})
start_response('200 OK', [('Content-Type', 'application/json')])
return [response.encode()]
handler = URL_2_HANDLERS[environ['PATH_INFO']]
post_data = {}
content_type = environ['CONTENT_TYPE'] if 'CONTENT_TYPE' in environ else None
if content_type is not None:
(mimetype, options) = cgi.parse_header(content_type)
if mimetype == 'application/json' and environ['REQUEST_METHOD'] == 'POST':
storage = environ['wsgi.input'].read()
if storage:
post_data = json.loads(storage)
storage = FieldStorage(environ['wsgi.input'], environ=environ, keep_blank_values=True)
if not False:
for k in storage.keys():
post_data[k] = storage.getvalue(k)
post_data = post_data
response = handler(post_data)
start_response('200 OK', [('Content-Type', 'application/json')])
return [str(response).encode()]
|
aswan
|
positive
|
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
args = parser.parse_args(argv)
log.info('start parameters: ' + str(args))
log.info('loading embeddings')
vocab = read_vocabulary_id_file(args.vocabulary)
embs = np.loadtxt(args.embeddings)
log.info('loading documents')
<DeepExtract>
pos_docs = os.listdir(os.path.join(args.corpus_dir, u'pos'))
num_pos_docs = len(pos_docs)
pos_docs = [os.path.join(args.corpus_dir, u'pos/', d) for d in pos_docs]
neg_docs = os.listdir(os.path.join(args.corpus_dir, u'neg'))
neg_docs = [os.path.join(args.corpus_dir, u'neg/', d) for d in neg_docs]
docs = pos_docs + neg_docs
features = dict()
labels = dict()
for (count, d) in enumerate(docs):
basename = os.path.basename(d)
features[basename] = convert_doc(d, vocab, embs)
labels[basename] = 1 if count < num_pos_docs else 0
(features, labels) = (features, labels)
</DeepExtract>
log.info('performing cross validation')
<DeepExtract>
single_predictions = []
weight_vectors = np.zeros((NO_OF_FOLDS, len(features.values()[0])))
classification_result = np.zeros((NO_OF_FOLDS + 1, 5))
for (cur_fold, (train_names, test_names)) in enumerate(imdb_cross_folds(features.keys())):
train_data = [features[n] for n in train_names]
train_labels = [labels[n] for n in train_names]
model = train_model(train_data, train_labels)
test_data = [features[n] for n in test_names]
pred_labels = model.predict(test_data)
true_labels = []
for i in xrange(len(test_data)):
single_predictions.append([cur_fold, test_names[i], labels[test_names[i]], pred_labels[i]])
true_labels.append(labels[test_names[i]])
classification_result[cur_fold, :] = get_classification_result(cur_fold, true_labels, pred_labels)
weight_vectors[cur_fold, :] = model.coef_
(single_predictions, classification_result, weight_vectors) = (single_predictions, classification_result, weight_vectors)
</DeepExtract>
log.info('storing results')
np.savetxt(os.path.join(args.output_dir, 'svm-weights.csv'), weight_vectors, '%f', ';', '\n')
with utf8_file_open(os.path.join(args.output_dir, 'predictions.csv'), 'w') as pred_file:
pred_file.write(u'fold_no;doc;true_label;pred_label\n')
for sp in single_predictions:
pred_file.write(u';'.join(map(unicode, sp)) + u'\n')
all_true_labels = [sp[2] for sp in single_predictions]
all_pred_labels = [sp[3] for sp in single_predictions]
confusion = confusion_matrix(all_true_labels, all_pred_labels)
np.savetxt(os.path.join(args.output_dir, 'confusion_matrix.csv'), confusion, '%d', ';', '\n')
<DeepExtract>
res = calc_metrics(all_true_labels, all_pred_labels)
classification_result[NO_OF_FOLDS, :] = np.asarray([-1] + [r for r in res])
</DeepExtract>
header = u'fold_no;accuracy;precision;recall;f1'
np.savetxt(os.path.join(args.output_dir, 'metrics.csv'), classification_result, '%f', u';', u'\n', header=header)
log.info(classification_result)
log.info('finished')
|
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
args = parser.parse_args(argv)
log.info('start parameters: ' + str(args))
log.info('loading embeddings')
vocab = read_vocabulary_id_file(args.vocabulary)
embs = np.loadtxt(args.embeddings)
log.info('loading documents')
pos_docs = os.listdir(os.path.join(args.corpus_dir, u'pos'))
num_pos_docs = len(pos_docs)
pos_docs = [os.path.join(args.corpus_dir, u'pos/', d) for d in pos_docs]
neg_docs = os.listdir(os.path.join(args.corpus_dir, u'neg'))
neg_docs = [os.path.join(args.corpus_dir, u'neg/', d) for d in neg_docs]
docs = pos_docs + neg_docs
features = dict()
labels = dict()
for (count, d) in enumerate(docs):
basename = os.path.basename(d)
features[basename] = convert_doc(d, vocab, embs)
labels[basename] = 1 if count < num_pos_docs else 0
(features, labels) = (features, labels)
log.info('performing cross validation')
single_predictions = []
weight_vectors = np.zeros((NO_OF_FOLDS, len(features.values()[0])))
classification_result = np.zeros((NO_OF_FOLDS + 1, 5))
for (cur_fold, (train_names, test_names)) in enumerate(imdb_cross_folds(features.keys())):
train_data = [features[n] for n in train_names]
train_labels = [labels[n] for n in train_names]
model = train_model(train_data, train_labels)
test_data = [features[n] for n in test_names]
pred_labels = model.predict(test_data)
true_labels = []
for i in xrange(len(test_data)):
single_predictions.append([cur_fold, test_names[i], labels[test_names[i]], pred_labels[i]])
true_labels.append(labels[test_names[i]])
classification_result[cur_fold, :] = get_classification_result(cur_fold, true_labels, pred_labels)
weight_vectors[cur_fold, :] = model.coef_
(single_predictions, classification_result, weight_vectors) = (single_predictions, classification_result, weight_vectors)
log.info('storing results')
np.savetxt(os.path.join(args.output_dir, 'svm-weights.csv'), weight_vectors, '%f', ';', '\n')
with utf8_file_open(os.path.join(args.output_dir, 'predictions.csv'), 'w') as pred_file:
pred_file.write(u'fold_no;doc;true_label;pred_label\n')
for sp in single_predictions:
pred_file.write(u';'.join(map(unicode, sp)) + u'\n')
all_true_labels = [sp[2] for sp in single_predictions]
all_pred_labels = [sp[3] for sp in single_predictions]
confusion = confusion_matrix(all_true_labels, all_pred_labels)
np.savetxt(os.path.join(args.output_dir, 'confusion_matrix.csv'), confusion, '%d', ';', '\n')
res = calc_metrics(all_true_labels, all_pred_labels)
classification_result[NO_OF_FOLDS, :] = np.asarray([-1] + [r for r in res])
header = u'fold_no;accuracy;precision;recall;f1'
np.savetxt(os.path.join(args.output_dir, 'metrics.csv'), classification_result, '%f', u';', u'\n', header=header)
log.info(classification_result)
log.info('finished')
|
Attentive_Convolution
|
positive
|
def addClass(self, className):
"""
addClass - append a class name to the end of the "class" attribute, if not present
@param className <str> - The name of the class to add
"""
className = stripWordsOnly(className)
if not className:
return None
if ' ' in className:
for oneClassName in className.split(' '):
<DeepExtract>
oneClassName = stripWordsOnly(oneClassName)
if not oneClassName:
return None
if ' ' in oneClassName:
for oneClassName in oneClassName.split(' '):
self.addClass(oneClassName)
return
myClassNames = self._classNames
if oneClassName in myClassNames:
return
myClassNames.append(oneClassName)
return None
</DeepExtract>
return
myClassNames = self._classNames
if className in myClassNames:
return
myClassNames.append(className)
return None
|
def addClass(self, className):
"""
addClass - append a class name to the end of the "class" attribute, if not present
@param className <str> - The name of the class to add
"""
className = stripWordsOnly(className)
if not className:
return None
if ' ' in className:
for oneClassName in className.split(' '):
oneClassName = stripWordsOnly(oneClassName)
if not oneClassName:
return None
if ' ' in oneClassName:
for oneClassName in oneClassName.split(' '):
self.addClass(oneClassName)
return
myClassNames = self._classNames
if oneClassName in myClassNames:
return
myClassNames.append(oneClassName)
return None
return
myClassNames = self._classNames
if className in myClassNames:
return
myClassNames.append(className)
return None
|
AdvancedHTMLParser
|
positive
|
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
itr = epoch_itr.next_epoch_itr()
progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple')
if epoch_itr.epoch <= len(args.update_freq):
update_freq = args.update_freq[epoch_itr.epoch - 1]
else:
update_freq = args.update_freq[-1]
extra_meters = collections.defaultdict(lambda : AverageMeter())
first_valid = args.valid_subset.split(',')[0]
max_update = args.max_update or math.inf
num_batches = len(epoch_itr)
for (i, sample) in enumerate(progress, start=epoch_itr.iterations_in_epoch):
if i < num_batches - 1 and (i + 1) % update_freq > 0:
trainer.train_step(sample, update_params=False)
continue
else:
log_output = trainer.train_step(sample, update_params=True)
<DeepExtract>
stats = collections.OrderedDict()
stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss').avg
stats['nll_loss'] = '{:.3f}'.format(nll_loss)
else:
nll_loss = trainer.get_meter('train_loss').avg
stats['ppl'] = get_perplexity(nll_loss)
stats['wps'] = round(trainer.get_meter('wps').avg)
stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
stats['wpb'] = round(trainer.get_meter('wpb').avg)
stats['bsz'] = round(trainer.get_meter('bsz').avg)
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
stats['oom'] = trainer.get_meter('oom').avg
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
stats['train_wall'] = round(trainer.get_meter('train_wall').sum)
stats = stats
</DeepExtract>
for (k, v) in log_output.items():
if k in ['loss', 'nll_loss', 'sample_size']:
continue
if 'loss' in k:
extra_meters[k].update(v, log_output['sample_size'])
else:
extra_meters[k].update(v)
stats[k] = extra_meters[k].avg
progress.log(stats)
if i == 0:
trainer.get_meter('wps').reset()
num_updates = trainer.get_num_updates()
if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and (num_updates > 0):
<DeepExtract>
valid_losses = []
for subset in [first_valid]:
itr = task.get_batch_iterator(dataset=task.dataset(subset), max_tokens=args.max_tokens, max_sentences=args.max_sentences_valid, max_positions=utils.resolve_max_positions(task.max_positions(), trainer.get_model().max_positions()), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=8, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, prefix="valid on '{}' subset".format(subset), no_progress_bar='simple')
for k in ['valid_loss', 'valid_nll_loss']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
extra_meters = collections.defaultdict(lambda : AverageMeter())
for sample in progress:
log_output = trainer.valid_step(sample)
for (k, v) in log_output.items():
if k in ['loss', 'nll_loss', 'sample_size']:
continue
extra_meters[k].update(v)
stats = get_valid_stats(trainer)
for (k, meter) in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
valid_losses.append(stats['valid_loss'])
valid_losses = valid_losses
</DeepExtract>
<DeepExtract>
if args.no_save or not distributed_utils.is_master(args):
return
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
checkpoint_conds = collections.OrderedDict()
checkpoint_conds['checkpoint{}.pt'.format(epoch)] = end_of_epoch and (not args.no_epoch_checkpoints) and (epoch % args.save_interval == 0)
checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = not end_of_epoch and args.save_interval_updates > 0 and (updates % args.save_interval_updates == 0)
checkpoint_conds['checkpoint_best.pt'] = valid_losses[0] is not None and (not hasattr(save_checkpoint, 'best') or valid_losses[0] < save_checkpoint.best)
checkpoint_conds['checkpoint_last.pt'] = True
prev_best = getattr(save_checkpoint, 'best', valid_losses[0])
if valid_losses[0] is not None:
save_checkpoint.best = min(valid_losses[0], prev_best)
extra_state = {'best': save_checkpoint.best, 'train_iterator': epoch_itr.state_dict(), 'val_loss': valid_losses[0]}
checkpoints = [os.path.join(args.save_dir, fn) for (fn, cond) in checkpoint_conds.items() if cond]
if len(checkpoints) > 0:
for cp in checkpoints:
trainer.save_checkpoint(cp, extra_state)
if not end_of_epoch and args.keep_interval_updates > 0:
checkpoints = utils.checkpoint_paths(args.save_dir, pattern='checkpoint_\\d+_(\\d+)\\.pt')
for old_chk in checkpoints[args.keep_interval_updates:]:
os.remove(old_chk)
</DeepExtract>
if num_updates >= max_update:
break
<DeepExtract>
stats = collections.OrderedDict()
stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss').avg
stats['nll_loss'] = '{:.3f}'.format(nll_loss)
else:
nll_loss = trainer.get_meter('train_loss').avg
stats['ppl'] = get_perplexity(nll_loss)
stats['wps'] = round(trainer.get_meter('wps').avg)
stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
stats['wpb'] = round(trainer.get_meter('wpb').avg)
stats['bsz'] = round(trainer.get_meter('bsz').avg)
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
stats['oom'] = trainer.get_meter('oom').avg
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
stats['train_wall'] = round(trainer.get_meter('train_wall').sum)
stats = stats
</DeepExtract>
for (k, meter) in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
for k in ['train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'clip', 'gnorm']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
|
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
itr = epoch_itr.next_epoch_itr()
progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple')
if epoch_itr.epoch <= len(args.update_freq):
update_freq = args.update_freq[epoch_itr.epoch - 1]
else:
update_freq = args.update_freq[-1]
extra_meters = collections.defaultdict(lambda : AverageMeter())
first_valid = args.valid_subset.split(',')[0]
max_update = args.max_update or math.inf
num_batches = len(epoch_itr)
for (i, sample) in enumerate(progress, start=epoch_itr.iterations_in_epoch):
if i < num_batches - 1 and (i + 1) % update_freq > 0:
trainer.train_step(sample, update_params=False)
continue
else:
log_output = trainer.train_step(sample, update_params=True)
stats = collections.OrderedDict()
stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss').avg
stats['nll_loss'] = '{:.3f}'.format(nll_loss)
else:
nll_loss = trainer.get_meter('train_loss').avg
stats['ppl'] = get_perplexity(nll_loss)
stats['wps'] = round(trainer.get_meter('wps').avg)
stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
stats['wpb'] = round(trainer.get_meter('wpb').avg)
stats['bsz'] = round(trainer.get_meter('bsz').avg)
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
stats['oom'] = trainer.get_meter('oom').avg
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
stats['train_wall'] = round(trainer.get_meter('train_wall').sum)
stats = stats
for (k, v) in log_output.items():
if k in ['loss', 'nll_loss', 'sample_size']:
continue
if 'loss' in k:
extra_meters[k].update(v, log_output['sample_size'])
else:
extra_meters[k].update(v)
stats[k] = extra_meters[k].avg
progress.log(stats)
if i == 0:
trainer.get_meter('wps').reset()
num_updates = trainer.get_num_updates()
if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and (num_updates > 0):
valid_losses = []
for subset in [first_valid]:
itr = task.get_batch_iterator(dataset=task.dataset(subset), max_tokens=args.max_tokens, max_sentences=args.max_sentences_valid, max_positions=utils.resolve_max_positions(task.max_positions(), trainer.get_model().max_positions()), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=8, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, prefix="valid on '{}' subset".format(subset), no_progress_bar='simple')
for k in ['valid_loss', 'valid_nll_loss']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
extra_meters = collections.defaultdict(lambda : AverageMeter())
for sample in progress:
log_output = trainer.valid_step(sample)
for (k, v) in log_output.items():
if k in ['loss', 'nll_loss', 'sample_size']:
continue
extra_meters[k].update(v)
stats = get_valid_stats(trainer)
for (k, meter) in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
valid_losses.append(stats['valid_loss'])
valid_losses = valid_losses
if args.no_save or not distributed_utils.is_master(args):
return
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
checkpoint_conds = collections.OrderedDict()
checkpoint_conds['checkpoint{}.pt'.format(epoch)] = end_of_epoch and (not args.no_epoch_checkpoints) and (epoch % args.save_interval == 0)
checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = not end_of_epoch and args.save_interval_updates > 0 and (updates % args.save_interval_updates == 0)
checkpoint_conds['checkpoint_best.pt'] = valid_losses[0] is not None and (not hasattr(save_checkpoint, 'best') or valid_losses[0] < save_checkpoint.best)
checkpoint_conds['checkpoint_last.pt'] = True
prev_best = getattr(save_checkpoint, 'best', valid_losses[0])
if valid_losses[0] is not None:
save_checkpoint.best = min(valid_losses[0], prev_best)
extra_state = {'best': save_checkpoint.best, 'train_iterator': epoch_itr.state_dict(), 'val_loss': valid_losses[0]}
checkpoints = [os.path.join(args.save_dir, fn) for (fn, cond) in checkpoint_conds.items() if cond]
if len(checkpoints) > 0:
for cp in checkpoints:
trainer.save_checkpoint(cp, extra_state)
if not end_of_epoch and args.keep_interval_updates > 0:
checkpoints = utils.checkpoint_paths(args.save_dir, pattern='checkpoint_\\d+_(\\d+)\\.pt')
for old_chk in checkpoints[args.keep_interval_updates:]:
os.remove(old_chk)
if num_updates >= max_update:
break
stats = collections.OrderedDict()
stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss').avg
stats['nll_loss'] = '{:.3f}'.format(nll_loss)
else:
nll_loss = trainer.get_meter('train_loss').avg
stats['ppl'] = get_perplexity(nll_loss)
stats['wps'] = round(trainer.get_meter('wps').avg)
stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
stats['wpb'] = round(trainer.get_meter('wpb').avg)
stats['bsz'] = round(trainer.get_meter('bsz').avg)
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
stats['oom'] = trainer.get_meter('oom').avg
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
stats['train_wall'] = round(trainer.get_meter('train_wall').sum)
stats = stats
for (k, meter) in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
for k in ['train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'clip', 'gnorm']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
|
Abstractive-Text-Summarization
|
positive
|
def __repr__(self):
<DeepExtract>
attrs = self.scraper.scrape(self)
</DeepExtract>
<DeepExtract>
cl_name = self.__class__.__name__
attrs['_TITLE_'] = format_title(cl_name, name_prefix=self.name_prefix)
</DeepExtract>
return attrs
|
def __repr__(self):
attrs = self.scraper.scrape(self)
cl_name = self.__class__.__name__
attrs['_TITLE_'] = format_title(cl_name, name_prefix=self.name_prefix)
return attrs
|
Copycat-abstractive-opinion-summarizer
|
positive
|
def test_predict_with_target(self):
series_long = self.series1
series_short = series_long[:25]
<DeepExtract>
lags = [-1, -2, -5]
ensemble_model = NaiveEnsembleModel(models=[LinearRegressionModel(lags=lags, lags_past_covariates=lags, output_chunk_length=output_chunk_length), LinearRegressionModel(lags=lags, lags_past_covariates=lags, output_chunk_length=output_chunk_length)])
</DeepExtract>
ensemble_model.fit(series_short, past_covariates=series_long)
preds = ensemble_model.predict(n=5, past_covariates=series_long)
self.assertTrue(isinstance(preds, TimeSeries))
preds = ensemble_model.predict(n=5, series=series_long, past_covariates=series_long)
self.assertTrue(isinstance(preds, TimeSeries))
preds = ensemble_model.predict(n=5, series=[series_long] * 2, past_covariates=[series_long] * 2)
self.assertTrue(isinstance(preds, list) and len(preds) == 2)
preds = ensemble_model.predict(n=5, series=[series_long], past_covariates=[series_long])
self.assertTrue(isinstance(preds, list) and len(preds) == 1)
<DeepExtract>
lags = [-1, -2, -5]
ensemble_model = NaiveEnsembleModel(models=[LinearRegressionModel(lags=lags, lags_past_covariates=lags, output_chunk_length=output_chunk_length), LinearRegressionModel(lags=lags, lags_past_covariates=lags, output_chunk_length=output_chunk_length)])
</DeepExtract>
ensemble_model.fit([series_short] * 2, past_covariates=[series_long] * 2)
with self.assertRaises(ValueError):
ensemble_model.predict(n=5, past_covariates=series_long)
preds = ensemble_model.predict(n=5, series=series_long, past_covariates=series_long)
self.assertTrue(isinstance(preds, TimeSeries))
preds = ensemble_model.predict(n=5, series=[series_long] * 2, past_covariates=[series_long] * 2)
self.assertTrue(isinstance(preds, list) and len(preds) == 2)
preds = ensemble_model.predict(n=5, series=[series_long], past_covariates=[series_long])
self.assertTrue(isinstance(preds, list) and len(preds) == 1)
|
def test_predict_with_target(self):
series_long = self.series1
series_short = series_long[:25]
lags = [-1, -2, -5]
ensemble_model = NaiveEnsembleModel(models=[LinearRegressionModel(lags=lags, lags_past_covariates=lags, output_chunk_length=output_chunk_length), LinearRegressionModel(lags=lags, lags_past_covariates=lags, output_chunk_length=output_chunk_length)])
ensemble_model.fit(series_short, past_covariates=series_long)
preds = ensemble_model.predict(n=5, past_covariates=series_long)
self.assertTrue(isinstance(preds, TimeSeries))
preds = ensemble_model.predict(n=5, series=series_long, past_covariates=series_long)
self.assertTrue(isinstance(preds, TimeSeries))
preds = ensemble_model.predict(n=5, series=[series_long] * 2, past_covariates=[series_long] * 2)
self.assertTrue(isinstance(preds, list) and len(preds) == 2)
preds = ensemble_model.predict(n=5, series=[series_long], past_covariates=[series_long])
self.assertTrue(isinstance(preds, list) and len(preds) == 1)
lags = [-1, -2, -5]
ensemble_model = NaiveEnsembleModel(models=[LinearRegressionModel(lags=lags, lags_past_covariates=lags, output_chunk_length=output_chunk_length), LinearRegressionModel(lags=lags, lags_past_covariates=lags, output_chunk_length=output_chunk_length)])
ensemble_model.fit([series_short] * 2, past_covariates=[series_long] * 2)
with self.assertRaises(ValueError):
ensemble_model.predict(n=5, past_covariates=series_long)
preds = ensemble_model.predict(n=5, series=series_long, past_covariates=series_long)
self.assertTrue(isinstance(preds, TimeSeries))
preds = ensemble_model.predict(n=5, series=[series_long] * 2, past_covariates=[series_long] * 2)
self.assertTrue(isinstance(preds, list) and len(preds) == 2)
preds = ensemble_model.predict(n=5, series=[series_long], past_covariates=[series_long])
self.assertTrue(isinstance(preds, list) and len(preds) == 1)
|
darts
|
positive
|
def first(self, or_fail=False, **kwargs):
if kwargs:
<DeepExtract>
self._query = self.query().filter_by(**kwargs)
return self
</DeepExtract>
<DeepExtract>
if int(environ.get('API_REPO_DEBUG', 0)) == 1:
logger.debug(f'first@repos.py: executing first query {self.compiled()} in {self.__class__.__name__}')
</DeepExtract>
results = self.final().first()
if or_fail:
if not results:
abort(404, f'Item not found: {self.__class__.__name__}')
self._results = results
return self
|
def first(self, or_fail=False, **kwargs):
if kwargs:
self._query = self.query().filter_by(**kwargs)
return self
if int(environ.get('API_REPO_DEBUG', 0)) == 1:
logger.debug(f'first@repos.py: executing first query {self.compiled()} in {self.__class__.__name__}')
results = self.final().first()
if or_fail:
if not results:
abort(404, f'Item not found: {self.__class__.__name__}')
self._results = results
return self
|
boucanpy
|
positive
|
def main():
<DeepExtract>
size = int(input())
tree = [Vertex(w) for w in map(int, input().split())]
for i in range(1, size):
(a, b) = list(map(int, input().split()))
tree[a - 1].children.append(b - 1)
tree[b - 1].children.append(a - 1)
tree = tree
</DeepExtract>
<DeepExtract>
size = len(tree)
if size == 0:
weight = 0
dfs(tree, 0, -1)
weight = 0
</DeepExtract>
print(weight)
|
def main():
size = int(input())
tree = [Vertex(w) for w in map(int, input().split())]
for i in range(1, size):
(a, b) = list(map(int, input().split()))
tree[a - 1].children.append(b - 1)
tree[b - 1].children.append(a - 1)
tree = tree
size = len(tree)
if size == 0:
weight = 0
dfs(tree, 0, -1)
weight = 0
print(weight)
|
Coursera_Data_Structures_and_Algorithms_Specialization
|
positive
|
def validate_dataset(self) -> None:
if not self._data_is_a_list():
raise TypeError('The dataset is not a list.')
if self.is_training_container:
data_to_validate = [data[0] for data in self.data]
else:
data_to_validate = self.data
if validate_if_any_none(string_elements=data_to_validate):
raise DataError('Some addresses data points are None value.')
if self.is_training_container:
<DeepExtract>
if not self._data_is_list_of_tuple():
raise TypeError('The pickled dataset data are not in a tuple format. Datais expected to be a list of tuples where the first element isthe address, and the second is the address tag.')
if self._empty_tags():
raise DataError('Some tags data points are empty.')
if not self._data_tags_is_same_len_then_address():
print(f'Some addresses (whitespace-split) and the tags associated with them are not the same len. If you are using a CSVDatasetContainer, consider using the tag_seperator_reformat_fn argument.Here is the report of those cases where len differ to help you out:\n{self._data_tags_not_the_same_len_diff()}')
raise DataError('Some addresses (whitespace-split) and the tags associated with them are not the same len.')
</DeepExtract>
if validate_if_any_empty(string_elements=data_to_validate):
raise DataError('Some addresses data points are empty.')
if validate_if_any_whitespace_only(string_elements=data_to_validate):
raise DataError('Some addresses only include whitespace thus cannot be parsed.')
|
def validate_dataset(self) -> None:
if not self._data_is_a_list():
raise TypeError('The dataset is not a list.')
if self.is_training_container:
data_to_validate = [data[0] for data in self.data]
else:
data_to_validate = self.data
if validate_if_any_none(string_elements=data_to_validate):
raise DataError('Some addresses data points are None value.')
if self.is_training_container:
if not self._data_is_list_of_tuple():
raise TypeError('The pickled dataset data are not in a tuple format. Datais expected to be a list of tuples where the first element isthe address, and the second is the address tag.')
if self._empty_tags():
raise DataError('Some tags data points are empty.')
if not self._data_tags_is_same_len_then_address():
print(f'Some addresses (whitespace-split) and the tags associated with them are not the same len. If you are using a CSVDatasetContainer, consider using the tag_seperator_reformat_fn argument.Here is the report of those cases where len differ to help you out:\n{self._data_tags_not_the_same_len_diff()}')
raise DataError('Some addresses (whitespace-split) and the tags associated with them are not the same len.')
if validate_if_any_empty(string_elements=data_to_validate):
raise DataError('Some addresses data points are empty.')
if validate_if_any_whitespace_only(string_elements=data_to_validate):
raise DataError('Some addresses only include whitespace thus cannot be parsed.')
|
deepparse
|
positive
|
def test_entities_converted_on_the_way_out(self):
text = '<p><<sacré bleu!>></p>'
expected = u'<p><<sacré bleu!>></p>'.encode('utf-8')
<DeepExtract>
builder = kwargs.pop('builder', self.default_builder)
soup = BeautifulSoup(text, builder=builder, **kwargs)
</DeepExtract>
self.assertEqual(soup.p.encode('utf-8'), expected)
|
def test_entities_converted_on_the_way_out(self):
text = '<p><<sacré bleu!>></p>'
expected = u'<p><<sacré bleu!>></p>'.encode('utf-8')
builder = kwargs.pop('builder', self.default_builder)
soup = BeautifulSoup(text, builder=builder, **kwargs)
self.assertEqual(soup.p.encode('utf-8'), expected)
|
alfred_pandoctor
|
positive
|
def UpdateEntityFromJsonDict(entity, json_dict):
"""Update a db or ndb Model entity from a dict.
Only attributes that are present in both the entity and the
json_dict will be updated in the entity.
Args:
entity: the model entity to get the values from (db.Model or ndb.Model).
json_dict: a dict containing key/values to be updated in the entity.
"""
if hasattr(entity, 'ExtractFromJsonDict'):
entity.ExtractFromJsonDict(json_dict)
for (key, value) in json_dict.items():
if key not in ('id', 'created', 'key', 'last_updated'):
if hasattr(entity, key):
<DeepExtract>
if hasattr(entity, 'properties'):
prop_type = entity.properties().get(key, None).__class__.__name__
else:
entity_properties = entity._properties
prop_type = entity_properties.get(key, None).__class__.__name__
</DeepExtract>
if prop_type == 'IntegerProperty':
if value:
if isinstance(value, collections.Iterable) and (not isinstance(value, basestring)):
value = map(int, value)
else:
value = int(value)
elif prop_type == 'DateTimeProperty':
if value:
if isinstance(value, collections.Iterable) and (not isinstance(value, basestring)):
value = map(dateutil.parser.parse, value)
else:
value = dateutil.parser.parse(value)
setattr(entity, key, value)
|
def UpdateEntityFromJsonDict(entity, json_dict):
"""Update a db or ndb Model entity from a dict.
Only attributes that are present in both the entity and the
json_dict will be updated in the entity.
Args:
entity: the model entity to get the values from (db.Model or ndb.Model).
json_dict: a dict containing key/values to be updated in the entity.
"""
if hasattr(entity, 'ExtractFromJsonDict'):
entity.ExtractFromJsonDict(json_dict)
for (key, value) in json_dict.items():
if key not in ('id', 'created', 'key', 'last_updated'):
if hasattr(entity, key):
if hasattr(entity, 'properties'):
prop_type = entity.properties().get(key, None).__class__.__name__
else:
entity_properties = entity._properties
prop_type = entity_properties.get(key, None).__class__.__name__
if prop_type == 'IntegerProperty':
if value:
if isinstance(value, collections.Iterable) and (not isinstance(value, basestring)):
value = map(int, value)
else:
value = int(value)
elif prop_type == 'DateTimeProperty':
if value:
if isinstance(value, collections.Iterable) and (not isinstance(value, basestring)):
value = map(dateutil.parser.parse, value)
else:
value = dateutil.parser.parse(value)
setattr(entity, key, value)
|
Data-Pipeline
|
positive
|
def run_iperf(self, client, target=None, opts=''):
if target is None:
<DeepExtract>
target = '192.168.0.1'
</DeepExtract>
client.sendline('iperf %s -c %s %s | grep -v SUM' % (self.client_opts(), target, opts))
client.expect('Client connecting to')
|
def run_iperf(self, client, target=None, opts=''):
if target is None:
target = '192.168.0.1'
client.sendline('iperf %s -c %s %s | grep -v SUM' % (self.client_opts(), target, opts))
client.expect('Client connecting to')
|
boardfarm
|
positive
|
def crop(self, box):
"""
Cropss a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
<DeepExtract>
if self.mode == 'xyxy':
(xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax)
elif self.mode == 'xywh':
TO_REMOVE = 1
(xmin, ymin, w, h) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0))
else:
raise RuntimeError('Should not be here')
</DeepExtract>
(w, h) = (box[2] - box[0], box[3] - box[1])
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
if False:
is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
cropped_box = torch.cat((cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1)
bbox = BoxList(cropped_box, (w, h), mode='xyxy')
for (k, v) in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.crop(box)
bbox.add_field(k, v)
return bbox.convert(self.mode)
|
def crop(self, box):
"""
Cropss a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
if self.mode == 'xyxy':
(xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax)
elif self.mode == 'xywh':
TO_REMOVE = 1
(xmin, ymin, w, h) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0))
else:
raise RuntimeError('Should not be here')
(w, h) = (box[2] - box[0], box[3] - box[1])
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
if False:
is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
cropped_box = torch.cat((cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1)
bbox = BoxList(cropped_box, (w, h), mode='xyxy')
for (k, v) in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.crop(box)
bbox.add_field(k, v)
return bbox.convert(self.mode)
|
EveryPixelMatters
|
positive
|
def run(self):
<DeepExtract>
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
</DeepExtract>
<DeepExtract>
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ''
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
</DeepExtract>
<DeepExtract>
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
</DeepExtract>
_build_py.run(self)
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print('UPDATING %s' % target_versionfile)
<DeepExtract>
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
</DeepExtract>
|
def run(self):
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ''
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
_build_py.run(self)
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print('UPDATING %s' % target_versionfile)
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
|
Ensembler
|
positive
|
def get_url_path(self):
if hasattr(self.get_url, 'dont_recurse'):
raise NotImplemented
try:
<DeepExtract>
if hasattr(self.get_url_path, 'dont_recurse'):
raise NotImplemented
try:
path = self.get_url_path()
except NotImplemented:
raise
website_url = getattr(settings, 'DEFAULT_WEBSITE_URL', 'http://127.0.0.1:8000')
url = website_url + path
</DeepExtract>
except NotImplemented:
raise
bits = urlparse.urlparse(url)
return urlparse.urlunparse(('', '') + bits[2:])
|
def get_url_path(self):
if hasattr(self.get_url, 'dont_recurse'):
raise NotImplemented
try:
if hasattr(self.get_url_path, 'dont_recurse'):
raise NotImplemented
try:
path = self.get_url_path()
except NotImplemented:
raise
website_url = getattr(settings, 'DEFAULT_WEBSITE_URL', 'http://127.0.0.1:8000')
url = website_url + path
except NotImplemented:
raise
bits = urlparse.urlparse(url)
return urlparse.urlunparse(('', '') + bits[2:])
|
Django-Web-Development-with-Python
|
positive
|
@pyqtSlot()
def call_dialog(self):
<DeepExtract>
presets = config.table_presets
for key in presets[HeaderEditWidget.default]:
if key not in presets[current] and (not header_dict[key][0]):
header_dict.pop(key)
presets[current] = header_dict
</DeepExtract>
self.save = False
HeaderEditDialog(self.source, self.config).exec()
|
@pyqtSlot()
def call_dialog(self):
presets = config.table_presets
for key in presets[HeaderEditWidget.default]:
if key not in presets[current] and (not header_dict[key][0]):
header_dict.pop(key)
presets[current] = header_dict
self.save = False
HeaderEditDialog(self.source, self.config).exec()
|
clever-show
|
positive
|
def read(system, file):
"""
Read PSS/E RAW file v32/v33 formats.
"""
blocks = ['bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area', 'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone', 'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q']
rawd = re.compile('rawd\\d\\d')
ret = True
block_idx = 0
mva = 100
raw = {}
for item in blocks:
raw[item] = []
data = []
mdata = []
dev_line = 0
line_list = andes.io.read_file_like(file)
for (num, line) in enumerate(line_list):
line = line.strip()
if num == 0:
data = line.split('/')[0]
data = data.split(',')
mva = float(data[1])
system.config.mva = mva
try:
system.config.freq = float(data[5])
except IndexError:
logger.warning('System frequency is set to 60 Hz.\nConsider using a higher version PSS/E raw file.')
system.config.freq = 60.0
version = 0
if len(data) >= 3:
version = int(data[2])
elif rawd.search(line):
version = int(rawd.search(line).group(0).strip('rawd'))
continue
elif num == 1 or num == 2:
if len(line) > 0:
logger.info(' ' + line)
continue
elif num >= 3:
if line[0:2] == '0 ' or line[0:3] == ' 0 ':
block_idx += 1
continue
elif line[0] == 'Q':
break
data = line.split(',')
data = [to_number(item) for item in data]
mdata.append(data)
dev_line += 1
<DeepExtract>
line_counts = [1, 1, 1, 1, 1, 4, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0]
if block_idx == 5:
if mdata[0][2] == 0:
block_lines = 4
else:
block_lines = 5
block_lines = line_counts[block_idx]
</DeepExtract>
if dev_line >= block_lines:
if block_lines == 1:
mdata = mdata[0]
raw[blocks[block_idx]].append(mdata)
mdata = []
dev_line = 0
<DeepExtract>
out = defaultdict(list)
bus_idx_list = list()
sw = dict()
for data in raw['bus']:
idx = data[0]
bus_idx_list.append(idx)
ty = data[3]
a0 = data[8] * deg2rad
if ty == 3:
sw[idx] = a0
param = {'idx': idx, 'name': data[1], 'Vn': data[2], 'v0': data[7], 'a0': a0, 'area': data[4], 'zone': data[5], 'owner': data[6]}
out['Bus'].append(param)
_add_devices_from_dict(out, system)
(bus_params, bus_idx_list, sw) = (out, bus_idx_list, sw)
</DeepExtract>
max_bus = max(bus_idx_list)
<DeepExtract>
mva = system.config.mva
out = defaultdict(list)
for data in raw['load']:
bus = data[0]
vn = system.Bus.get(src='Vn', idx=bus, attr='v')
v0 = system.Bus.get(src='v0', idx=bus, attr='v')
param = {'bus': bus, 'u': data[2], 'Vn': vn, 'p0': (data[5] + data[7] * v0 + data[9] * v0 ** 2) / mva, 'q0': (data[6] + data[8] * v0 - data[10] * v0 ** 2) / mva, 'owner': data[11]}
out['PQ'].append(param)
_add_devices_from_dict(out, system)
return out
</DeepExtract>
<DeepExtract>
mva = system.config.mva
out = defaultdict(list)
for data in raw['fshunt']:
bus = data[0]
vn = system.Bus.get(src='Vn', idx=bus, attr='v')
param = {'bus': bus, 'Vn': vn, 'u': data[2], 'Sn': mva, 'g': data[3] / mva, 'b': data[4] / mva}
out['Shunt'].append(param)
_add_devices_from_dict(out, system)
return out
</DeepExtract>
<DeepExtract>
mva = system.config.mva
out = defaultdict(list)
gen_idx = 0
for data in raw['gen']:
bus = data[0]
subidx = data[1]
vn = system.Bus.get(src='Vn', idx=bus, attr='v')
gen_mva = data[8]
gen_idx += 1
status = data[14]
param = {'Sn': gen_mva, 'Vn': vn, 'u': status, 'bus': bus, 'subidx': subidx, 'idx': gen_idx, 'p0': data[2] / mva, 'q0': data[3] / mva, 'pmax': data[16] / mva, 'pmin': data[17] / mva, 'qmax': data[4] / mva, 'qmin': data[5] / mva, 'v0': data[6], 'ra': data[9], 'xs': data[10]}
if data[0] in sw.keys():
param.update({'a0': sw[data[0]]})
out['Slack'].append(param)
else:
out['PV'].append(param)
_add_devices_from_dict(out, system)
return out
</DeepExtract>
<DeepExtract>
out = defaultdict(list)
for data in raw['branch']:
param = {'bus1': data[0], 'bus2': data[1], 'r': data[3], 'x': data[4], 'b': data[5], 'rate_a': data[6], 'rate_b': data[7], 'rate_c': data[8], 'Vn1': system.Bus.get(src='Vn', idx=data[0], attr='v'), 'Vn2': system.Bus.get(src='Vn', idx=data[1], attr='v')}
out['Line'].append(param)
_add_devices_from_dict(out, system)
return out
</DeepExtract>
<DeepExtract>
out = defaultdict(list)
xf_3_count = 1
for data in raw['transf']:
if len(data) == 4:
Sn = system.config.mva
bus_Vn1 = system.Bus.get(src='Vn', idx=data[0][0], attr='v')
bus_Vn2 = system.Bus.get(src='Vn', idx=data[0][1], attr='v')
Vn1 = data[2][1] if data[2][1] != 0.0 else bus_Vn1
Vn2 = data[3][1] if data[3][1] != 0.0 else bus_Vn2
transf = True
tap = data[2][0]
phi = data[2][2] * deg2rad
rate_a = data[2][3]
rate_b = data[2][4]
rate_c = data[2][5]
if data[0][4] == 2:
tap = data[2][0] / bus_Vn1 / (data[3][0] / bus_Vn2)
elif data[0][4] == 3:
tap = tap * (Vn1 / bus_Vn1) / (Vn2 / bus_Vn2)
if data[0][5] == 1:
Sn = system.config.mva
elif data[0][5] == 2:
Sn = data[1][2]
else:
logger.warning('Impedance code 3 not implemented')
if data[0][6] == 2:
logger.warning('Admittance code 2 not implemented')
param = {'bus1': data[0][0], 'bus2': data[0][1], 'u': data[0][11], 'b': data[0][8], 'r': data[1][0], 'x': data[1][1], 'trans': transf, 'tap': tap, 'phi': phi, 'Sn': Sn, 'Vn1': Vn1, 'Vn2': Vn2, 'rate_a': rate_a, 'rate_b': rate_b, 'rate_c': rate_c}
out['Line'].append(param)
else:
new_bus = data[0][2] + 1
if new_bus in system.Bus.idx.v:
new_bus = max_bus + xf_3_count
logger.warning('Added bus <%s> for 3-winding transformer <%s-%s-%s>', new_bus, data[0][0], data[0][1], data[0][2])
param = {'idx': new_bus, 'name': '_'.join([str(i) for i in data[0][:3]]), 'Vn': 1.0, 'v0': data[1][-2], 'a0': data[1][-1] * deg2rad}
out['Bus'].append(param)
r = []
x = []
r.append((data[1][0] + data[1][6] - data[1][3]) / 2)
r.append((data[1][3] + data[1][0] - data[1][6]) / 2)
r.append((data[1][6] + data[1][3] - data[1][0]) / 2)
x.append((data[1][1] + data[1][7] - data[1][4]) / 2)
x.append((data[1][4] + data[1][1] - data[1][7]) / 2)
x.append((data[1][7] + data[1][4] - data[1][1]) / 2)
for i in range(0, 3):
param = {'trans': True, 'bus1': data[0][i], 'bus2': new_bus, 'u': data[0][11], 'b': data[0][8], 'r': r[i], 'x': x[i], 'tap': data[2 + i][0], 'phi': data[2 + i][2] * deg2rad, 'Vn1': system.Bus.get(src='Vn', idx=data[0][i], attr='v'), 'Vn2': 1.0}
out['Line'].append(param)
xf_3_count += 1
_add_devices_from_dict(out, system)
return (out, xf_3_count)
</DeepExtract>
<DeepExtract>
out = defaultdict(list)
mva = system.config.mva
for data in raw['swshunt']:
bus = data[0]
vn = system.Bus.get(src='Vn', idx=bus, attr='v')
param = {'bus': bus, 'Vn': vn, 'Sn': mva, 'u': data[3], 'b': data[9] / mva}
out['Shunt'].append(param)
_add_devices_from_dict(out, system)
return out
</DeepExtract>
<DeepExtract>
out = defaultdict(list)
for data in raw['area']:
param = {'idx': data[0], 'name': data[4]}
out['Area'].append(param)
for data in raw['zone']:
param = {'idx': data[0], 'name': data[1]}
_add_devices_from_dict(out, system)
return out
</DeepExtract>
return ret
|
def read(system, file):
"""
Read PSS/E RAW file v32/v33 formats.
"""
blocks = ['bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area', 'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone', 'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q']
rawd = re.compile('rawd\\d\\d')
ret = True
block_idx = 0
mva = 100
raw = {}
for item in blocks:
raw[item] = []
data = []
mdata = []
dev_line = 0
line_list = andes.io.read_file_like(file)
for (num, line) in enumerate(line_list):
line = line.strip()
if num == 0:
data = line.split('/')[0]
data = data.split(',')
mva = float(data[1])
system.config.mva = mva
try:
system.config.freq = float(data[5])
except IndexError:
logger.warning('System frequency is set to 60 Hz.\nConsider using a higher version PSS/E raw file.')
system.config.freq = 60.0
version = 0
if len(data) >= 3:
version = int(data[2])
elif rawd.search(line):
version = int(rawd.search(line).group(0).strip('rawd'))
continue
elif num == 1 or num == 2:
if len(line) > 0:
logger.info(' ' + line)
continue
elif num >= 3:
if line[0:2] == '0 ' or line[0:3] == ' 0 ':
block_idx += 1
continue
elif line[0] == 'Q':
break
data = line.split(',')
data = [to_number(item) for item in data]
mdata.append(data)
dev_line += 1
line_counts = [1, 1, 1, 1, 1, 4, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0]
if block_idx == 5:
if mdata[0][2] == 0:
block_lines = 4
else:
block_lines = 5
block_lines = line_counts[block_idx]
if dev_line >= block_lines:
if block_lines == 1:
mdata = mdata[0]
raw[blocks[block_idx]].append(mdata)
mdata = []
dev_line = 0
out = defaultdict(list)
bus_idx_list = list()
sw = dict()
for data in raw['bus']:
idx = data[0]
bus_idx_list.append(idx)
ty = data[3]
a0 = data[8] * deg2rad
if ty == 3:
sw[idx] = a0
param = {'idx': idx, 'name': data[1], 'Vn': data[2], 'v0': data[7], 'a0': a0, 'area': data[4], 'zone': data[5], 'owner': data[6]}
out['Bus'].append(param)
_add_devices_from_dict(out, system)
(bus_params, bus_idx_list, sw) = (out, bus_idx_list, sw)
max_bus = max(bus_idx_list)
mva = system.config.mva
out = defaultdict(list)
for data in raw['load']:
bus = data[0]
vn = system.Bus.get(src='Vn', idx=bus, attr='v')
v0 = system.Bus.get(src='v0', idx=bus, attr='v')
param = {'bus': bus, 'u': data[2], 'Vn': vn, 'p0': (data[5] + data[7] * v0 + data[9] * v0 ** 2) / mva, 'q0': (data[6] + data[8] * v0 - data[10] * v0 ** 2) / mva, 'owner': data[11]}
out['PQ'].append(param)
_add_devices_from_dict(out, system)
return out
mva = system.config.mva
out = defaultdict(list)
for data in raw['fshunt']:
bus = data[0]
vn = system.Bus.get(src='Vn', idx=bus, attr='v')
param = {'bus': bus, 'Vn': vn, 'u': data[2], 'Sn': mva, 'g': data[3] / mva, 'b': data[4] / mva}
out['Shunt'].append(param)
_add_devices_from_dict(out, system)
return out
mva = system.config.mva
out = defaultdict(list)
gen_idx = 0
for data in raw['gen']:
bus = data[0]
subidx = data[1]
vn = system.Bus.get(src='Vn', idx=bus, attr='v')
gen_mva = data[8]
gen_idx += 1
status = data[14]
param = {'Sn': gen_mva, 'Vn': vn, 'u': status, 'bus': bus, 'subidx': subidx, 'idx': gen_idx, 'p0': data[2] / mva, 'q0': data[3] / mva, 'pmax': data[16] / mva, 'pmin': data[17] / mva, 'qmax': data[4] / mva, 'qmin': data[5] / mva, 'v0': data[6], 'ra': data[9], 'xs': data[10]}
if data[0] in sw.keys():
param.update({'a0': sw[data[0]]})
out['Slack'].append(param)
else:
out['PV'].append(param)
_add_devices_from_dict(out, system)
return out
out = defaultdict(list)
for data in raw['branch']:
param = {'bus1': data[0], 'bus2': data[1], 'r': data[3], 'x': data[4], 'b': data[5], 'rate_a': data[6], 'rate_b': data[7], 'rate_c': data[8], 'Vn1': system.Bus.get(src='Vn', idx=data[0], attr='v'), 'Vn2': system.Bus.get(src='Vn', idx=data[1], attr='v')}
out['Line'].append(param)
_add_devices_from_dict(out, system)
return out
out = defaultdict(list)
xf_3_count = 1
for data in raw['transf']:
if len(data) == 4:
Sn = system.config.mva
bus_Vn1 = system.Bus.get(src='Vn', idx=data[0][0], attr='v')
bus_Vn2 = system.Bus.get(src='Vn', idx=data[0][1], attr='v')
Vn1 = data[2][1] if data[2][1] != 0.0 else bus_Vn1
Vn2 = data[3][1] if data[3][1] != 0.0 else bus_Vn2
transf = True
tap = data[2][0]
phi = data[2][2] * deg2rad
rate_a = data[2][3]
rate_b = data[2][4]
rate_c = data[2][5]
if data[0][4] == 2:
tap = data[2][0] / bus_Vn1 / (data[3][0] / bus_Vn2)
elif data[0][4] == 3:
tap = tap * (Vn1 / bus_Vn1) / (Vn2 / bus_Vn2)
if data[0][5] == 1:
Sn = system.config.mva
elif data[0][5] == 2:
Sn = data[1][2]
else:
logger.warning('Impedance code 3 not implemented')
if data[0][6] == 2:
logger.warning('Admittance code 2 not implemented')
param = {'bus1': data[0][0], 'bus2': data[0][1], 'u': data[0][11], 'b': data[0][8], 'r': data[1][0], 'x': data[1][1], 'trans': transf, 'tap': tap, 'phi': phi, 'Sn': Sn, 'Vn1': Vn1, 'Vn2': Vn2, 'rate_a': rate_a, 'rate_b': rate_b, 'rate_c': rate_c}
out['Line'].append(param)
else:
new_bus = data[0][2] + 1
if new_bus in system.Bus.idx.v:
new_bus = max_bus + xf_3_count
logger.warning('Added bus <%s> for 3-winding transformer <%s-%s-%s>', new_bus, data[0][0], data[0][1], data[0][2])
param = {'idx': new_bus, 'name': '_'.join([str(i) for i in data[0][:3]]), 'Vn': 1.0, 'v0': data[1][-2], 'a0': data[1][-1] * deg2rad}
out['Bus'].append(param)
r = []
x = []
r.append((data[1][0] + data[1][6] - data[1][3]) / 2)
r.append((data[1][3] + data[1][0] - data[1][6]) / 2)
r.append((data[1][6] + data[1][3] - data[1][0]) / 2)
x.append((data[1][1] + data[1][7] - data[1][4]) / 2)
x.append((data[1][4] + data[1][1] - data[1][7]) / 2)
x.append((data[1][7] + data[1][4] - data[1][1]) / 2)
for i in range(0, 3):
param = {'trans': True, 'bus1': data[0][i], 'bus2': new_bus, 'u': data[0][11], 'b': data[0][8], 'r': r[i], 'x': x[i], 'tap': data[2 + i][0], 'phi': data[2 + i][2] * deg2rad, 'Vn1': system.Bus.get(src='Vn', idx=data[0][i], attr='v'), 'Vn2': 1.0}
out['Line'].append(param)
xf_3_count += 1
_add_devices_from_dict(out, system)
return (out, xf_3_count)
out = defaultdict(list)
mva = system.config.mva
for data in raw['swshunt']:
bus = data[0]
vn = system.Bus.get(src='Vn', idx=bus, attr='v')
param = {'bus': bus, 'Vn': vn, 'Sn': mva, 'u': data[3], 'b': data[9] / mva}
out['Shunt'].append(param)
_add_devices_from_dict(out, system)
return out
out = defaultdict(list)
for data in raw['area']:
param = {'idx': data[0], 'name': data[4]}
out['Area'].append(param)
for data in raw['zone']:
param = {'idx': data[0], 'name': data[1]}
_add_devices_from_dict(out, system)
return out
return ret
|
andes
|
positive
|
def __call__(self, *args):
with errors.SetUserErrorContext(cli=self, pname=args[0]):
<DeepExtract>
ba = self.signature.read_arguments(args[1:], args[0])
(func, post, posargs, kwargs) = ba
name = ' '.join([args[0]] + post)
(func, name, posargs, kwargs) = (func or self.func, name, posargs, kwargs)
</DeepExtract>
return func(*posargs, **kwargs)
|
def __call__(self, *args):
with errors.SetUserErrorContext(cli=self, pname=args[0]):
ba = self.signature.read_arguments(args[1:], args[0])
(func, post, posargs, kwargs) = ba
name = ' '.join([args[0]] + post)
(func, name, posargs, kwargs) = (func or self.func, name, posargs, kwargs)
return func(*posargs, **kwargs)
|
clize
|
positive
|
def test_key_isnt_unicode_bydefault(self):
<DeepExtract>
canary = []
def decorate(fn):
canary.append(util.function_key_generator('mynamespace', fn, **kw))
(decorate, canary) = fn
(decorate, canary) = (decorate, canary)
</DeepExtract>
@decorate
def one(a, b):
pass
gen = canary[0]
assert isinstance(gen('foo'), str)
|
def test_key_isnt_unicode_bydefault(self):
canary = []
def decorate(fn):
canary.append(util.function_key_generator('mynamespace', fn, **kw))
(decorate, canary) = fn
(decorate, canary) = (decorate, canary)
@decorate
def one(a, b):
pass
gen = canary[0]
assert isinstance(gen('foo'), str)
|
dogpile.cache
|
positive
|
def advice(parent_object, *args, **kw):
request = parent_object.request
audit_manager = request.data_managers['audit_manager']
if 'scheme_id' not in request.matchdict.keys():
log.error('Misuse of the audit decorator. The url must at least contain a {scheme_id} parameter')
return fn(parent_object, *args, **kw)
provider = request.skos_registry.get_provider(request.matchdict['scheme_id'])
if not provider or 'external' in provider.get_metadata()['subject']:
return fn(parent_object, *args, **kw)
else:
if 'c_id' in request.matchdict.keys():
visit_log = ConceptVisitLog(concept_id=request.matchdict['c_id'], conceptscheme_id=request.matchdict['scheme_id'])
else:
visit_log = ConceptschemeVisitLog(conceptscheme_id=request.matchdict['scheme_id'])
response = fn(parent_object, *args, **kw)
if isinstance(response, Response):
<DeepExtract>
if response.content_type == 'application/rdf+xml' or response.content_type == 'application/ld+json' or response.content_type == 'text/turtle' or (response.content_type == 'application/x-turtle'):
visit_log.origin = 'RDF'
else:
visit_log.origin = None
</DeepExtract>
else:
<DeepExtract>
if '.csv' in request.url:
visit_log.origin = 'CSV'
elif 'text/html' in request.accept:
visit_log.origin = 'HTML'
elif 'application/json' in request.accept:
visit_log.origin = 'REST'
elif 'application/ld+json' in request.accept:
visit_log.origin = 'RDF'
else:
visit_log.origin = None
</DeepExtract>
audit_manager.save(visit_log)
return response
|
def advice(parent_object, *args, **kw):
request = parent_object.request
audit_manager = request.data_managers['audit_manager']
if 'scheme_id' not in request.matchdict.keys():
log.error('Misuse of the audit decorator. The url must at least contain a {scheme_id} parameter')
return fn(parent_object, *args, **kw)
provider = request.skos_registry.get_provider(request.matchdict['scheme_id'])
if not provider or 'external' in provider.get_metadata()['subject']:
return fn(parent_object, *args, **kw)
else:
if 'c_id' in request.matchdict.keys():
visit_log = ConceptVisitLog(concept_id=request.matchdict['c_id'], conceptscheme_id=request.matchdict['scheme_id'])
else:
visit_log = ConceptschemeVisitLog(conceptscheme_id=request.matchdict['scheme_id'])
response = fn(parent_object, *args, **kw)
if isinstance(response, Response):
if response.content_type == 'application/rdf+xml' or response.content_type == 'application/ld+json' or response.content_type == 'text/turtle' or (response.content_type == 'application/x-turtle'):
visit_log.origin = 'RDF'
else:
visit_log.origin = None
else:
if '.csv' in request.url:
visit_log.origin = 'CSV'
elif 'text/html' in request.accept:
visit_log.origin = 'HTML'
elif 'application/json' in request.accept:
visit_log.origin = 'REST'
elif 'application/ld+json' in request.accept:
visit_log.origin = 'RDF'
else:
visit_log.origin = None
audit_manager.save(visit_log)
return response
|
atramhasis
|
positive
|
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
<DeepExtract>
if '^(.*\\b' + macro + '\\s*)\\(' not in _regexp_compile_cache:
_regexp_compile_cache['^(.*\\b' + macro + '\\s*)\\('] = sre_compile.compile('^(.*\\b' + macro + '\\s*)\\(')
matched = _regexp_compile_cache['^(.*\\b' + macro + '\\s*)\\('].match(line)
</DeepExtract>
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
|
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
if '^(.*\\b' + macro + '\\s*)\\(' not in _regexp_compile_cache:
_regexp_compile_cache['^(.*\\b' + macro + '\\s*)\\('] = sre_compile.compile('^(.*\\b' + macro + '\\s*)\\(')
matched = _regexp_compile_cache['^(.*\\b' + macro + '\\s*)\\('].match(line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
|
cowry
|
positive
|
def _compute_loss(self, batch, output_decoder, output_lm, target):
<DeepExtract>
bottled_output_decoder = output_decoder.view(-1, output_decoder.size(2))
</DeepExtract>
<DeepExtract>
bottled_output_lm = output_lm.view(-1, output_lm.size(2))
</DeepExtract>
scores = self.generator(bottled_output_decoder, bottled_output_lm)
gtruth = target.view(-1)
loss = self.criterion(scores, gtruth)
<DeepExtract>
pred = scores.max(1)[1]
non_padding = gtruth.ne(self.padding_idx)
num_correct = pred.eq(gtruth).masked_select(non_padding).sum().item()
num_non_padding = non_padding.sum().item()
stats = onmt.utils.Statistics(loss.clone().item(), num_non_padding, num_correct)
</DeepExtract>
return (loss, stats)
|
def _compute_loss(self, batch, output_decoder, output_lm, target):
bottled_output_decoder = output_decoder.view(-1, output_decoder.size(2))
bottled_output_lm = output_lm.view(-1, output_lm.size(2))
scores = self.generator(bottled_output_decoder, bottled_output_lm)
gtruth = target.view(-1)
loss = self.criterion(scores, gtruth)
pred = scores.max(1)[1]
non_padding = gtruth.ne(self.padding_idx)
num_correct = pred.eq(gtruth).masked_select(non_padding).sum().item()
num_non_padding = non_padding.sum().item()
stats = onmt.utils.Statistics(loss.clone().item(), num_non_padding, num_correct)
return (loss, stats)
|
encoder-agnostic-adaptation
|
positive
|
def test_tanh(self):
<DeepExtract>
self = Mpfr_t()
mpfr_init2(self, 53)
x = self
</DeepExtract>
<DeepExtract>
self = Mpfr_t()
mpfr_init2(self, 53)
y = self
</DeepExtract>
mpfr_set_d(x, 7.3, MPFR_RNDN)
mpfr_tanh(y, x, MPFR_RNDN)
self.assertEqual(mpfr_get_d(y, MPFR_RNDN), 0.999999087295143)
|
def test_tanh(self):
self = Mpfr_t()
mpfr_init2(self, 53)
x = self
self = Mpfr_t()
mpfr_init2(self, 53)
y = self
mpfr_set_d(x, 7.3, MPFR_RNDN)
mpfr_tanh(y, x, MPFR_RNDN)
self.assertEqual(mpfr_get_d(y, MPFR_RNDN), 0.999999087295143)
|
bigfloat
|
positive
|
def main(_):
if FLAGS.input_dir[-1] != '/':
FLAGS.input_dir = FLAGS.input_dir + '/'
if FLAGS.output_dir[-1] != '/':
FLAGS.output_dir = FLAGS.output_dir + '/'
tf.logging.set_verbosity(tf.logging.INFO)
tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
rng = random.Random(FLAGS.random_seed)
for (dex, fname) in enumerate(os.listdir(FLAGS.input_dir)):
input_files = tf.gfile.Glob(FLAGS.input_dir + fname)
tf.logging.info('*** Reading from input files ***')
for input_file in input_files:
tf.logging.info(' %s', input_file)
<DeepExtract>
all_documents = [[]]
for input_file in input_files:
with tf.gfile.GFile(input_file, 'r') as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(FLAGS.dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(create_instances_from_document(all_documents, document_index, FLAGS.max_seq_length, FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
instances = instances
</DeepExtract>
output_files = [FLAGS.output_dir + FLAGS.output_base_name + '_' + str(dex) + '.tfrecord']
tf.logging.info('*** Writing to output files ***')
for output_file in output_files:
tf.logging.info(' %s', output_file)
<DeepExtract>
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= FLAGS.max_seq_length
while len(input_ids) < FLAGS.max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == FLAGS.max_seq_length
assert len(input_mask) == FLAGS.max_seq_length
assert len(segment_ids) == FLAGS.max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < FLAGS.max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(input_ids)
features['input_mask'] = create_int_feature(input_mask)
features['segment_ids'] = create_int_feature(segment_ids)
features['masked_lm_positions'] = create_int_feature(masked_lm_positions)
features['masked_lm_ids'] = create_int_feature(masked_lm_ids)
features['masked_lm_weights'] = create_float_feature(masked_lm_weights)
features['next_sentence_labels'] = create_int_feature([next_sentence_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info('*** Example ***')
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info('Wrote %d total instances', total_written)
</DeepExtract>
|
def main(_):
if FLAGS.input_dir[-1] != '/':
FLAGS.input_dir = FLAGS.input_dir + '/'
if FLAGS.output_dir[-1] != '/':
FLAGS.output_dir = FLAGS.output_dir + '/'
tf.logging.set_verbosity(tf.logging.INFO)
tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
rng = random.Random(FLAGS.random_seed)
for (dex, fname) in enumerate(os.listdir(FLAGS.input_dir)):
input_files = tf.gfile.Glob(FLAGS.input_dir + fname)
tf.logging.info('*** Reading from input files ***')
for input_file in input_files:
tf.logging.info(' %s', input_file)
all_documents = [[]]
for input_file in input_files:
with tf.gfile.GFile(input_file, 'r') as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(FLAGS.dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(create_instances_from_document(all_documents, document_index, FLAGS.max_seq_length, FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
instances = instances
output_files = [FLAGS.output_dir + FLAGS.output_base_name + '_' + str(dex) + '.tfrecord']
tf.logging.info('*** Writing to output files ***')
for output_file in output_files:
tf.logging.info(' %s', output_file)
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= FLAGS.max_seq_length
while len(input_ids) < FLAGS.max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == FLAGS.max_seq_length
assert len(input_mask) == FLAGS.max_seq_length
assert len(segment_ids) == FLAGS.max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < FLAGS.max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(input_ids)
features['input_mask'] = create_int_feature(input_mask)
features['segment_ids'] = create_int_feature(segment_ids)
features['masked_lm_positions'] = create_int_feature(masked_lm_positions)
features['masked_lm_ids'] = create_int_feature(masked_lm_ids)
features['masked_lm_weights'] = create_float_feature(masked_lm_weights)
features['next_sentence_labels'] = create_int_feature([next_sentence_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info('*** Example ***')
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info('Wrote %d total instances', total_written)
|
DistillBERT
|
positive
|
def showreply(self) -> None:
ms = 40 + random.random() * 10
self.write('64 bytes from {} ({}): icmp_seq={} ttl=50 time={:.1f} ms\n'.format(self.host, self.ip, self.count + 1, ms))
self.count += 1
if self.count == self.max:
self.running = False
self.write('\n')
<DeepExtract>
self.write(f'--- {self.host} ping statistics ---\n')
self.write('%d packets transmitted, %d received, 0%% packet loss, time 907ms\n' % (self.count, self.count))
self.write('rtt min/avg/max/mdev = 48.264/50.352/52.441/2.100 ms\n')
</DeepExtract>
self.exit()
else:
self.scheduled = reactor.callLater(1, self.showreply)
|
def showreply(self) -> None:
ms = 40 + random.random() * 10
self.write('64 bytes from {} ({}): icmp_seq={} ttl=50 time={:.1f} ms\n'.format(self.host, self.ip, self.count + 1, ms))
self.count += 1
if self.count == self.max:
self.running = False
self.write('\n')
self.write(f'--- {self.host} ping statistics ---\n')
self.write('%d packets transmitted, %d received, 0%% packet loss, time 907ms\n' % (self.count, self.count))
self.write('rtt min/avg/max/mdev = 48.264/50.352/52.441/2.100 ms\n')
self.exit()
else:
self.scheduled = reactor.callLater(1, self.showreply)
|
cowrie
|
positive
|
def main(_):
seed = 0
(in_dev_accs, dev_accs, test_accs) = ([], [], [])
for seed in range(3):
FLAGS.seed = seed
np.random.seed(seed)
if FLAGS.simple:
FLAGS.lmbda = 0
<DeepExtract>
SEED = FLAGS.seed
np.random.seed(SEED)
emb_dim = FLAGS.emb_dim
num_domains = max([np.max(train[2]), np.max(dev[2]), np.max(test[2])]) + 1
tf.reset_default_graph()
with tf.Graph().as_default():
tf.set_random_seed(SEED)
ph_lr = tf.placeholder(tf.float32, name='learning_rate', shape=[])
(images_placeholder, labels_placeholder, domain_placeholder) = placeholder_inputs(FLAGS.batch_size)
LN = tf.keras.layers.LayerNormalization(axis=1)
with tf.variable_scope(''):
reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=True)
reprs = LN(reprs)
(logits1, logits2, reg_loss, common_var, specialized_common_wt, _e) = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
with tf.variable_scope('', reuse=True):
reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=False)
reprs = LN(reprs)
(_, logits_for_eval, _, _, _, _) = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
loss1 = lipitk.loss(logits1, labels_placeholder, num_classes=NUM_CLASSES)
loss2 = lipitk.loss(logits2, labels_placeholder, num_classes=NUM_CLASSES)
loss = FLAGS.lmbda * loss1 + loss2
if FLAGS.lmbda > 0:
loss = FLAGS.lmbda * loss1 + (1 - FLAGS.lmbda) * loss2
loss += FLAGS.alpha * reg_loss
if FLAGS.lmbda > 1:
loss /= FLAGS.lmbda
global_step = tf.contrib.framework.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
train_op = lipitk.training(loss, ph_lr)
eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
init = tf.global_variables_initializer()
_vars = tf.global_variables()
saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
sess = tf.Session(config=config)
sess.run(init)
(best_acc, best_test) = (-1, -1)
nepochs = FLAGS.nepochs
nsteps = nepochs * len(train[0]) // FLAGS.batch_size
nsteps_per_epoch = len(train[0]) / FLAGS.batch_size
start_lr = FLAGS.learning_rate
for step in tqdm.tqdm(xrange(nsteps)):
start_time = time.time()
feed_dict = fill_feed_dict(train, images_placeholder, labels_placeholder, domain_placeholder)
np_lr = start_lr
feed_dict[ph_lr] = np_lr
(_, np_loss, _) = sess.run([train_op, loss, increment_global_step], feed_dict=feed_dict)
all_losses = sess.run([loss1, loss2, reg_loss], feed_dict=feed_dict)
if (step + 1) % 1000 == 0 or step + 1 == nsteps:
print('Loss: ', np_loss)
print('Losses: ', all_losses)
print('Common wt: ', sess.run(common_var))
print('Specialized common wt: ', sess.run(specialized_common_wt))
print('Emb matrix: ', sess.run(_e)[:5])
eprint('Training Data Eval:')
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, train)
(in_dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, in_dev)
(dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, dev)
(test_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, test)
if dev_acc >= best_acc:
best_acc = dev_acc
best_test = test_acc
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=step)
print('Best in-domain dev acc: %f test: %f' % (best_acc, best_test))
(in_dev_acc, dev_acc, test_acc) = (in_dev_acc, best_acc, best_test)
</DeepExtract>
elif FLAGS.cg:
<DeepExtract>
SEED = FLAGS.seed
np.random.seed(SEED)
emb_dim = FLAGS.emb_dim
num_domains = np.max(train[2]) + 1
tf.reset_default_graph()
with tf.Graph().as_default():
tf.set_random_seed(SEED)
(images_placeholder, labels_placeholder, domain_placeholder) = placeholder_inputs(FLAGS.batch_size)
ph_lr = tf.placeholder(tf.float32, name='learning_rate')
if not cgpp:
cg_fn = lipitk.cg
else:
cg_fn = lipitk.cgpp
with tf.variable_scope(''):
(loss, _, debug_print) = cg_fn(images_placeholder, labels_placeholder, domain_placeholder, image_size=IMAGE_SIZE, is_training=True, network=NETWORK, num_classes=NUM_CLASSES, num_domains=num_domains, FLAGS=FLAGS)
with tf.variable_scope('', reuse=True):
(_, logits_for_eval, __) = cg_fn(images_placeholder, labels_placeholder, domain_placeholder, image_size=IMAGE_SIZE, is_training=False, network=NETWORK, num_classes=NUM_CLASSES, num_domains=num_domains, FLAGS=FLAGS)
train_op = lipitk.training(loss, ph_lr)
eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
init = tf.global_variables_initializer()
_vars = tf.all_variables()
saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
sess = tf.Session(config=config)
sess.run(init)
(best_acc, best_test) = (-1, -1)
nepochs = FLAGS.nepochs
nsteps = nepochs * len(train[0]) // FLAGS.batch_size
for step in tqdm.tqdm(xrange(nsteps)):
start_time = time.time()
feed_dict = fill_feed_dict(train, images_placeholder, labels_placeholder, domain_placeholder)
lr = FLAGS.learning_rate
feed_dict[ph_lr] = lr
(_, np_loss) = sess.run([train_op, loss], feed_dict=feed_dict)
if (step + 1) % 1000 == 0 or step + 1 == nsteps:
print('Loss: ', np_loss)
if debug_print is not None:
np_dp = sess.run(debug_print, feed_dict=feed_dict)
print('****Debug:****')
print(np_dp)
eprint('Training Data Eval:')
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, train)
(in_dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, in_dev)
(dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, dev)
(test_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, test)
if dev_acc >= best_acc:
best_acc = dev_acc
best_test = test_acc
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=step)
print('Best in-domain ind dev acc: %f dev acc: %f test: %f' % (in_dev_acc, best_acc, best_test))
(in_dev_acc, dev_acc, test_acc) = (in_dev_acc, best_acc, best_test)
</DeepExtract>
else:
<DeepExtract>
SEED = FLAGS.seed
np.random.seed(SEED)
emb_dim = FLAGS.emb_dim
num_domains = max([np.max(train[2]), np.max(dev[2]), np.max(test[2])]) + 1
tf.reset_default_graph()
with tf.Graph().as_default():
tf.set_random_seed(SEED)
ph_lr = tf.placeholder(tf.float32, name='learning_rate', shape=[])
(images_placeholder, labels_placeholder, domain_placeholder) = placeholder_inputs(FLAGS.batch_size)
LN = tf.keras.layers.LayerNormalization(axis=1)
with tf.variable_scope(''):
reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=True)
reprs = LN(reprs)
(logits1, logits2, reg_loss, common_var, specialized_common_wt, _e) = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
with tf.variable_scope('', reuse=True):
reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=False)
reprs = LN(reprs)
(_, logits_for_eval, _, _, _, _) = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
loss1 = lipitk.loss(logits1, labels_placeholder, num_classes=NUM_CLASSES)
loss2 = lipitk.loss(logits2, labels_placeholder, num_classes=NUM_CLASSES)
loss = FLAGS.lmbda * loss1 + loss2
if FLAGS.lmbda > 0:
loss = FLAGS.lmbda * loss1 + (1 - FLAGS.lmbda) * loss2
loss += FLAGS.alpha * reg_loss
if FLAGS.lmbda > 1:
loss /= FLAGS.lmbda
global_step = tf.contrib.framework.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
train_op = lipitk.training(loss, ph_lr)
eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
init = tf.global_variables_initializer()
_vars = tf.global_variables()
saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
sess = tf.Session(config=config)
sess.run(init)
(best_acc, best_test) = (-1, -1)
nepochs = FLAGS.nepochs
nsteps = nepochs * len(train[0]) // FLAGS.batch_size
nsteps_per_epoch = len(train[0]) / FLAGS.batch_size
start_lr = FLAGS.learning_rate
for step in tqdm.tqdm(xrange(nsteps)):
start_time = time.time()
feed_dict = fill_feed_dict(train, images_placeholder, labels_placeholder, domain_placeholder)
np_lr = start_lr
feed_dict[ph_lr] = np_lr
(_, np_loss, _) = sess.run([train_op, loss, increment_global_step], feed_dict=feed_dict)
all_losses = sess.run([loss1, loss2, reg_loss], feed_dict=feed_dict)
if (step + 1) % 1000 == 0 or step + 1 == nsteps:
print('Loss: ', np_loss)
print('Losses: ', all_losses)
print('Common wt: ', sess.run(common_var))
print('Specialized common wt: ', sess.run(specialized_common_wt))
print('Emb matrix: ', sess.run(_e)[:5])
eprint('Training Data Eval:')
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, train)
(in_dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, in_dev)
(dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, dev)
(test_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, test)
if dev_acc >= best_acc:
best_acc = dev_acc
best_test = test_acc
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=step)
print('Best in-domain dev acc: %f test: %f' % (best_acc, best_test))
(in_dev_acc, dev_acc, test_acc) = (in_dev_acc, best_acc, best_test)
</DeepExtract>
in_dev_accs.append(in_dev_acc)
dev_accs.append(dev_acc)
test_accs.append(test_acc)
print('InD Val, Val, test acc: %0.4f (%0.4f), %0.4f (%0.4f), %0.4f (%0.4f)' % (np.mean(in_dev_accs), np.std(in_dev_accs), np.mean(dev_accs), np.std(dev_accs), np.mean(test_accs), np.std(test_accs)))
|
def main(_):
seed = 0
(in_dev_accs, dev_accs, test_accs) = ([], [], [])
for seed in range(3):
FLAGS.seed = seed
np.random.seed(seed)
if FLAGS.simple:
FLAGS.lmbda = 0
SEED = FLAGS.seed
np.random.seed(SEED)
emb_dim = FLAGS.emb_dim
num_domains = max([np.max(train[2]), np.max(dev[2]), np.max(test[2])]) + 1
tf.reset_default_graph()
with tf.Graph().as_default():
tf.set_random_seed(SEED)
ph_lr = tf.placeholder(tf.float32, name='learning_rate', shape=[])
(images_placeholder, labels_placeholder, domain_placeholder) = placeholder_inputs(FLAGS.batch_size)
LN = tf.keras.layers.LayerNormalization(axis=1)
with tf.variable_scope(''):
reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=True)
reprs = LN(reprs)
(logits1, logits2, reg_loss, common_var, specialized_common_wt, _e) = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
with tf.variable_scope('', reuse=True):
reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=False)
reprs = LN(reprs)
(_, logits_for_eval, _, _, _, _) = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
loss1 = lipitk.loss(logits1, labels_placeholder, num_classes=NUM_CLASSES)
loss2 = lipitk.loss(logits2, labels_placeholder, num_classes=NUM_CLASSES)
loss = FLAGS.lmbda * loss1 + loss2
if FLAGS.lmbda > 0:
loss = FLAGS.lmbda * loss1 + (1 - FLAGS.lmbda) * loss2
loss += FLAGS.alpha * reg_loss
if FLAGS.lmbda > 1:
loss /= FLAGS.lmbda
global_step = tf.contrib.framework.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
train_op = lipitk.training(loss, ph_lr)
eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
init = tf.global_variables_initializer()
_vars = tf.global_variables()
saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
sess = tf.Session(config=config)
sess.run(init)
(best_acc, best_test) = (-1, -1)
nepochs = FLAGS.nepochs
nsteps = nepochs * len(train[0]) // FLAGS.batch_size
nsteps_per_epoch = len(train[0]) / FLAGS.batch_size
start_lr = FLAGS.learning_rate
for step in tqdm.tqdm(xrange(nsteps)):
start_time = time.time()
feed_dict = fill_feed_dict(train, images_placeholder, labels_placeholder, domain_placeholder)
np_lr = start_lr
feed_dict[ph_lr] = np_lr
(_, np_loss, _) = sess.run([train_op, loss, increment_global_step], feed_dict=feed_dict)
all_losses = sess.run([loss1, loss2, reg_loss], feed_dict=feed_dict)
if (step + 1) % 1000 == 0 or step + 1 == nsteps:
print('Loss: ', np_loss)
print('Losses: ', all_losses)
print('Common wt: ', sess.run(common_var))
print('Specialized common wt: ', sess.run(specialized_common_wt))
print('Emb matrix: ', sess.run(_e)[:5])
eprint('Training Data Eval:')
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, train)
(in_dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, in_dev)
(dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, dev)
(test_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, test)
if dev_acc >= best_acc:
best_acc = dev_acc
best_test = test_acc
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=step)
print('Best in-domain dev acc: %f test: %f' % (best_acc, best_test))
(in_dev_acc, dev_acc, test_acc) = (in_dev_acc, best_acc, best_test)
elif FLAGS.cg:
SEED = FLAGS.seed
np.random.seed(SEED)
emb_dim = FLAGS.emb_dim
num_domains = np.max(train[2]) + 1
tf.reset_default_graph()
with tf.Graph().as_default():
tf.set_random_seed(SEED)
(images_placeholder, labels_placeholder, domain_placeholder) = placeholder_inputs(FLAGS.batch_size)
ph_lr = tf.placeholder(tf.float32, name='learning_rate')
if not cgpp:
cg_fn = lipitk.cg
else:
cg_fn = lipitk.cgpp
with tf.variable_scope(''):
(loss, _, debug_print) = cg_fn(images_placeholder, labels_placeholder, domain_placeholder, image_size=IMAGE_SIZE, is_training=True, network=NETWORK, num_classes=NUM_CLASSES, num_domains=num_domains, FLAGS=FLAGS)
with tf.variable_scope('', reuse=True):
(_, logits_for_eval, __) = cg_fn(images_placeholder, labels_placeholder, domain_placeholder, image_size=IMAGE_SIZE, is_training=False, network=NETWORK, num_classes=NUM_CLASSES, num_domains=num_domains, FLAGS=FLAGS)
train_op = lipitk.training(loss, ph_lr)
eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
init = tf.global_variables_initializer()
_vars = tf.all_variables()
saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
sess = tf.Session(config=config)
sess.run(init)
(best_acc, best_test) = (-1, -1)
nepochs = FLAGS.nepochs
nsteps = nepochs * len(train[0]) // FLAGS.batch_size
for step in tqdm.tqdm(xrange(nsteps)):
start_time = time.time()
feed_dict = fill_feed_dict(train, images_placeholder, labels_placeholder, domain_placeholder)
lr = FLAGS.learning_rate
feed_dict[ph_lr] = lr
(_, np_loss) = sess.run([train_op, loss], feed_dict=feed_dict)
if (step + 1) % 1000 == 0 or step + 1 == nsteps:
print('Loss: ', np_loss)
if debug_print is not None:
np_dp = sess.run(debug_print, feed_dict=feed_dict)
print('****Debug:****')
print(np_dp)
eprint('Training Data Eval:')
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, train)
(in_dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, in_dev)
(dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, dev)
(test_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, test)
if dev_acc >= best_acc:
best_acc = dev_acc
best_test = test_acc
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=step)
print('Best in-domain ind dev acc: %f dev acc: %f test: %f' % (in_dev_acc, best_acc, best_test))
(in_dev_acc, dev_acc, test_acc) = (in_dev_acc, best_acc, best_test)
else:
SEED = FLAGS.seed
np.random.seed(SEED)
emb_dim = FLAGS.emb_dim
num_domains = max([np.max(train[2]), np.max(dev[2]), np.max(test[2])]) + 1
tf.reset_default_graph()
with tf.Graph().as_default():
tf.set_random_seed(SEED)
ph_lr = tf.placeholder(tf.float32, name='learning_rate', shape=[])
(images_placeholder, labels_placeholder, domain_placeholder) = placeholder_inputs(FLAGS.batch_size)
LN = tf.keras.layers.LayerNormalization(axis=1)
with tf.variable_scope(''):
reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=True)
reprs = LN(reprs)
(logits1, logits2, reg_loss, common_var, specialized_common_wt, _e) = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
with tf.variable_scope('', reuse=True):
reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=False)
reprs = LN(reprs)
(_, logits_for_eval, _, _, _, _) = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
loss1 = lipitk.loss(logits1, labels_placeholder, num_classes=NUM_CLASSES)
loss2 = lipitk.loss(logits2, labels_placeholder, num_classes=NUM_CLASSES)
loss = FLAGS.lmbda * loss1 + loss2
if FLAGS.lmbda > 0:
loss = FLAGS.lmbda * loss1 + (1 - FLAGS.lmbda) * loss2
loss += FLAGS.alpha * reg_loss
if FLAGS.lmbda > 1:
loss /= FLAGS.lmbda
global_step = tf.contrib.framework.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
train_op = lipitk.training(loss, ph_lr)
eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
init = tf.global_variables_initializer()
_vars = tf.global_variables()
saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
sess = tf.Session(config=config)
sess.run(init)
(best_acc, best_test) = (-1, -1)
nepochs = FLAGS.nepochs
nsteps = nepochs * len(train[0]) // FLAGS.batch_size
nsteps_per_epoch = len(train[0]) / FLAGS.batch_size
start_lr = FLAGS.learning_rate
for step in tqdm.tqdm(xrange(nsteps)):
start_time = time.time()
feed_dict = fill_feed_dict(train, images_placeholder, labels_placeholder, domain_placeholder)
np_lr = start_lr
feed_dict[ph_lr] = np_lr
(_, np_loss, _) = sess.run([train_op, loss, increment_global_step], feed_dict=feed_dict)
all_losses = sess.run([loss1, loss2, reg_loss], feed_dict=feed_dict)
if (step + 1) % 1000 == 0 or step + 1 == nsteps:
print('Loss: ', np_loss)
print('Losses: ', all_losses)
print('Common wt: ', sess.run(common_var))
print('Specialized common wt: ', sess.run(specialized_common_wt))
print('Emb matrix: ', sess.run(_e)[:5])
eprint('Training Data Eval:')
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, train)
(in_dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, in_dev)
(dev_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, dev)
(test_acc, _) = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, domain_placeholder, test)
if dev_acc >= best_acc:
best_acc = dev_acc
best_test = test_acc
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=step)
print('Best in-domain dev acc: %f test: %f' % (best_acc, best_test))
(in_dev_acc, dev_acc, test_acc) = (in_dev_acc, best_acc, best_test)
in_dev_accs.append(in_dev_acc)
dev_accs.append(dev_acc)
test_accs.append(test_acc)
print('InD Val, Val, test acc: %0.4f (%0.4f), %0.4f (%0.4f), %0.4f (%0.4f)' % (np.mean(in_dev_accs), np.std(in_dev_accs), np.mean(dev_accs), np.std(dev_accs), np.mean(test_accs), np.std(test_accs)))
|
CSD
|
positive
|
def get_avg_ans_prob(data):
<DeepExtract>
paired = {}
for (keys, ex) in data.items():
keys = keys.lower().split('|')
spair = (keys[2], keys[3])
tid = keys[4]
acluster = keys[5]
opair = (keys[6], keys[7])
assert spair[0] != spair[1]
key = (tuple(sorted([spair[0], spair[1]])), tid, acluster, opair[0], opair[1])
if key not in paired:
paired[key] = [None, None]
if key[0][0] == spair[0]:
paired[key][0] = ex
elif key[0][1] == spair[0]:
paired[key][1] = ex
else:
raise Exception('Something weird happend to this example: ', keys, ex)
paired = paired
</DeepExtract>
all_ans_p = []
for (keys, ex_pair) in paired.items():
<DeepExtract>
if 0 == 0:
(ex1_p00, ex1_p01) = (ex_pair[0][0], ex_pair[0][1])
else:
(ex1_p00, ex1_p01) = (ex_pair[0][2], ex_pair[0][3])
</DeepExtract>
<DeepExtract>
if 0 == 0:
(ex2_p00, ex2_p01) = (ex_pair[1][0], ex_pair[1][1])
else:
(ex2_p00, ex2_p01) = (ex_pair[1][2], ex_pair[1][3])
</DeepExtract>
<DeepExtract>
if 1 == 0:
(ex1_p10, ex1_p11) = (ex_pair[0][0], ex_pair[0][1])
else:
(ex1_p10, ex1_p11) = (ex_pair[0][2], ex_pair[0][3])
</DeepExtract>
<DeepExtract>
if 1 == 0:
(ex2_p10, ex2_p11) = (ex_pair[1][0], ex_pair[1][1])
else:
(ex2_p10, ex2_p11) = (ex_pair[1][2], ex_pair[1][3])
</DeepExtract>
all_ans_p.extend([ex1_p00, ex1_p01, ex2_p00, ex2_p01, ex1_p10, ex1_p11, ex2_p10, ex2_p11])
avg_p = sum(all_ans_p) / len(all_ans_p)
return avg_p
|
def get_avg_ans_prob(data):
paired = {}
for (keys, ex) in data.items():
keys = keys.lower().split('|')
spair = (keys[2], keys[3])
tid = keys[4]
acluster = keys[5]
opair = (keys[6], keys[7])
assert spair[0] != spair[1]
key = (tuple(sorted([spair[0], spair[1]])), tid, acluster, opair[0], opair[1])
if key not in paired:
paired[key] = [None, None]
if key[0][0] == spair[0]:
paired[key][0] = ex
elif key[0][1] == spair[0]:
paired[key][1] = ex
else:
raise Exception('Something weird happend to this example: ', keys, ex)
paired = paired
all_ans_p = []
for (keys, ex_pair) in paired.items():
if 0 == 0:
(ex1_p00, ex1_p01) = (ex_pair[0][0], ex_pair[0][1])
else:
(ex1_p00, ex1_p01) = (ex_pair[0][2], ex_pair[0][3])
if 0 == 0:
(ex2_p00, ex2_p01) = (ex_pair[1][0], ex_pair[1][1])
else:
(ex2_p00, ex2_p01) = (ex_pair[1][2], ex_pair[1][3])
if 1 == 0:
(ex1_p10, ex1_p11) = (ex_pair[0][0], ex_pair[0][1])
else:
(ex1_p10, ex1_p11) = (ex_pair[0][2], ex_pair[0][3])
if 1 == 0:
(ex2_p10, ex2_p11) = (ex_pair[1][0], ex_pair[1][1])
else:
(ex2_p10, ex2_p11) = (ex_pair[1][2], ex_pair[1][3])
all_ans_p.extend([ex1_p00, ex1_p01, ex2_p00, ex2_p01, ex1_p10, ex1_p11, ex2_p10, ex2_p11])
avg_p = sum(all_ans_p) / len(all_ans_p)
return avg_p
|
BIG-bench
|
positive
|
def test_dbj032_check_nohydrogens(dash_duo):
<DeepExtract>
app = dash.Dash(__name__)
app.layout = html.Div(simple_app_layout(dash_bio.Jsme(id=_COMPONENT_ID, height='400px', width='400px', options='NOhydrogens oldLook', smiles='CO')))
dash_duo.start_server(app, dev_tools_props_check=True)
dash_duo.wait_for_element('#' + _COMPONENT_ID)
if hasattr(dash_bio.Jsme(id=_COMPONENT_ID, height='400px', width='400px', options='NOhydrogens oldLook', smiles='CO'), 'smiles'):
if hasattr(dash_bio.Jsme(id=_COMPONENT_ID, height='400px', width='400px', options='NOhydrogens oldLook', smiles='CO'), 'options') and 'NOuseOpenChemLib' in dash_bio.Jsme(id=_COMPONENT_ID, height='400px', width='400px', options='NOhydrogens oldLook', smiles='CO').options:
return app
dash_duo.wait_for_element(_FIRST_LINE_SELECTOR)
return app
</DeepExtract>
text = dash_duo.wait_for_element('div > div > div:nth-child(2) > svg > g > text')
assert text.get_attribute('innerHTML') == 'O'
|
def test_dbj032_check_nohydrogens(dash_duo):
app = dash.Dash(__name__)
app.layout = html.Div(simple_app_layout(dash_bio.Jsme(id=_COMPONENT_ID, height='400px', width='400px', options='NOhydrogens oldLook', smiles='CO')))
dash_duo.start_server(app, dev_tools_props_check=True)
dash_duo.wait_for_element('#' + _COMPONENT_ID)
if hasattr(dash_bio.Jsme(id=_COMPONENT_ID, height='400px', width='400px', options='NOhydrogens oldLook', smiles='CO'), 'smiles'):
if hasattr(dash_bio.Jsme(id=_COMPONENT_ID, height='400px', width='400px', options='NOhydrogens oldLook', smiles='CO'), 'options') and 'NOuseOpenChemLib' in dash_bio.Jsme(id=_COMPONENT_ID, height='400px', width='400px', options='NOhydrogens oldLook', smiles='CO').options:
return app
dash_duo.wait_for_element(_FIRST_LINE_SELECTOR)
return app
text = dash_duo.wait_for_element('div > div > div:nth-child(2) > svg > g > text')
assert text.get_attribute('innerHTML') == 'O'
|
dash-bio
|
positive
|
def add_value(self, signal, x, y):
if not self.is_displayed_signal(signal):
return
if signal not in self.values:
graph = Graph()
self.values[signal] = graph
else:
graph = self.values[signal]
last_x = graph.x[-1]
if self.break_time_uninit:
<DeepExtract>
if self.break_time <= 0:
self.break_time = None
elif type(x) == datetime.datetime:
self.half_break_time = datetime.timedelta(seconds=self.break_time / 2)
self.break_time = datetime.timedelta(seconds=self.break_time)
else:
self.half_break_time = self.break_time / 2
self.break_time_uninit = False
</DeepExtract>
if self.break_time and last_x + self.break_time < x:
x_break = last_x + self.half_break_time
graph.x.append(x_break)
graph.y.append(None)
graph.x.append(x)
graph.y.append(y)
|
def add_value(self, signal, x, y):
if not self.is_displayed_signal(signal):
return
if signal not in self.values:
graph = Graph()
self.values[signal] = graph
else:
graph = self.values[signal]
last_x = graph.x[-1]
if self.break_time_uninit:
if self.break_time <= 0:
self.break_time = None
elif type(x) == datetime.datetime:
self.half_break_time = datetime.timedelta(seconds=self.break_time / 2)
self.break_time = datetime.timedelta(seconds=self.break_time)
else:
self.half_break_time = self.break_time / 2
self.break_time_uninit = False
if self.break_time and last_x + self.break_time < x:
x_break = last_x + self.half_break_time
graph.x.append(x_break)
graph.y.append(None)
graph.x.append(x)
graph.y.append(y)
|
cantools
|
positive
|
def __iter__(self):
try:
it = super(NamedTupleCursor, self).__iter__()
t = next(it)
nt = self.Record
if nt is None:
<DeepExtract>
key = tuple((d[0] for d in self.description)) if self.description else ()
nt = self.Record = self._cached_make_nt(key)
</DeepExtract>
yield nt._make(t)
while True:
yield nt._make(next(it))
except StopIteration:
return
|
def __iter__(self):
try:
it = super(NamedTupleCursor, self).__iter__()
t = next(it)
nt = self.Record
if nt is None:
key = tuple((d[0] for d in self.description)) if self.description else ()
nt = self.Record = self._cached_make_nt(key)
yield nt._make(t)
while True:
yield nt._make(next(it))
except StopIteration:
return
|
CaseHarvester
|
positive
|
def segment_hmm(cnarr, method, window=None, variants=None, processes=1):
"""Segment bins by Hidden Markov Model.
Use Viterbi method to infer copy number segments from sequential data.
With b-allele frequencies ('baf' column in `cnarr`), jointly segment
log-ratios and b-allele frequencies across a chromosome.
Parameters
----------
cnarr : CopyNumArray
The bin-level data to segment.
method : string
One of 'hmm' (3 states, flexible means), 'hmm-tumor' (5 states, flexible
means), 'hmm-germline' (3 states, fixed means).
Results
-------
segarr : CopyNumArray
The segmented data.
"""
orig_log2 = cnarr['log2'].values.copy()
cnarr['log2'] = cnarr.smooth_log2()
logging.info('Building model from observations')
<DeepExtract>
assert method in ('hmm-tumor', 'hmm-germline', 'hmm')
observations = as_observation_matrix(cnarr.autosomes())
stdev = biweight_midvariance(np.concatenate(observations), initial=0)
if method == 'hmm-germline':
state_names = ['loss', 'neutral', 'gain']
distributions = [pom.NormalDistribution(-1.0, stdev, frozen=True), pom.NormalDistribution(0.0, stdev, frozen=True), pom.NormalDistribution(0.585, stdev, frozen=True)]
elif method == 'hmm-tumor':
state_names = ['del', 'loss', 'neutral', 'gain', 'amp']
distributions = [pom.NormalDistribution(-2.0, stdev, frozen=False), pom.NormalDistribution(-0.5, stdev, frozen=False), pom.NormalDistribution(0.0, stdev, frozen=True), pom.NormalDistribution(0.3, stdev, frozen=False), pom.NormalDistribution(1.0, stdev, frozen=False)]
else:
state_names = ['loss', 'neutral', 'gain']
distributions = [pom.NormalDistribution(-1.0, stdev, frozen=False), pom.NormalDistribution(0.0, stdev, frozen=False), pom.NormalDistribution(0.585, stdev, frozen=False)]
n_states = len(distributions)
binom_coefs = scipy.special.binom(n_states - 1, range(n_states))
start_probabilities = binom_coefs / binom_coefs.sum()
transition_matrix = np.identity(n_states) * 100 + np.ones((n_states, n_states)) / n_states
model = pom.HiddenMarkovModel.from_matrix(transition_matrix, distributions, start_probabilities, state_names=state_names, name=method)
model.fit(sequences=observations, weights=[len(obs) for obs in observations], distribution_inertia=0.8, edge_inertia=0.1, pseudocount=5, use_pseudocount=True, max_iterations=100000, n_jobs=processes, verbose=False)
model = model
</DeepExtract>
logging.info('Predicting states from model')
<DeepExtract>
observations = [arm.log2.values for (_c, arm) in cnarr.by_arm()]
observations = observations
</DeepExtract>
states = np.concatenate([np.array(model.predict(obs, algorithm='map')) for obs in observations])
logging.info('Done, now finalizing')
logging.debug('Model states: %s', model.states)
logging.debug('Predicted states: %s', states[:100])
logging.debug(str(collections.Counter(states)))
logging.debug('Observations: %s', observations[0][:100])
logging.debug('Edges: %s', model.edges)
cnarr['log2'] = orig_log2
cnarr['probes'] = 1
segarr = squash_by_groups(cnarr, pd.Series(states, index=cnarr.data.index), by_arm=True)
if not (segarr.start < segarr.end).all():
bad_segs = segarr[segarr.start >= segarr.end]
logging.warning('Bad segments:\n%s', bad_segs.data)
return segarr
|
def segment_hmm(cnarr, method, window=None, variants=None, processes=1):
"""Segment bins by Hidden Markov Model.
Use Viterbi method to infer copy number segments from sequential data.
With b-allele frequencies ('baf' column in `cnarr`), jointly segment
log-ratios and b-allele frequencies across a chromosome.
Parameters
----------
cnarr : CopyNumArray
The bin-level data to segment.
method : string
One of 'hmm' (3 states, flexible means), 'hmm-tumor' (5 states, flexible
means), 'hmm-germline' (3 states, fixed means).
Results
-------
segarr : CopyNumArray
The segmented data.
"""
orig_log2 = cnarr['log2'].values.copy()
cnarr['log2'] = cnarr.smooth_log2()
logging.info('Building model from observations')
assert method in ('hmm-tumor', 'hmm-germline', 'hmm')
observations = as_observation_matrix(cnarr.autosomes())
stdev = biweight_midvariance(np.concatenate(observations), initial=0)
if method == 'hmm-germline':
state_names = ['loss', 'neutral', 'gain']
distributions = [pom.NormalDistribution(-1.0, stdev, frozen=True), pom.NormalDistribution(0.0, stdev, frozen=True), pom.NormalDistribution(0.585, stdev, frozen=True)]
elif method == 'hmm-tumor':
state_names = ['del', 'loss', 'neutral', 'gain', 'amp']
distributions = [pom.NormalDistribution(-2.0, stdev, frozen=False), pom.NormalDistribution(-0.5, stdev, frozen=False), pom.NormalDistribution(0.0, stdev, frozen=True), pom.NormalDistribution(0.3, stdev, frozen=False), pom.NormalDistribution(1.0, stdev, frozen=False)]
else:
state_names = ['loss', 'neutral', 'gain']
distributions = [pom.NormalDistribution(-1.0, stdev, frozen=False), pom.NormalDistribution(0.0, stdev, frozen=False), pom.NormalDistribution(0.585, stdev, frozen=False)]
n_states = len(distributions)
binom_coefs = scipy.special.binom(n_states - 1, range(n_states))
start_probabilities = binom_coefs / binom_coefs.sum()
transition_matrix = np.identity(n_states) * 100 + np.ones((n_states, n_states)) / n_states
model = pom.HiddenMarkovModel.from_matrix(transition_matrix, distributions, start_probabilities, state_names=state_names, name=method)
model.fit(sequences=observations, weights=[len(obs) for obs in observations], distribution_inertia=0.8, edge_inertia=0.1, pseudocount=5, use_pseudocount=True, max_iterations=100000, n_jobs=processes, verbose=False)
model = model
logging.info('Predicting states from model')
observations = [arm.log2.values for (_c, arm) in cnarr.by_arm()]
observations = observations
states = np.concatenate([np.array(model.predict(obs, algorithm='map')) for obs in observations])
logging.info('Done, now finalizing')
logging.debug('Model states: %s', model.states)
logging.debug('Predicted states: %s', states[:100])
logging.debug(str(collections.Counter(states)))
logging.debug('Observations: %s', observations[0][:100])
logging.debug('Edges: %s', model.edges)
cnarr['log2'] = orig_log2
cnarr['probes'] = 1
segarr = squash_by_groups(cnarr, pd.Series(states, index=cnarr.data.index), by_arm=True)
if not (segarr.start < segarr.end).all():
bad_segs = segarr[segarr.start >= segarr.end]
logging.warning('Bad segments:\n%s', bad_segs.data)
return segarr
|
cnvkit
|
positive
|
def _get_sources(self):
ret = helpers.get(self.url + '&s=hydrax', sel=True).text
<DeepExtract>
regex = 'iframe.*src="(https://.*?)"'
url = re.search(regex, ret).group(1)
data = [('hydrax', url)]
</DeepExtract>
return data
|
def _get_sources(self):
ret = helpers.get(self.url + '&s=hydrax', sel=True).text
regex = 'iframe.*src="(https://.*?)"'
url = re.search(regex, ret).group(1)
data = [('hydrax', url)]
return data
|
anime-downloader
|
positive
|
def test_train_with_val(self):
<DeepExtract>
p = CalamariTestScenario.default_trainer_params()
train = PageXML(images=[os.path.join(this_dir, 'data', 'avicanon_pagexml', f'006.{img_suffix}'), os.path.join(this_dir, 'data', 'avicanon_pagexml', f'007.{img_suffix}')], preload=preload)
if with_split:
p.gen = CalamariSplitTrainerPipelineParams(validation_split_ratio=0.5, train=train)
elif True:
p.gen.val = PageXML(images=[os.path.join(this_dir, 'data', 'avicanon_pagexml', f'008.{img_suffix}')], preload=preload)
p.gen.train = train
p.gen.__post_init__()
else:
p.gen = CalamariTrainOnlyPipelineParams(train=train)
p.gen.setup.val.batch_size = 1
p.gen.setup.val.num_processes = 1
p.gen.setup.train.batch_size = 1
p.gen.setup.train.num_processes = 1
p.epochs = 1
p.samples_per_epoch = 2
p.scenario.data.pre_proc.run_parallel = False
p.scenario.data.input_channels = channels
p.scenario.data.__post_init__()
p.scenario.__post_init__()
p.__post_init__()
trainer_params = p
</DeepExtract>
with tempfile.TemporaryDirectory() as d:
trainer_params.output_dir = d
main(trainer_params)
|
def test_train_with_val(self):
p = CalamariTestScenario.default_trainer_params()
train = PageXML(images=[os.path.join(this_dir, 'data', 'avicanon_pagexml', f'006.{img_suffix}'), os.path.join(this_dir, 'data', 'avicanon_pagexml', f'007.{img_suffix}')], preload=preload)
if with_split:
p.gen = CalamariSplitTrainerPipelineParams(validation_split_ratio=0.5, train=train)
elif True:
p.gen.val = PageXML(images=[os.path.join(this_dir, 'data', 'avicanon_pagexml', f'008.{img_suffix}')], preload=preload)
p.gen.train = train
p.gen.__post_init__()
else:
p.gen = CalamariTrainOnlyPipelineParams(train=train)
p.gen.setup.val.batch_size = 1
p.gen.setup.val.num_processes = 1
p.gen.setup.train.batch_size = 1
p.gen.setup.train.num_processes = 1
p.epochs = 1
p.samples_per_epoch = 2
p.scenario.data.pre_proc.run_parallel = False
p.scenario.data.input_channels = channels
p.scenario.data.__post_init__()
p.scenario.__post_init__()
p.__post_init__()
trainer_params = p
with tempfile.TemporaryDirectory() as d:
trainer_params.output_dir = d
main(trainer_params)
|
calamari
|
positive
|
def create_dataflow(self):
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
self._wk = pe.Workflow(name='custom_datagrabber')
self._dg = pe.Node(nio.DataGrabber(outfields=self.outfields, infields=self._get_infields(), sort_filelist=self.sort), name='datagrabber')
<DeepExtract>
self._node_added = False
set_dict = {}
for f in self.fields:
if not f.iterable:
set_dict[f.name] = f.values
else:
it = self._add_iterable(f)
self._node_added = True
self._wk.connect(it, f.name, self._dg, f.name)
self._dg.inputs.trait_set(**set_dict)
</DeepExtract>
self._dg.inputs.base_directory = self.base_directory
self._dg.inputs.field_template = self.field_template
self._dg.inputs.template_args = self.template_args
self._dg.inputs.template = self.template
if not self._node_added:
self._wk.add_nodes([self._dg])
return self._wk
|
def create_dataflow(self):
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
self._wk = pe.Workflow(name='custom_datagrabber')
self._dg = pe.Node(nio.DataGrabber(outfields=self.outfields, infields=self._get_infields(), sort_filelist=self.sort), name='datagrabber')
self._node_added = False
set_dict = {}
for f in self.fields:
if not f.iterable:
set_dict[f.name] = f.values
else:
it = self._add_iterable(f)
self._node_added = True
self._wk.connect(it, f.name, self._dg, f.name)
self._dg.inputs.trait_set(**set_dict)
self._dg.inputs.base_directory = self.base_directory
self._dg.inputs.field_template = self.field_template
self._dg.inputs.template_args = self.template_args
self._dg.inputs.template = self.template
if not self._node_added:
self._wk.add_nodes([self._dg])
return self._wk
|
BrainImagingPipelines
|
positive
|
def restrict_representation(self, id, repr: e2cnn.group.Representation) -> e2cnn.group.Representation:
"""
Restrict the input :class:`~e2cnn.group.Representation` to the subgroup identified by ``id``.
Any representation :math:`\\rho : G \\to \\GL{\\R^n}` can be uniquely restricted to a representation
of a subgroup :math:`H < G` by restricting its domain of definition:
.. math ::
\\Res{H}{G}(\\rho): H \\to \\GL{{\\R}^n},\\ h \\mapsto \\rho\\big|_H(h)
.. seealso ::
Check the documentation of the method :meth:`~e2cnn.group.Group.subgroup()` of the group used to see
the available subgroups and accepted ids.
Args:
id: identifier of the subgroup
repr (Representation): the representation to restrict
Returns:
the restricted representation
"""
assert repr.group == self
<DeepExtract>
pass
</DeepExtract>
irreps_changes_of_basis = []
irreps = []
for irr in repr.irreps:
<DeepExtract>
pass
</DeepExtract>
size = self.irreps[irr].size
assert irrep_cob.shape == (size, size)
irreps_changes_of_basis.append(irrep_cob)
irreps += reduced_irreps
irreps_changes_of_basis = sparse.block_diag(irreps_changes_of_basis, format='csc')
change_of_basis = repr.change_of_basis @ irreps_changes_of_basis
name = f'{self.name}:{repr.name}'
resr = e2cnn.group.Representation(sg, name, irreps, change_of_basis, repr.supported_nonlinearities)
if resr.is_trivial() and 'pointwise' not in repr.supported_nonlinearities:
resr.supported_nonlinearities.add('pointwise')
return resr
|
def restrict_representation(self, id, repr: e2cnn.group.Representation) -> e2cnn.group.Representation:
"""
Restrict the input :class:`~e2cnn.group.Representation` to the subgroup identified by ``id``.
Any representation :math:`\\rho : G \\to \\GL{\\R^n}` can be uniquely restricted to a representation
of a subgroup :math:`H < G` by restricting its domain of definition:
.. math ::
\\Res{H}{G}(\\rho): H \\to \\GL{{\\R}^n},\\ h \\mapsto \\rho\\big|_H(h)
.. seealso ::
Check the documentation of the method :meth:`~e2cnn.group.Group.subgroup()` of the group used to see
the available subgroups and accepted ids.
Args:
id: identifier of the subgroup
repr (Representation): the representation to restrict
Returns:
the restricted representation
"""
assert repr.group == self
pass
irreps_changes_of_basis = []
irreps = []
for irr in repr.irreps:
pass
size = self.irreps[irr].size
assert irrep_cob.shape == (size, size)
irreps_changes_of_basis.append(irrep_cob)
irreps += reduced_irreps
irreps_changes_of_basis = sparse.block_diag(irreps_changes_of_basis, format='csc')
change_of_basis = repr.change_of_basis @ irreps_changes_of_basis
name = f'{self.name}:{repr.name}'
resr = e2cnn.group.Representation(sg, name, irreps, change_of_basis, repr.supported_nonlinearities)
if resr.is_trivial() and 'pointwise' not in repr.supported_nonlinearities:
resr.supported_nonlinearities.add('pointwise')
return resr
|
e2cnn
|
positive
|
def update_rule_definition(self, component: DefinedComponent, rule_set: str, name: str, value: str, ns: str, class_: str) -> None:
"""Update rule definition."""
if value is not None and len(value):
<DeepExtract>
rval = None
for prop in component.props:
if prop.remarks == rule_set and prop.name == name:
rval = prop
break
prop = rval
</DeepExtract>
if prop:
if prop.value == value:
return
logger.debug(f'update-rule: {rule_set} {name} {prop.value} -> {value}')
prop.value = value
else:
<DeepExtract>
prop_add = Property(name=name, value=value, ns=ns, class_=class_, remarks=rule_set)
last = 0
for (index, prop) in enumerate(component.props):
if prop.remarks == rule_set:
last = index
props = []
for (index, prop) in enumerate(component.props):
if prop_add:
if index > last:
props.append(prop_add)
prop_add = None
logger.debug(f'add-prop (last): {rule_set} {name} {prop.value} ->> {value}')
elif prop_add.remarks == prop.remarks:
if CsvColumn.get_order(prop.name) > CsvColumn.get_order(prop_add.name):
props.append(prop_add)
prop_add = None
logger.debug(f'add-prop (order): {rule_set} {name} {prop.value} ->> {value}')
props.append(prop)
component.props = props
</DeepExtract>
else:
<DeepExtract>
props = []
for prop in component.props:
if prop.remarks == rule_set and prop.name == name:
logger.debug(f'delete-prop: {rule_set} {name} {prop.value}')
else:
props.append(prop)
component.props = props
</DeepExtract>
|
def update_rule_definition(self, component: DefinedComponent, rule_set: str, name: str, value: str, ns: str, class_: str) -> None:
"""Update rule definition."""
if value is not None and len(value):
rval = None
for prop in component.props:
if prop.remarks == rule_set and prop.name == name:
rval = prop
break
prop = rval
if prop:
if prop.value == value:
return
logger.debug(f'update-rule: {rule_set} {name} {prop.value} -> {value}')
prop.value = value
else:
prop_add = Property(name=name, value=value, ns=ns, class_=class_, remarks=rule_set)
last = 0
for (index, prop) in enumerate(component.props):
if prop.remarks == rule_set:
last = index
props = []
for (index, prop) in enumerate(component.props):
if prop_add:
if index > last:
props.append(prop_add)
prop_add = None
logger.debug(f'add-prop (last): {rule_set} {name} {prop.value} ->> {value}')
elif prop_add.remarks == prop.remarks:
if CsvColumn.get_order(prop.name) > CsvColumn.get_order(prop_add.name):
props.append(prop_add)
prop_add = None
logger.debug(f'add-prop (order): {rule_set} {name} {prop.value} ->> {value}')
props.append(prop)
component.props = props
else:
props = []
for prop in component.props:
if prop.remarks == rule_set and prop.name == name:
logger.debug(f'delete-prop: {rule_set} {name} {prop.value}')
else:
props.append(prop)
component.props = props
|
compliance-trestle
|
positive
|
def verify_lane_tangent_vector() -> None:
"""Debug low confidence lane tangent predictions.
I noticed that the confidence score of lane direction is
pretty low (almost zero) in some logs
"""
POSE_FILE_DIR = '../debug_lane_tangent'
log_ids = ['033669d3-3d6b-3d3d-bd93-7985d86653ea', '028d5cb1-f74d-366c-85ad-84fde69b0fd3']
avm = ArgoverseMap()
city_name = 'PIT'
for log_id in log_ids:
print(f'On {log_id}')
pose_fpaths = glob.glob(f'{POSE_FILE_DIR}/{log_id}/poses/city_SE3_egovehicle_*.json')
num_poses = len(pose_fpaths)
egovehicle_xy_arr = np.zeros((num_poses, 2))
for (i, pose_fpath) in enumerate(pose_fpaths):
json_data = read_json_file(pose_fpath)
egovehicle_xy_arr[i, 0] = json_data['translation'][0]
egovehicle_xy_arr[i, 1] = json_data['translation'][1]
for (i, query_xy_city_coords) in enumerate(egovehicle_xy_arr[::10, :]):
query_xy_city_coords = np.array([3116.8282170094944, 1817.1269613456188])
query_xy_city_coords = np.array([3304.7072308190845, 1993.1670162837597])
(lane_dir_vector, confidence) = avm.get_lane_direction(query_xy_city_coords, city_name, visualize=False)
print(f'\t{i}: {confidence}')
visualize = True
if visualize:
fig = plt.figure(figsize=(22.5, 8))
ax = fig.add_subplot(111)
dx = lane_dir_vector[0] * 20
dy = lane_dir_vector[1] * 20
plt.arrow(query_xy_city_coords[0], query_xy_city_coords[1], dx, dy, color='r', width=0.3, zorder=2)
(query_x, query_y) = query_xy_city_coords
ax.scatter([query_x], [query_y], 100, color='k', marker='.')
<DeepExtract>
nearby_lane_ids = avm.get_lane_ids_in_xy_bbox(query_x, query_y, city_name, radius)
for nearby_lane_id in nearby_lane_ids:
halluc_lane_polygon = avm.get_lane_segment_polygon(nearby_lane_id, city_name)
plot_lane_segment_patch(halluc_lane_polygon, ax, color=patch_color, alpha=0.3)
plt.text(halluc_lane_polygon[:, 0].mean(), halluc_lane_polygon[:, 1].mean(), str(nearby_lane_id))
</DeepExtract>
ax.axis('equal')
plt.show()
plt.close('all')
|
def verify_lane_tangent_vector() -> None:
"""Debug low confidence lane tangent predictions.
I noticed that the confidence score of lane direction is
pretty low (almost zero) in some logs
"""
POSE_FILE_DIR = '../debug_lane_tangent'
log_ids = ['033669d3-3d6b-3d3d-bd93-7985d86653ea', '028d5cb1-f74d-366c-85ad-84fde69b0fd3']
avm = ArgoverseMap()
city_name = 'PIT'
for log_id in log_ids:
print(f'On {log_id}')
pose_fpaths = glob.glob(f'{POSE_FILE_DIR}/{log_id}/poses/city_SE3_egovehicle_*.json')
num_poses = len(pose_fpaths)
egovehicle_xy_arr = np.zeros((num_poses, 2))
for (i, pose_fpath) in enumerate(pose_fpaths):
json_data = read_json_file(pose_fpath)
egovehicle_xy_arr[i, 0] = json_data['translation'][0]
egovehicle_xy_arr[i, 1] = json_data['translation'][1]
for (i, query_xy_city_coords) in enumerate(egovehicle_xy_arr[::10, :]):
query_xy_city_coords = np.array([3116.8282170094944, 1817.1269613456188])
query_xy_city_coords = np.array([3304.7072308190845, 1993.1670162837597])
(lane_dir_vector, confidence) = avm.get_lane_direction(query_xy_city_coords, city_name, visualize=False)
print(f'\t{i}: {confidence}')
visualize = True
if visualize:
fig = plt.figure(figsize=(22.5, 8))
ax = fig.add_subplot(111)
dx = lane_dir_vector[0] * 20
dy = lane_dir_vector[1] * 20
plt.arrow(query_xy_city_coords[0], query_xy_city_coords[1], dx, dy, color='r', width=0.3, zorder=2)
(query_x, query_y) = query_xy_city_coords
ax.scatter([query_x], [query_y], 100, color='k', marker='.')
nearby_lane_ids = avm.get_lane_ids_in_xy_bbox(query_x, query_y, city_name, radius)
for nearby_lane_id in nearby_lane_ids:
halluc_lane_polygon = avm.get_lane_segment_polygon(nearby_lane_id, city_name)
plot_lane_segment_patch(halluc_lane_polygon, ax, color=patch_color, alpha=0.3)
plt.text(halluc_lane_polygon[:, 0].mean(), halluc_lane_polygon[:, 1].mean(), str(nearby_lane_id))
ax.axis('equal')
plt.show()
plt.close('all')
|
argoverse-api
|
positive
|
def fetch_callback(channel, method, header, body):
try:
id = json.loads(body)['harvest_object_id']
log.info('Received harvest object id: %s' % id)
except KeyError:
log.error('No harvest object id received')
channel.basic_ack(method.delivery_tag)
return False
try:
obj = HarvestObject.get(id)
except sqlalchemy.exc.DatabaseError:
log.exception('Connection Error during fetch of job %s', id)
model.Session.remove()
return
if not obj:
log.error('Harvest object does not exist: %s' % id)
channel.basic_ack(method.delivery_tag)
return False
obj.retry_times += 1
obj.save()
if obj.retry_times >= 5:
obj.state = 'ERROR'
obj.save()
log.error('Too many consecutive retries for object {0}'.format(obj.id))
channel.basic_ack(method.delivery_tag)
return False
job = HarvestJob.get(obj.harvest_job_id)
if job.status == 'Finished':
obj.state = 'ERROR'
obj.report_status = 'errored'
obj.save()
log.error('Job {0} was aborted or timed out, object {1} set to error'.format(job.id, obj.id))
channel.basic_ack(method.delivery_tag)
return False
for harvester in PluginImplementations(IHarvester):
if harvester.info()['name'] == obj.source.type:
<DeepExtract>
obj.fetch_started = datetime.datetime.utcnow()
obj.state = 'FETCH'
obj.save()
success_fetch = harvester.fetch_stage(obj)
obj.fetch_finished = datetime.datetime.utcnow()
obj.save()
if success_fetch is True:
obj.import_started = datetime.datetime.utcnow()
obj.state = 'IMPORT'
obj.save()
success_import = harvester.import_stage(obj)
obj.import_finished = datetime.datetime.utcnow()
if success_import:
obj.state = 'COMPLETE'
if success_import == 'unchanged':
obj.report_status = 'not modified'
obj.save()
return
else:
obj.state = 'ERROR'
obj.save()
elif success_fetch == 'unchanged':
obj.state = 'COMPLETE'
obj.report_status = 'not modified'
obj.save()
return
else:
obj.state = 'ERROR'
obj.save()
if obj.state == 'ERROR':
obj.report_status = 'errored'
elif obj.current is False:
obj.report_status = 'deleted'
elif len(model.Session.query(HarvestObject).filter_by(package_id=obj.package_id).limit(2).all()) == 2:
obj.report_status = 'updated'
else:
obj.report_status = 'added'
obj.save()
</DeepExtract>
model.Session.remove()
channel.basic_ack(method.delivery_tag)
|
def fetch_callback(channel, method, header, body):
try:
id = json.loads(body)['harvest_object_id']
log.info('Received harvest object id: %s' % id)
except KeyError:
log.error('No harvest object id received')
channel.basic_ack(method.delivery_tag)
return False
try:
obj = HarvestObject.get(id)
except sqlalchemy.exc.DatabaseError:
log.exception('Connection Error during fetch of job %s', id)
model.Session.remove()
return
if not obj:
log.error('Harvest object does not exist: %s' % id)
channel.basic_ack(method.delivery_tag)
return False
obj.retry_times += 1
obj.save()
if obj.retry_times >= 5:
obj.state = 'ERROR'
obj.save()
log.error('Too many consecutive retries for object {0}'.format(obj.id))
channel.basic_ack(method.delivery_tag)
return False
job = HarvestJob.get(obj.harvest_job_id)
if job.status == 'Finished':
obj.state = 'ERROR'
obj.report_status = 'errored'
obj.save()
log.error('Job {0} was aborted or timed out, object {1} set to error'.format(job.id, obj.id))
channel.basic_ack(method.delivery_tag)
return False
for harvester in PluginImplementations(IHarvester):
if harvester.info()['name'] == obj.source.type:
obj.fetch_started = datetime.datetime.utcnow()
obj.state = 'FETCH'
obj.save()
success_fetch = harvester.fetch_stage(obj)
obj.fetch_finished = datetime.datetime.utcnow()
obj.save()
if success_fetch is True:
obj.import_started = datetime.datetime.utcnow()
obj.state = 'IMPORT'
obj.save()
success_import = harvester.import_stage(obj)
obj.import_finished = datetime.datetime.utcnow()
if success_import:
obj.state = 'COMPLETE'
if success_import == 'unchanged':
obj.report_status = 'not modified'
obj.save()
return
else:
obj.state = 'ERROR'
obj.save()
elif success_fetch == 'unchanged':
obj.state = 'COMPLETE'
obj.report_status = 'not modified'
obj.save()
return
else:
obj.state = 'ERROR'
obj.save()
if obj.state == 'ERROR':
obj.report_status = 'errored'
elif obj.current is False:
obj.report_status = 'deleted'
elif len(model.Session.query(HarvestObject).filter_by(package_id=obj.package_id).limit(2).all()) == 2:
obj.report_status = 'updated'
else:
obj.report_status = 'added'
obj.save()
model.Session.remove()
channel.basic_ack(method.delivery_tag)
|
ckanext-harvest
|
positive
|
def build_toc_etree(self, div, toc_list):
if self.config['title']:
header = etree.SubElement(div, 'span')
header.attrib['class'] = 'toctitle'
header.text = self.config['title']
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, 'ul')
for item in toc_list:
li = etree.SubElement(ul, 'li')
link = etree.SubElement(li, 'a')
link.text = item.get('name', '')
link.attrib['href'] = '#' + item.get('id', '')
if item['children']:
<DeepExtract>
ul = etree.SubElement(li, 'ul')
for item in item['children']:
li = etree.SubElement(ul, 'li')
link = etree.SubElement(li, 'a')
link.text = item.get('name', '')
link.attrib['href'] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
</DeepExtract>
return ul
return build_etree_ul(toc_list, div)
|
def build_toc_etree(self, div, toc_list):
if self.config['title']:
header = etree.SubElement(div, 'span')
header.attrib['class'] = 'toctitle'
header.text = self.config['title']
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, 'ul')
for item in toc_list:
li = etree.SubElement(ul, 'li')
link = etree.SubElement(li, 'a')
link.text = item.get('name', '')
link.attrib['href'] = '#' + item.get('id', '')
if item['children']:
ul = etree.SubElement(li, 'ul')
for item in item['children']:
li = etree.SubElement(ul, 'li')
link = etree.SubElement(li, 'a')
link.text = item.get('name', '')
link.attrib['href'] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
return ul
return build_etree_ul(toc_list, div)
|
appengine-gcs-blobstore-python
|
positive
|
def test_allo(self):
<DeepExtract>
self.client_connect()
self.client.login(user='nobody', passwd='nobody')
</DeepExtract>
self.assertEqual(self.client.sendcmd('allo 250'), '202 No storage allocation necessary.')
|
def test_allo(self):
self.client_connect()
self.client.login(user='nobody', passwd='nobody')
self.assertEqual(self.client.sendcmd('allo 250'), '202 No storage allocation necessary.')
|
conpot
|
positive
|
def test_getitem(self):
""" Colr.__getitem__ should grab escape codes before and after. """
<DeepExtract>
exampleargs = {'fore': {'fore': self.random_color()}, 'fore-back': {'fore': self.random_color(), 'back': self.random_color()}, 'fore-back-style': {'fore': self.random_color(), 'back': self.random_color(), 'style': self.random_style()}, 'hex-fore': {'fore': self.random_hex(with_hash=False)}, 'hex-fore-back': {'fore': self.random_hex(with_hash=False), 'back': self.random_hex(with_hash=False)}, 'hex-fore-back-style': {'fore': self.random_hex(with_hash=False), 'back': self.random_hex(with_hash=False), 'style': self.random_style()}, 'hex-hash-fore': {'fore': self.random_hex(with_hash=True)}, 'hex-hash-fore-back': {'fore': self.random_hex(with_hash=True), 'back': self.random_hex(with_hash=True)}, 'hex-hash-fore-back-style': {'fore': self.random_hex(with_hash=True), 'back': self.random_hex(with_hash=True), 'style': self.random_style()}, 'rgb-fore': {'fore': self.random_rgb()}, 'rgb-fore-back': {'fore': self.random_rgb(), 'back': self.random_rgb()}, 'rgb-fore-back-style': {'fore': self.random_rgb(), 'back': self.random_rgb(), 'style': self.random_style()}}
</DeepExtract>
for stylename in codes['style']:
exampleargs['style_{}'.format(stylename)] = {'fore': self.random_color(), 'back': self.random_color(), 'style': stylename}
for (argtype, kwargs) in exampleargs.items():
index = random.randint(0, len(argtype) - 1)
clr = Colr(argtype, **kwargs)
clr_s = clr[index]
self.assertCallEqual(clr_s, Colr(argtype[index], **kwargs), func=Colr.__getitem__, args=(clr, index), kwargs=kwargs, msg='Failed to keep color codes for __getitem__.')
|
def test_getitem(self):
""" Colr.__getitem__ should grab escape codes before and after. """
exampleargs = {'fore': {'fore': self.random_color()}, 'fore-back': {'fore': self.random_color(), 'back': self.random_color()}, 'fore-back-style': {'fore': self.random_color(), 'back': self.random_color(), 'style': self.random_style()}, 'hex-fore': {'fore': self.random_hex(with_hash=False)}, 'hex-fore-back': {'fore': self.random_hex(with_hash=False), 'back': self.random_hex(with_hash=False)}, 'hex-fore-back-style': {'fore': self.random_hex(with_hash=False), 'back': self.random_hex(with_hash=False), 'style': self.random_style()}, 'hex-hash-fore': {'fore': self.random_hex(with_hash=True)}, 'hex-hash-fore-back': {'fore': self.random_hex(with_hash=True), 'back': self.random_hex(with_hash=True)}, 'hex-hash-fore-back-style': {'fore': self.random_hex(with_hash=True), 'back': self.random_hex(with_hash=True), 'style': self.random_style()}, 'rgb-fore': {'fore': self.random_rgb()}, 'rgb-fore-back': {'fore': self.random_rgb(), 'back': self.random_rgb()}, 'rgb-fore-back-style': {'fore': self.random_rgb(), 'back': self.random_rgb(), 'style': self.random_style()}}
for stylename in codes['style']:
exampleargs['style_{}'.format(stylename)] = {'fore': self.random_color(), 'back': self.random_color(), 'style': stylename}
for (argtype, kwargs) in exampleargs.items():
index = random.randint(0, len(argtype) - 1)
clr = Colr(argtype, **kwargs)
clr_s = clr[index]
self.assertCallEqual(clr_s, Colr(argtype[index], **kwargs), func=Colr.__getitem__, args=(clr, index), kwargs=kwargs, msg='Failed to keep color codes for __getitem__.')
|
colr
|
positive
|
def flash_erase_block(addr=0):
<DeepExtract>
sir(2)
send_tms(1)
send_tms(0)
send_tms(0)
jtag.swspi.write(none)
jtag.swspi.write(wrenable[:-1])
send_data_byte_reverse(wrenable[-1], 1, 8)
send_tms(0)
send_tms(1)
send_tms(1)
send_tms(0)
</DeepExtract>
<DeepExtract>
retry = 1001
while retry > 0:
user1_send(none, read_status)
user1_send_recv(none, status)
if int(status[1]) & 1 == 0:
break
sleep_ms(1)
retry -= 1
if retry <= 0:
print('error %d flash status 0x%02X & 1 != 0' % (1001, status[1]))
</DeepExtract>
req = magic + bytearray([0, 32, flash_erase_cmd, addr >> 16, addr >> 8, addr])
<DeepExtract>
sir(2)
send_tms(1)
send_tms(0)
send_tms(0)
jtag.swspi.write(none)
jtag.swspi.write(req[:-1])
send_data_byte_reverse(req[-1], 1, 8)
send_tms(0)
send_tms(1)
send_tms(1)
send_tms(0)
</DeepExtract>
<DeepExtract>
retry = 2002
while retry > 0:
user1_send(none, read_status)
user1_send_recv(none, status)
if int(status[1]) & 1 == 0:
break
sleep_ms(1)
retry -= 1
if retry <= 0:
print('error %d flash status 0x%02X & 1 != 0' % (2002, status[1]))
</DeepExtract>
|
def flash_erase_block(addr=0):
sir(2)
send_tms(1)
send_tms(0)
send_tms(0)
jtag.swspi.write(none)
jtag.swspi.write(wrenable[:-1])
send_data_byte_reverse(wrenable[-1], 1, 8)
send_tms(0)
send_tms(1)
send_tms(1)
send_tms(0)
retry = 1001
while retry > 0:
user1_send(none, read_status)
user1_send_recv(none, status)
if int(status[1]) & 1 == 0:
break
sleep_ms(1)
retry -= 1
if retry <= 0:
print('error %d flash status 0x%02X & 1 != 0' % (1001, status[1]))
req = magic + bytearray([0, 32, flash_erase_cmd, addr >> 16, addr >> 8, addr])
sir(2)
send_tms(1)
send_tms(0)
send_tms(0)
jtag.swspi.write(none)
jtag.swspi.write(req[:-1])
send_data_byte_reverse(req[-1], 1, 8)
send_tms(0)
send_tms(1)
send_tms(1)
send_tms(0)
retry = 2002
while retry > 0:
user1_send(none, read_status)
user1_send_recv(none, status)
if int(status[1]) & 1 == 0:
break
sleep_ms(1)
retry -= 1
if retry <= 0:
print('error %d flash status 0x%02X & 1 != 0' % (2002, status[1]))
|
esp32ecp5
|
positive
|
def test_more_intricate_hierarchy(self):
recon = BottomUpReconciliator()
<DeepExtract>
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
</DeepExtract>
recon = TopDownReconciliator()
recon.fit(self.series_complex)
<DeepExtract>
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
</DeepExtract>
recon = MinTReconciliator('ols')
recon.fit(self.series_complex)
<DeepExtract>
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
</DeepExtract>
recon = MinTReconciliator('wls_struct')
recon.fit(self.series_complex)
<DeepExtract>
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
</DeepExtract>
recon = MinTReconciliator('wls_val')
recon.fit(self.series_complex)
<DeepExtract>
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
</DeepExtract>
|
def test_more_intricate_hierarchy(self):
recon = BottomUpReconciliator()
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
recon = TopDownReconciliator()
recon.fit(self.series_complex)
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
recon = MinTReconciliator('ols')
recon.fit(self.series_complex)
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
recon = MinTReconciliator('wls_struct')
recon.fit(self.series_complex)
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
recon = MinTReconciliator('wls_val')
recon.fit(self.series_complex)
reconciled = recon.transform(self.series_complex)
def _assert_comps(comp, comps):
np.testing.assert_almost_equal(reconciled[comp].values(copy=False), sum((reconciled[c] for c in comps)).values(copy=False))
_assert_comps('a', ['ax', 'ay'])
_assert_comps('b', ['bx', 'by'])
_assert_comps('x', ['ax', 'bx'])
_assert_comps('y', ['ay', 'by'])
_assert_comps('total', ['ax', 'ay', 'bx', 'by'])
_assert_comps('total', ['a', 'b'])
_assert_comps('total', ['x', 'y'])
|
darts
|
positive
|
def environment_of_unit(self, unit):
""" [UNIT]. -- show environment parts """
<DeepExtract>
try:
conf = self.load_sysd_unit_conf(unit)
if conf is not None:
conf = conf
conf = self.load_sysd_template_conf(unit)
if conf is not None:
conf = conf
conf = self.load_sysv_unit_conf(unit)
if conf is not None:
conf = conf
except Exception as e:
logg.warning('%s not loaded: %s', unit, e)
conf = None
</DeepExtract>
if conf is None:
logg.error('Unit %s could not be found.', unit)
self.error |= NOT_FOUND
return None
return self.get_env(conf)
|
def environment_of_unit(self, unit):
""" [UNIT]. -- show environment parts """
try:
conf = self.load_sysd_unit_conf(unit)
if conf is not None:
conf = conf
conf = self.load_sysd_template_conf(unit)
if conf is not None:
conf = conf
conf = self.load_sysv_unit_conf(unit)
if conf is not None:
conf = conf
except Exception as e:
logg.warning('%s not loaded: %s', unit, e)
conf = None
if conf is None:
logg.error('Unit %s could not be found.', unit)
self.error |= NOT_FOUND
return None
return self.get_env(conf)
|
docker-systemctl-images
|
positive
|
def __init__(self, arguments):
WorkFlowSettings.__init__(self, arguments)
<DeepExtract>
subject_id = arguments['<subject>']
self.subject = Subject(self.work_dir, subject_id)
</DeepExtract>
self.fmri_label = arguments['<task_label>']
<DeepExtract>
results_dir = os.path.join(self.subject.atlas_space_dir, 'Results', self.fmri_label)
log = os.path.join(results_dir, 'ciftify_subject_fmri.log')
if os.path.exists(log):
logger.error('Subject output already exits.\n To force rerun, delete or rename the logfile:\n\t{}'.format(log))
sys.exit(1)
if not os.path.exists(results_dir):
ciftify.utils.make_dir(results_dir)
(self.results_dir, self.log) = (results_dir, log)
</DeepExtract>
<DeepExtract>
if not os.path.isfile(arguments['<func.nii.gz>']):
logger.error('fMRI input {} does not exist :(..Exiting'.format(arguments['<func.nii.gz>']))
sys.exit(1)
num_TR = first_word(get_stdout(['fslval', arguments['<func.nii.gz>'], 'dim4']))
TR_in_ms = first_word(get_stdout(['fslval', arguments['<func.nii.gz>'], 'pixdim4']))
(self.func_4D, self.num_TR, self.TR_in_ms) = (arguments['<func.nii.gz>'], num_TR, TR_in_ms)
</DeepExtract>
<DeepExtract>
self.func_ref = ReferenceVolume(arguments['--func-ref'])
</DeepExtract>
<DeepExtract>
if arguments['--T1w-anat']:
anat_input = ciftify.meants.NibInput(arguments['--T1w-anat'])
if not anat_input.type == 'nifti':
logger.critical('--T1w-anat input {} is not a readable nifti file.'.format(arguments['--T1w-anat']))
sys.exit(1)
self.registered_to_this_T1w = anat_input.path
else:
self.registered_to_this_T1w = None
</DeepExtract>
<DeepExtract>
self.smoothing = Smoothing(arguments['--SmoothingFWHM'])
</DeepExtract>
self.dilate_percent_below = arguments['--DilateBelowPct']
self.dilate_factor = 10
<DeepExtract>
self.diagnostics = DiagnosticSettings(self.results_dir, arguments['--OutputSurfDiagnostics'])
</DeepExtract>
self.already_atlas_transformed = arguments['--already-in-MNI']
self.run_flirt = arguments['--FLIRT-to-T1w']
<DeepExtract>
registration_config = WorkFlowSettings.get_config_entry(self, 'registration')
for key in ['src_dir', 'dest_dir', 'xfms_dir']:
try:
subfolders = registration_config[key]
except KeyError:
logger.critical('registration config does not contain expectedkey {}'.format(key))
sys.exit(1)
registration_config[key] = os.path.join(self.subject.path, subfolders)
resolution_config = WorkFlowSettings.get_resolution_config(self, method, standard_res)
registration_config.update(resolution_config)
self.vol_reg = registration_config
</DeepExtract>
<DeepExtract>
surf_mode = ciftify.utils.get_registration_mode(arguments)
if surf_mode == 'MSMSulc':
RegName = 'MSMSulc'
elif surf_mode == 'FS':
RegName = 'reg.reg_LR'
else:
logger.critical('--reg-name argument must be "FS" or "MSMSulc"')
sys.exit(1)
L_sphere = os.path.join(self.subject.atlas_native_dir, '{}.L.sphere.{}.native.surf.gii'.format(self.subject.id, RegName))
if not os.path.exists(L_sphere):
logger.critical('Registration Sphere {} not found'.format(L_sphere))
sys.exit(1)
self.surf_reg = RegName
</DeepExtract>
self.grayord_res = self.grayord_res[0]
|
def __init__(self, arguments):
WorkFlowSettings.__init__(self, arguments)
subject_id = arguments['<subject>']
self.subject = Subject(self.work_dir, subject_id)
self.fmri_label = arguments['<task_label>']
results_dir = os.path.join(self.subject.atlas_space_dir, 'Results', self.fmri_label)
log = os.path.join(results_dir, 'ciftify_subject_fmri.log')
if os.path.exists(log):
logger.error('Subject output already exits.\n To force rerun, delete or rename the logfile:\n\t{}'.format(log))
sys.exit(1)
if not os.path.exists(results_dir):
ciftify.utils.make_dir(results_dir)
(self.results_dir, self.log) = (results_dir, log)
if not os.path.isfile(arguments['<func.nii.gz>']):
logger.error('fMRI input {} does not exist :(..Exiting'.format(arguments['<func.nii.gz>']))
sys.exit(1)
num_TR = first_word(get_stdout(['fslval', arguments['<func.nii.gz>'], 'dim4']))
TR_in_ms = first_word(get_stdout(['fslval', arguments['<func.nii.gz>'], 'pixdim4']))
(self.func_4D, self.num_TR, self.TR_in_ms) = (arguments['<func.nii.gz>'], num_TR, TR_in_ms)
self.func_ref = ReferenceVolume(arguments['--func-ref'])
if arguments['--T1w-anat']:
anat_input = ciftify.meants.NibInput(arguments['--T1w-anat'])
if not anat_input.type == 'nifti':
logger.critical('--T1w-anat input {} is not a readable nifti file.'.format(arguments['--T1w-anat']))
sys.exit(1)
self.registered_to_this_T1w = anat_input.path
else:
self.registered_to_this_T1w = None
self.smoothing = Smoothing(arguments['--SmoothingFWHM'])
self.dilate_percent_below = arguments['--DilateBelowPct']
self.dilate_factor = 10
self.diagnostics = DiagnosticSettings(self.results_dir, arguments['--OutputSurfDiagnostics'])
self.already_atlas_transformed = arguments['--already-in-MNI']
self.run_flirt = arguments['--FLIRT-to-T1w']
registration_config = WorkFlowSettings.get_config_entry(self, 'registration')
for key in ['src_dir', 'dest_dir', 'xfms_dir']:
try:
subfolders = registration_config[key]
except KeyError:
logger.critical('registration config does not contain expectedkey {}'.format(key))
sys.exit(1)
registration_config[key] = os.path.join(self.subject.path, subfolders)
resolution_config = WorkFlowSettings.get_resolution_config(self, method, standard_res)
registration_config.update(resolution_config)
self.vol_reg = registration_config
surf_mode = ciftify.utils.get_registration_mode(arguments)
if surf_mode == 'MSMSulc':
RegName = 'MSMSulc'
elif surf_mode == 'FS':
RegName = 'reg.reg_LR'
else:
logger.critical('--reg-name argument must be "FS" or "MSMSulc"')
sys.exit(1)
L_sphere = os.path.join(self.subject.atlas_native_dir, '{}.L.sphere.{}.native.surf.gii'.format(self.subject.id, RegName))
if not os.path.exists(L_sphere):
logger.critical('Registration Sphere {} not found'.format(L_sphere))
sys.exit(1)
self.surf_reg = RegName
self.grayord_res = self.grayord_res[0]
|
ciftify
|
positive
|
def insert(self, node, data, ch):
if node is None:
return self.createNode(data)
if ch == 'L':
<DeepExtract>
if node.left is None:
node.left.left = self.createNode(data)
if ch == 'L':
node.left.left = self.insert(node.left.left, data, ch)
node.left.left = node.left.left
else:
node.left.right = self.insert(node.left.right, data, ch)
node.left.left = node.left.right
</DeepExtract>
return node.left
else:
<DeepExtract>
if node.right is None:
node.right.right = self.createNode(data)
if ch == 'L':
node.right.left = self.insert(node.right.left, data, ch)
node.right.right = node.right.left
else:
node.right.right = self.insert(node.right.right, data, ch)
node.right.right = node.right.right
</DeepExtract>
return node.right
|
def insert(self, node, data, ch):
if node is None:
return self.createNode(data)
if ch == 'L':
if node.left is None:
node.left.left = self.createNode(data)
if ch == 'L':
node.left.left = self.insert(node.left.left, data, ch)
node.left.left = node.left.left
else:
node.left.right = self.insert(node.left.right, data, ch)
node.left.left = node.left.right
return node.left
else:
if node.right is None:
node.right.right = self.createNode(data)
if ch == 'L':
node.right.left = self.insert(node.right.left, data, ch)
node.right.right = node.right.left
else:
node.right.right = self.insert(node.right.right, data, ch)
node.right.right = node.right.right
return node.right
|
Competitive-Coding-Platforms
|
positive
|
def format_and_wrap_text(self):
"""
"""
blf.size(0, self.text_size, self.text_dpi)
self.raw_text = self.raw_text.replace('\r', '')
useful_width = self.width - 2 * self.border
if '\n' not in self.raw_text and self.txt_width(self.raw_text) < useful_width:
self.text_lines = [self.raw_text]
return
def split_word(line):
"""
splits off first word, including any leading spaces
"""
if not line:
return (None, None)
sp = line[0] == ' '
for (i, c) in enumerate(line):
if c == ' ':
if not sp:
return (line[:i], line[i:])
continue
sp = False
return (line, '')
def wrap_line(line):
"""
takes a string, returns a list of strings, corresponding to wrapped
text of the specified pixel width, given current BLF settings
"""
line = line.rstrip()
if self.txt_width(line) < useful_width:
lines = [line]
return lines
lines = []
working = ''
while line:
<DeepExtract>
if not line:
(word, line) = (None, None)
sp = line[0] == ' '
for (i, c) in enumerate(line):
if c == ' ':
if not sp:
(word, line) = (line[:i], line[i:])
continue
sp = False
(word, line) = (line, '')
</DeepExtract>
if self.txt_width(working + word) < useful_width:
working += word
else:
lines += [working]
working = ' ' + word.strip()
lines += [working]
return lines
self.text_lines = []
for line in self.raw_text.split('\n'):
self.text_lines += wrap_line(line)
<DeepExtract>
blf.size(0, self.text_size, self.text_dpi)
line_height = self.txt_height('A')
line_count = len(self.text_lines)
self.height = line_count * (line_height + self.spacer) + 2 * self.border
</DeepExtract>
<DeepExtract>
blf.size(0, self.text_size, self.text_dpi)
max_width = max((self.txt_width(line) for line in self.text_lines))
self.width = max_width + 2 * self.border
</DeepExtract>
|
def format_and_wrap_text(self):
"""
"""
blf.size(0, self.text_size, self.text_dpi)
self.raw_text = self.raw_text.replace('\r', '')
useful_width = self.width - 2 * self.border
if '\n' not in self.raw_text and self.txt_width(self.raw_text) < useful_width:
self.text_lines = [self.raw_text]
return
def split_word(line):
"""
splits off first word, including any leading spaces
"""
if not line:
return (None, None)
sp = line[0] == ' '
for (i, c) in enumerate(line):
if c == ' ':
if not sp:
return (line[:i], line[i:])
continue
sp = False
return (line, '')
def wrap_line(line):
"""
takes a string, returns a list of strings, corresponding to wrapped
text of the specified pixel width, given current BLF settings
"""
line = line.rstrip()
if self.txt_width(line) < useful_width:
lines = [line]
return lines
lines = []
working = ''
while line:
if not line:
(word, line) = (None, None)
sp = line[0] == ' '
for (i, c) in enumerate(line):
if c == ' ':
if not sp:
(word, line) = (line[:i], line[i:])
continue
sp = False
(word, line) = (line, '')
if self.txt_width(working + word) < useful_width:
working += word
else:
lines += [working]
working = ' ' + word.strip()
lines += [working]
return lines
self.text_lines = []
for line in self.raw_text.split('\n'):
self.text_lines += wrap_line(line)
blf.size(0, self.text_size, self.text_dpi)
line_height = self.txt_height('A')
line_count = len(self.text_lines)
self.height = line_count * (line_height + self.spacer) + 2 * self.border
blf.size(0, self.text_size, self.text_dpi)
max_width = max((self.txt_width(line) for line in self.text_lines))
self.width = max_width + 2 * self.border
|
BlenderPro
|
positive
|
def test_non_gold_aliases(self):
"""
Test non-gold aliases.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [{'aliases': ['alias3', 'alias4', 'alias3'], 'qids': ['Q1', 'Q4', 'Q1'], 'sent_idx_unq': 0, 'sentence': 'alias3 alias4 alias3', 'char_spans': [[0, 6], [7, 13], [14, 20]], 'gold': [True, False, False]}]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, 0, 0]]
use_weak_label = True
<DeepExtract>
(X_dict, Y_dict) = (defaultdict(list), defaultdict(list))
for (i, inp) in enumerate(input_data):
guids = get_uniq_ids(i, len(inp['aliases']), self.guid_dtype)
for j in range(len(inp['aliases'])):
if use_weak_label is False and inp['gold'][j] is False:
continue
X_dict['guids'].append(guids[j])
(tok_sent, new_span) = adjust_sentence(inp['sentence'], max_seq_len, max_window_len, inp['char_spans'][j], self.tokenizer)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict['sent_idx'].append(i)
X_dict['subsent_idx'].append(j)
if inp['aliases'][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp['aliases'][j])
X_dict['alias_idx'].append(alias_idx)
X_dict['alias_orig_list_pos'].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp['qids'][j])
else:
gold_eid = -1
if gold_cand_idx[i][j] == 0 and (not self.args.data_config.train_in_candidates):
gold_eid = 0
X_dict['gold_eid'].append(gold_eid)
X_dict['for_dump_gold_eid'].append(self.entity_symbols.get_eid(inp['qids'][j]))
word_mask_scores = [-1 for _ in range(len(tok_sent['input_ids']))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent['input_ids'])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [1 for _ in range(new_span_start, new_span_end)]
X_dict['word_qid_cnt_mask_score'].append(word_mask_scores)
X_dict['for_dump_gold_cand_K_idx_train'].append(gold_cand_idx_train[i][j])
Y_dict['gold_cand_K_idx'].append(gold_cand_idx[i][j])
for k in X_dict:
if k == 'guids':
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
(X_dict, Y_dict) = (X_dict, Y_dict)
</DeepExtract>
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(self.args, name='Bootleg_test', dataset=self.temp_file_name, use_weak_label=use_weak_label, load_entity_data=False, tokenizer=self.tokenizer, entity_symbols=self.entity_symbols, dataset_threads=1, split='train', is_bert=True)
<DeepExtract>
for k in X_dict:
assert k in dataset.X_dict, f'Key is {k}'
if type(X_dict[k]) is torch.Tensor:
assert torch.allclose(X_dict[k].float(), dataset.X_dict[k].float()), f'Key is {k}'
elif type(X_dict[k]) is np.ndarray:
np.testing.assert_array_equal(X_dict[k], dataset.X_dict[k])
elif type(X_dict[k]) is list:
assert len(X_dict[k]) == len(dataset.X_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(X_dict[k], dataset.X_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert X_dict[k] == dataset.X_dict[k], f'Key is {k}'
for k in dataset.X_dict:
assert k in X_dict, f'Key is {k}'
</DeepExtract>
<DeepExtract>
for k in Y_dict:
assert k in dataset.Y_dict, f'Key is {k}'
if type(Y_dict[k]) is torch.Tensor:
assert torch.allclose(Y_dict[k].float(), dataset.Y_dict[k].float()), f'Key is {k}'
elif type(Y_dict[k]) is np.ndarray:
np.testing.assert_array_equal(Y_dict[k], dataset.Y_dict[k])
elif type(Y_dict[k]) is list:
assert len(Y_dict[k]) == len(dataset.Y_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(Y_dict[k], dataset.Y_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert Y_dict[k] == dataset.Y_dict[k], f'Key is {k}'
for k in dataset.Y_dict:
assert k in Y_dict, f'Key is {k}'
</DeepExtract>
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [{'aliases': ['alias3', 'alias4', 'alias3'], 'qids': ['Q1', 'Q4', 'Q1'], 'sent_idx_unq': 0, 'sentence': 'alias3 alias4 alias3', 'char_spans': [[0, 6], [7, 13], [14, 20]], 'gold': [False, False, False]}]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, 0, 0]]
use_weak_label = True
<DeepExtract>
(X_dict, Y_dict) = (defaultdict(list), defaultdict(list))
for (i, inp) in enumerate(input_data):
guids = get_uniq_ids(i, len(inp['aliases']), self.guid_dtype)
for j in range(len(inp['aliases'])):
if use_weak_label is False and inp['gold'][j] is False:
continue
X_dict['guids'].append(guids[j])
(tok_sent, new_span) = adjust_sentence(inp['sentence'], max_seq_len, max_window_len, inp['char_spans'][j], self.tokenizer)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict['sent_idx'].append(i)
X_dict['subsent_idx'].append(j)
if inp['aliases'][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp['aliases'][j])
X_dict['alias_idx'].append(alias_idx)
X_dict['alias_orig_list_pos'].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp['qids'][j])
else:
gold_eid = -1
if gold_cand_idx[i][j] == 0 and (not self.args.data_config.train_in_candidates):
gold_eid = 0
X_dict['gold_eid'].append(gold_eid)
X_dict['for_dump_gold_eid'].append(self.entity_symbols.get_eid(inp['qids'][j]))
word_mask_scores = [-1 for _ in range(len(tok_sent['input_ids']))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent['input_ids'])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [1 for _ in range(new_span_start, new_span_end)]
X_dict['word_qid_cnt_mask_score'].append(word_mask_scores)
X_dict['for_dump_gold_cand_K_idx_train'].append(gold_cand_idx_train[i][j])
Y_dict['gold_cand_K_idx'].append(gold_cand_idx[i][j])
for k in X_dict:
if k == 'guids':
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
(X_dict, Y_dict) = (X_dict, Y_dict)
</DeepExtract>
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(self.args, name='Bootleg_test', dataset=self.temp_file_name, use_weak_label=use_weak_label, load_entity_data=False, tokenizer=self.tokenizer, entity_symbols=self.entity_symbols, dataset_threads=1, split='train', is_bert=True)
<DeepExtract>
for k in X_dict:
assert k in dataset.X_dict, f'Key is {k}'
if type(X_dict[k]) is torch.Tensor:
assert torch.allclose(X_dict[k].float(), dataset.X_dict[k].float()), f'Key is {k}'
elif type(X_dict[k]) is np.ndarray:
np.testing.assert_array_equal(X_dict[k], dataset.X_dict[k])
elif type(X_dict[k]) is list:
assert len(X_dict[k]) == len(dataset.X_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(X_dict[k], dataset.X_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert X_dict[k] == dataset.X_dict[k], f'Key is {k}'
for k in dataset.X_dict:
assert k in X_dict, f'Key is {k}'
</DeepExtract>
<DeepExtract>
for k in Y_dict:
assert k in dataset.Y_dict, f'Key is {k}'
if type(Y_dict[k]) is torch.Tensor:
assert torch.allclose(Y_dict[k].float(), dataset.Y_dict[k].float()), f'Key is {k}'
elif type(Y_dict[k]) is np.ndarray:
np.testing.assert_array_equal(Y_dict[k], dataset.Y_dict[k])
elif type(Y_dict[k]) is list:
assert len(Y_dict[k]) == len(dataset.Y_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(Y_dict[k], dataset.Y_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert Y_dict[k] == dataset.Y_dict[k], f'Key is {k}'
for k in dataset.Y_dict:
assert k in Y_dict, f'Key is {k}'
</DeepExtract>
max_seq_len = 50
max_window_len = 10
max_aliases = 1
split = 'dev'
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [{'aliases': ['alias3', 'alias4', 'alias3'], 'qids': ['Q1', 'Q4', 'Q1'], 'sent_idx_unq': 0, 'sentence': 'alias3 alias4 alias3', 'char_spans': [[0, 6], [7, 13], [14, 20]], 'gold': [True, False, False]}]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, -1, -1]]
use_weak_label = True
<DeepExtract>
(X_dict, Y_dict) = (defaultdict(list), defaultdict(list))
for (i, inp) in enumerate(input_data):
guids = get_uniq_ids(i, len(inp['aliases']), self.guid_dtype)
for j in range(len(inp['aliases'])):
if use_weak_label is False and inp['gold'][j] is False:
continue
X_dict['guids'].append(guids[j])
(tok_sent, new_span) = adjust_sentence(inp['sentence'], max_seq_len, max_window_len, inp['char_spans'][j], self.tokenizer)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict['sent_idx'].append(i)
X_dict['subsent_idx'].append(j)
if inp['aliases'][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp['aliases'][j])
X_dict['alias_idx'].append(alias_idx)
X_dict['alias_orig_list_pos'].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp['qids'][j])
else:
gold_eid = -1
if gold_cand_idx[i][j] == 0 and (not self.args.data_config.train_in_candidates):
gold_eid = 0
X_dict['gold_eid'].append(gold_eid)
X_dict['for_dump_gold_eid'].append(self.entity_symbols.get_eid(inp['qids'][j]))
word_mask_scores = [-1 for _ in range(len(tok_sent['input_ids']))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent['input_ids'])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [1 for _ in range(new_span_start, new_span_end)]
X_dict['word_qid_cnt_mask_score'].append(word_mask_scores)
X_dict['for_dump_gold_cand_K_idx_train'].append(gold_cand_idx_train[i][j])
Y_dict['gold_cand_K_idx'].append(gold_cand_idx[i][j])
for k in X_dict:
if k == 'guids':
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
(X_dict, Y_dict) = (X_dict, Y_dict)
</DeepExtract>
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(self.args, name='Bootleg_test', dataset=self.temp_file_name, use_weak_label=use_weak_label, load_entity_data=False, tokenizer=self.tokenizer, entity_symbols=self.entity_symbols, dataset_threads=1, split=split, is_bert=True)
<DeepExtract>
for k in X_dict:
assert k in dataset.X_dict, f'Key is {k}'
if type(X_dict[k]) is torch.Tensor:
assert torch.allclose(X_dict[k].float(), dataset.X_dict[k].float()), f'Key is {k}'
elif type(X_dict[k]) is np.ndarray:
np.testing.assert_array_equal(X_dict[k], dataset.X_dict[k])
elif type(X_dict[k]) is list:
assert len(X_dict[k]) == len(dataset.X_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(X_dict[k], dataset.X_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert X_dict[k] == dataset.X_dict[k], f'Key is {k}'
for k in dataset.X_dict:
assert k in X_dict, f'Key is {k}'
</DeepExtract>
<DeepExtract>
for k in Y_dict:
assert k in dataset.Y_dict, f'Key is {k}'
if type(Y_dict[k]) is torch.Tensor:
assert torch.allclose(Y_dict[k].float(), dataset.Y_dict[k].float()), f'Key is {k}'
elif type(Y_dict[k]) is np.ndarray:
np.testing.assert_array_equal(Y_dict[k], dataset.Y_dict[k])
elif type(Y_dict[k]) is list:
assert len(Y_dict[k]) == len(dataset.Y_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(Y_dict[k], dataset.Y_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert Y_dict[k] == dataset.Y_dict[k], f'Key is {k}'
for k in dataset.Y_dict:
assert k in Y_dict, f'Key is {k}'
</DeepExtract>
max_seq_len = 50
max_window_len = 10
max_aliases = 1
split = 'dev'
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [{'aliases': ['alias3', 'alias4', 'alias3'], 'qids': ['Q1', 'Q4', 'Q1'], 'sent_idx_unq': 0, 'sentence': 'alias3 alias4 alias3', 'char_spans': [[0, 6], [7, 13], [14, 20]], 'gold': [False, False, False]}]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[-1, -1, -1]]
use_weak_label = True
<DeepExtract>
(X_dict, Y_dict) = (defaultdict(list), defaultdict(list))
for (i, inp) in enumerate(input_data):
guids = get_uniq_ids(i, len(inp['aliases']), self.guid_dtype)
for j in range(len(inp['aliases'])):
if use_weak_label is False and inp['gold'][j] is False:
continue
X_dict['guids'].append(guids[j])
(tok_sent, new_span) = adjust_sentence(inp['sentence'], max_seq_len, max_window_len, inp['char_spans'][j], self.tokenizer)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict['sent_idx'].append(i)
X_dict['subsent_idx'].append(j)
if inp['aliases'][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp['aliases'][j])
X_dict['alias_idx'].append(alias_idx)
X_dict['alias_orig_list_pos'].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp['qids'][j])
else:
gold_eid = -1
if gold_cand_idx[i][j] == 0 and (not self.args.data_config.train_in_candidates):
gold_eid = 0
X_dict['gold_eid'].append(gold_eid)
X_dict['for_dump_gold_eid'].append(self.entity_symbols.get_eid(inp['qids'][j]))
word_mask_scores = [-1 for _ in range(len(tok_sent['input_ids']))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent['input_ids'])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [1 for _ in range(new_span_start, new_span_end)]
X_dict['word_qid_cnt_mask_score'].append(word_mask_scores)
X_dict['for_dump_gold_cand_K_idx_train'].append(gold_cand_idx_train[i][j])
Y_dict['gold_cand_K_idx'].append(gold_cand_idx[i][j])
for k in X_dict:
if k == 'guids':
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
(X_dict, Y_dict) = (X_dict, Y_dict)
</DeepExtract>
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(self.args, name='Bootleg_test', dataset=self.temp_file_name, use_weak_label=use_weak_label, load_entity_data=False, tokenizer=self.tokenizer, entity_symbols=self.entity_symbols, dataset_threads=1, split=split, is_bert=True)
<DeepExtract>
for k in X_dict:
assert k in dataset.X_dict, f'Key is {k}'
if type(X_dict[k]) is torch.Tensor:
assert torch.allclose(X_dict[k].float(), dataset.X_dict[k].float()), f'Key is {k}'
elif type(X_dict[k]) is np.ndarray:
np.testing.assert_array_equal(X_dict[k], dataset.X_dict[k])
elif type(X_dict[k]) is list:
assert len(X_dict[k]) == len(dataset.X_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(X_dict[k], dataset.X_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert X_dict[k] == dataset.X_dict[k], f'Key is {k}'
for k in dataset.X_dict:
assert k in X_dict, f'Key is {k}'
</DeepExtract>
<DeepExtract>
for k in Y_dict:
assert k in dataset.Y_dict, f'Key is {k}'
if type(Y_dict[k]) is torch.Tensor:
assert torch.allclose(Y_dict[k].float(), dataset.Y_dict[k].float()), f'Key is {k}'
elif type(Y_dict[k]) is np.ndarray:
np.testing.assert_array_equal(Y_dict[k], dataset.Y_dict[k])
elif type(Y_dict[k]) is list:
assert len(Y_dict[k]) == len(dataset.Y_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(Y_dict[k], dataset.Y_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert Y_dict[k] == dataset.Y_dict[k], f'Key is {k}'
for k in dataset.Y_dict:
assert k in Y_dict, f'Key is {k}'
</DeepExtract>
|
def test_non_gold_aliases(self):
"""
Test non-gold aliases.
ENTITY SYMBOLS
{
"multi word alias2":[["Q2",5.0],["Q1",3.0],["Q4",2.0]],
"alias1":[["Q1",10.0],["Q4",6.0]],
"alias3":[["Q1",30.0]],
"alias4":[["Q4",20.0],["Q3",15.0],["Q2",1.0]]
}
"""
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [{'aliases': ['alias3', 'alias4', 'alias3'], 'qids': ['Q1', 'Q4', 'Q1'], 'sent_idx_unq': 0, 'sentence': 'alias3 alias4 alias3', 'char_spans': [[0, 6], [7, 13], [14, 20]], 'gold': [True, False, False]}]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, 0, 0]]
use_weak_label = True
(X_dict, Y_dict) = (defaultdict(list), defaultdict(list))
for (i, inp) in enumerate(input_data):
guids = get_uniq_ids(i, len(inp['aliases']), self.guid_dtype)
for j in range(len(inp['aliases'])):
if use_weak_label is False and inp['gold'][j] is False:
continue
X_dict['guids'].append(guids[j])
(tok_sent, new_span) = adjust_sentence(inp['sentence'], max_seq_len, max_window_len, inp['char_spans'][j], self.tokenizer)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict['sent_idx'].append(i)
X_dict['subsent_idx'].append(j)
if inp['aliases'][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp['aliases'][j])
X_dict['alias_idx'].append(alias_idx)
X_dict['alias_orig_list_pos'].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp['qids'][j])
else:
gold_eid = -1
if gold_cand_idx[i][j] == 0 and (not self.args.data_config.train_in_candidates):
gold_eid = 0
X_dict['gold_eid'].append(gold_eid)
X_dict['for_dump_gold_eid'].append(self.entity_symbols.get_eid(inp['qids'][j]))
word_mask_scores = [-1 for _ in range(len(tok_sent['input_ids']))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent['input_ids'])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [1 for _ in range(new_span_start, new_span_end)]
X_dict['word_qid_cnt_mask_score'].append(word_mask_scores)
X_dict['for_dump_gold_cand_K_idx_train'].append(gold_cand_idx_train[i][j])
Y_dict['gold_cand_K_idx'].append(gold_cand_idx[i][j])
for k in X_dict:
if k == 'guids':
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
(X_dict, Y_dict) = (X_dict, Y_dict)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(self.args, name='Bootleg_test', dataset=self.temp_file_name, use_weak_label=use_weak_label, load_entity_data=False, tokenizer=self.tokenizer, entity_symbols=self.entity_symbols, dataset_threads=1, split='train', is_bert=True)
for k in X_dict:
assert k in dataset.X_dict, f'Key is {k}'
if type(X_dict[k]) is torch.Tensor:
assert torch.allclose(X_dict[k].float(), dataset.X_dict[k].float()), f'Key is {k}'
elif type(X_dict[k]) is np.ndarray:
np.testing.assert_array_equal(X_dict[k], dataset.X_dict[k])
elif type(X_dict[k]) is list:
assert len(X_dict[k]) == len(dataset.X_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(X_dict[k], dataset.X_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert X_dict[k] == dataset.X_dict[k], f'Key is {k}'
for k in dataset.X_dict:
assert k in X_dict, f'Key is {k}'
for k in Y_dict:
assert k in dataset.Y_dict, f'Key is {k}'
if type(Y_dict[k]) is torch.Tensor:
assert torch.allclose(Y_dict[k].float(), dataset.Y_dict[k].float()), f'Key is {k}'
elif type(Y_dict[k]) is np.ndarray:
np.testing.assert_array_equal(Y_dict[k], dataset.Y_dict[k])
elif type(Y_dict[k]) is list:
assert len(Y_dict[k]) == len(dataset.Y_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(Y_dict[k], dataset.Y_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert Y_dict[k] == dataset.Y_dict[k], f'Key is {k}'
for k in dataset.Y_dict:
assert k in Y_dict, f'Key is {k}'
max_seq_len = 50
max_window_len = 10
max_aliases = 1
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [{'aliases': ['alias3', 'alias4', 'alias3'], 'qids': ['Q1', 'Q4', 'Q1'], 'sent_idx_unq': 0, 'sentence': 'alias3 alias4 alias3', 'char_spans': [[0, 6], [7, 13], [14, 20]], 'gold': [False, False, False]}]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, 0, 0]]
use_weak_label = True
(X_dict, Y_dict) = (defaultdict(list), defaultdict(list))
for (i, inp) in enumerate(input_data):
guids = get_uniq_ids(i, len(inp['aliases']), self.guid_dtype)
for j in range(len(inp['aliases'])):
if use_weak_label is False and inp['gold'][j] is False:
continue
X_dict['guids'].append(guids[j])
(tok_sent, new_span) = adjust_sentence(inp['sentence'], max_seq_len, max_window_len, inp['char_spans'][j], self.tokenizer)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict['sent_idx'].append(i)
X_dict['subsent_idx'].append(j)
if inp['aliases'][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp['aliases'][j])
X_dict['alias_idx'].append(alias_idx)
X_dict['alias_orig_list_pos'].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp['qids'][j])
else:
gold_eid = -1
if gold_cand_idx[i][j] == 0 and (not self.args.data_config.train_in_candidates):
gold_eid = 0
X_dict['gold_eid'].append(gold_eid)
X_dict['for_dump_gold_eid'].append(self.entity_symbols.get_eid(inp['qids'][j]))
word_mask_scores = [-1 for _ in range(len(tok_sent['input_ids']))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent['input_ids'])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [1 for _ in range(new_span_start, new_span_end)]
X_dict['word_qid_cnt_mask_score'].append(word_mask_scores)
X_dict['for_dump_gold_cand_K_idx_train'].append(gold_cand_idx_train[i][j])
Y_dict['gold_cand_K_idx'].append(gold_cand_idx[i][j])
for k in X_dict:
if k == 'guids':
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
(X_dict, Y_dict) = (X_dict, Y_dict)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(self.args, name='Bootleg_test', dataset=self.temp_file_name, use_weak_label=use_weak_label, load_entity_data=False, tokenizer=self.tokenizer, entity_symbols=self.entity_symbols, dataset_threads=1, split='train', is_bert=True)
for k in X_dict:
assert k in dataset.X_dict, f'Key is {k}'
if type(X_dict[k]) is torch.Tensor:
assert torch.allclose(X_dict[k].float(), dataset.X_dict[k].float()), f'Key is {k}'
elif type(X_dict[k]) is np.ndarray:
np.testing.assert_array_equal(X_dict[k], dataset.X_dict[k])
elif type(X_dict[k]) is list:
assert len(X_dict[k]) == len(dataset.X_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(X_dict[k], dataset.X_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert X_dict[k] == dataset.X_dict[k], f'Key is {k}'
for k in dataset.X_dict:
assert k in X_dict, f'Key is {k}'
for k in Y_dict:
assert k in dataset.Y_dict, f'Key is {k}'
if type(Y_dict[k]) is torch.Tensor:
assert torch.allclose(Y_dict[k].float(), dataset.Y_dict[k].float()), f'Key is {k}'
elif type(Y_dict[k]) is np.ndarray:
np.testing.assert_array_equal(Y_dict[k], dataset.Y_dict[k])
elif type(Y_dict[k]) is list:
assert len(Y_dict[k]) == len(dataset.Y_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(Y_dict[k], dataset.Y_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert Y_dict[k] == dataset.Y_dict[k], f'Key is {k}'
for k in dataset.Y_dict:
assert k in Y_dict, f'Key is {k}'
max_seq_len = 50
max_window_len = 10
max_aliases = 1
split = 'dev'
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [{'aliases': ['alias3', 'alias4', 'alias3'], 'qids': ['Q1', 'Q4', 'Q1'], 'sent_idx_unq': 0, 'sentence': 'alias3 alias4 alias3', 'char_spans': [[0, 6], [7, 13], [14, 20]], 'gold': [True, False, False]}]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[0, -1, -1]]
use_weak_label = True
(X_dict, Y_dict) = (defaultdict(list), defaultdict(list))
for (i, inp) in enumerate(input_data):
guids = get_uniq_ids(i, len(inp['aliases']), self.guid_dtype)
for j in range(len(inp['aliases'])):
if use_weak_label is False and inp['gold'][j] is False:
continue
X_dict['guids'].append(guids[j])
(tok_sent, new_span) = adjust_sentence(inp['sentence'], max_seq_len, max_window_len, inp['char_spans'][j], self.tokenizer)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict['sent_idx'].append(i)
X_dict['subsent_idx'].append(j)
if inp['aliases'][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp['aliases'][j])
X_dict['alias_idx'].append(alias_idx)
X_dict['alias_orig_list_pos'].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp['qids'][j])
else:
gold_eid = -1
if gold_cand_idx[i][j] == 0 and (not self.args.data_config.train_in_candidates):
gold_eid = 0
X_dict['gold_eid'].append(gold_eid)
X_dict['for_dump_gold_eid'].append(self.entity_symbols.get_eid(inp['qids'][j]))
word_mask_scores = [-1 for _ in range(len(tok_sent['input_ids']))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent['input_ids'])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [1 for _ in range(new_span_start, new_span_end)]
X_dict['word_qid_cnt_mask_score'].append(word_mask_scores)
X_dict['for_dump_gold_cand_K_idx_train'].append(gold_cand_idx_train[i][j])
Y_dict['gold_cand_K_idx'].append(gold_cand_idx[i][j])
for k in X_dict:
if k == 'guids':
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
(X_dict, Y_dict) = (X_dict, Y_dict)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(self.args, name='Bootleg_test', dataset=self.temp_file_name, use_weak_label=use_weak_label, load_entity_data=False, tokenizer=self.tokenizer, entity_symbols=self.entity_symbols, dataset_threads=1, split=split, is_bert=True)
for k in X_dict:
assert k in dataset.X_dict, f'Key is {k}'
if type(X_dict[k]) is torch.Tensor:
assert torch.allclose(X_dict[k].float(), dataset.X_dict[k].float()), f'Key is {k}'
elif type(X_dict[k]) is np.ndarray:
np.testing.assert_array_equal(X_dict[k], dataset.X_dict[k])
elif type(X_dict[k]) is list:
assert len(X_dict[k]) == len(dataset.X_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(X_dict[k], dataset.X_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert X_dict[k] == dataset.X_dict[k], f'Key is {k}'
for k in dataset.X_dict:
assert k in X_dict, f'Key is {k}'
for k in Y_dict:
assert k in dataset.Y_dict, f'Key is {k}'
if type(Y_dict[k]) is torch.Tensor:
assert torch.allclose(Y_dict[k].float(), dataset.Y_dict[k].float()), f'Key is {k}'
elif type(Y_dict[k]) is np.ndarray:
np.testing.assert_array_equal(Y_dict[k], dataset.Y_dict[k])
elif type(Y_dict[k]) is list:
assert len(Y_dict[k]) == len(dataset.Y_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(Y_dict[k], dataset.Y_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert Y_dict[k] == dataset.Y_dict[k], f'Key is {k}'
for k in dataset.Y_dict:
assert k in Y_dict, f'Key is {k}'
max_seq_len = 50
max_window_len = 10
max_aliases = 1
split = 'dev'
self.args.data_config.max_aliases = max_aliases
self.args.data_config.max_seq_len = max_seq_len
self.args.data_config.max_seq_window_len = max_window_len
input_data = [{'aliases': ['alias3', 'alias4', 'alias3'], 'qids': ['Q1', 'Q4', 'Q1'], 'sent_idx_unq': 0, 'sentence': 'alias3 alias4 alias3', 'char_spans': [[0, 6], [7, 13], [14, 20]], 'gold': [False, False, False]}]
gold_cand_idx_train = [[0, 0, 0]]
gold_cand_idx = [[-1, -1, -1]]
use_weak_label = True
(X_dict, Y_dict) = (defaultdict(list), defaultdict(list))
for (i, inp) in enumerate(input_data):
guids = get_uniq_ids(i, len(inp['aliases']), self.guid_dtype)
for j in range(len(inp['aliases'])):
if use_weak_label is False and inp['gold'][j] is False:
continue
X_dict['guids'].append(guids[j])
(tok_sent, new_span) = adjust_sentence(inp['sentence'], max_seq_len, max_window_len, inp['char_spans'][j], self.tokenizer)
for k in tok_sent:
X_dict[k].append(tok_sent[k])
X_dict['sent_idx'].append(i)
X_dict['subsent_idx'].append(j)
if inp['aliases'][j] not in self.entity_symbols.get_all_aliases():
alias_idx = -2
else:
alias_idx = self.entity_symbols.get_alias_idx(inp['aliases'][j])
X_dict['alias_idx'].append(alias_idx)
X_dict['alias_orig_list_pos'].append(j)
if gold_cand_idx[i][j] != -1:
gold_eid = self.entity_symbols.get_eid(inp['qids'][j])
else:
gold_eid = -1
if gold_cand_idx[i][j] == 0 and (not self.args.data_config.train_in_candidates):
gold_eid = 0
X_dict['gold_eid'].append(gold_eid)
X_dict['for_dump_gold_eid'].append(self.entity_symbols.get_eid(inp['qids'][j]))
word_mask_scores = [-1 for _ in range(len(tok_sent['input_ids']))]
new_span_start = tok_sent.char_to_token(new_span[0]) + 1
if tok_sent.char_to_token(new_span[1] - 1) is None:
new_span_end = len(tok_sent['input_ids'])
else:
new_span_end = tok_sent.char_to_token(new_span[1] - 1)
word_mask_scores[new_span_start:new_span_end] = [1 for _ in range(new_span_start, new_span_end)]
X_dict['word_qid_cnt_mask_score'].append(word_mask_scores)
X_dict['for_dump_gold_cand_K_idx_train'].append(gold_cand_idx_train[i][j])
Y_dict['gold_cand_K_idx'].append(gold_cand_idx[i][j])
for k in X_dict:
if k == 'guids':
X_dict[k] = np.array(X_dict[k])
else:
X_dict[k] = torch.tensor(X_dict[k])
for k in Y_dict:
Y_dict[k] = torch.tensor(Y_dict[k])
(X_dict, Y_dict) = (X_dict, Y_dict)
utils.write_jsonl(self.temp_file_name, input_data)
dataset = BootlegDataset(self.args, name='Bootleg_test', dataset=self.temp_file_name, use_weak_label=use_weak_label, load_entity_data=False, tokenizer=self.tokenizer, entity_symbols=self.entity_symbols, dataset_threads=1, split=split, is_bert=True)
for k in X_dict:
assert k in dataset.X_dict, f'Key is {k}'
if type(X_dict[k]) is torch.Tensor:
assert torch.allclose(X_dict[k].float(), dataset.X_dict[k].float()), f'Key is {k}'
elif type(X_dict[k]) is np.ndarray:
np.testing.assert_array_equal(X_dict[k], dataset.X_dict[k])
elif type(X_dict[k]) is list:
assert len(X_dict[k]) == len(dataset.X_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(X_dict[k], dataset.X_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert X_dict[k] == dataset.X_dict[k], f'Key is {k}'
for k in dataset.X_dict:
assert k in X_dict, f'Key is {k}'
for k in Y_dict:
assert k in dataset.Y_dict, f'Key is {k}'
if type(Y_dict[k]) is torch.Tensor:
assert torch.allclose(Y_dict[k].float(), dataset.Y_dict[k].float()), f'Key is {k}'
elif type(Y_dict[k]) is np.ndarray:
np.testing.assert_array_equal(Y_dict[k], dataset.Y_dict[k])
elif type(Y_dict[k]) is list:
assert len(Y_dict[k]) == len(dataset.Y_dict[k]), f'Key is {k}'
for (item_l, item_r) in zip(Y_dict[k], dataset.Y_dict[k]):
if type(item_l) is np.ndarray:
for (subitem_l, subitem_r) in zip(item_l.tolist()[0], item_r.tolist()):
if type(subitem_l) is np.ndarray:
assert all(subitem_l == subitem_r), f'Key is {k}'
else:
assert subitem_l == subitem_r, f'Key is {k}'
else:
assert item_l == item_r, f'Key is {k}'
else:
assert Y_dict[k] == dataset.Y_dict[k], f'Key is {k}'
for k in dataset.Y_dict:
assert k in Y_dict, f'Key is {k}'
|
bootleg
|
positive
|
def _sock_recv(self, bufsize):
try:
return self._sock.recv(bufsize)
except ssl.SSLWantReadError:
raise BlockingIOError
except ssl.SSLWantWriteError:
<DeepExtract>
if not self._sock or self._registered_write:
return
self._registered_write = True
with self._callback_mutex:
on_socket_register_write = self.on_socket_register_write
if on_socket_register_write:
try:
on_socket_register_write(self, self._userdata, self._sock)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_socket_register_write: %s', err)
if not self.suppress_exceptions:
raise
</DeepExtract>
raise BlockingIOError
|
def _sock_recv(self, bufsize):
try:
return self._sock.recv(bufsize)
except ssl.SSLWantReadError:
raise BlockingIOError
except ssl.SSLWantWriteError:
if not self._sock or self._registered_write:
return
self._registered_write = True
with self._callback_mutex:
on_socket_register_write = self.on_socket_register_write
if on_socket_register_write:
try:
on_socket_register_write(self, self._userdata, self._sock)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_socket_register_write: %s', err)
if not self.suppress_exceptions:
raise
raise BlockingIOError
|
depthai-experiments
|
positive
|
def load_test_model(opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
vocab = checkpoint['vocab']
if inputters.old_style_vocab(vocab):
fields = inputters.load_old_vocab(vocab, opt.data_type, dynamic_dict=model_opt.copy_attn)
else:
fields = vocab
<DeepExtract>
if model_opt.model_type == 'text':
src_field = fields['src']
src_emb = build_embeddings(model_opt, src_field)
else:
src_emb = None
encoder = build_encoder(model_opt, src_emb)
tgt_field = fields['tgt']
tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)
if model_opt.share_embeddings:
assert src_field.base_field.vocab == tgt_field.base_field.vocab, 'preprocess with -share_vocab if you use share_embeddings'
tgt_emb.word_lut.weight = src_emb.word_lut.weight
if model_opt.share_position_embeddings:
tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight
decoder = build_decoder(model_opt, tgt_emb)
if use_gpu(opt) and opt.gpu is not None:
device = torch.device('cuda', opt.gpu)
elif use_gpu(opt) and (not opt.gpu):
device = torch.device('cuda')
elif not use_gpu(opt):
device = torch.device('cpu')
if model_opt.simple_fusion:
layers = 12
size = 768
heads = 12
lm_decoder_opt = copy.deepcopy(model_opt)
lm_decoder_opt.dec_layers = layers
lm_decoder_opt.use_GPT_version_ctxattn = False
lm_decoder_opt.use_GPT_version_psa = False
lm_decoder_opt.use_GPT_version_unconditional = True
lm_decoder_opt.tgt_word_vec_size = size
lm_decoder_opt.rnn_size = size
lm_decoder_opt.dec_rnn_size = size
lm_decoder_opt.transformer_ff = size * 4
lm_decoder_opt.dec_heads = heads
lm_decoder_opt.position_encoding_learned_dec = True
lm_decoder_opt.share_decoder_embeddings = True
lm_decoder_opt.dropout = 0
lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False)
logger.info(lm_decoder_emb)
lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb)
load_decoder = lm_decoder
model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder)
generator = SimpleFusionGenerator(model_opt.dec_rnn_size, lm_decoder_opt.dec_rnn_size, len(fields['tgt'].base_field.vocab))
generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight
if model_opt.share_decoder_embeddings:
generator.decoder_linear.weight = decoder.embeddings.word_lut.weight
gen_linear = generator.lm_linear
else:
load_decoder = decoder
if model_opt.unconditional:
model = onmt.models.UncondModel(decoder)
else:
model = onmt.models.NMTModel(encoder, decoder)
if not model_opt.copy_attn:
if model_opt.generator_function == 'sparsemax':
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
if model_opt.padded_vocab_fix_me_later:
gen_func = nn.Sequential(PadGen(), gen_func)
generator = nn.Sequential(nn.Linear(model_opt.dec_rnn_size, len(fields['tgt'].base_field.vocab)), Cast(torch.float32), gen_func)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
gen_linear = generator[0]
else:
tgt_base_field = fields['tgt'].base_field
vocab_size = len(tgt_base_field.vocab)
pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
if model_opt.share_decoder_embeddings:
generator.linear.weight = decoder.embeddings.word_lut.weight
gen_linear = generator.linear
if model_opt.encdec_share_params:
for (name, p) in decoder.named_parameters():
if 'ctx' in name or 'context' in name:
continue
pointer = encoder
attrs = name.split('.')
for attr_name in attrs[:-1]:
pointer = getattr(pointer, attr_name)
setattr(pointer, attrs[-1], p)
if checkpoint is not None:
if 'gpt2_params' not in checkpoint and 'enc_model' not in checkpoint:
def fix_key(s):
s = re.sub('(.*)\\.layer_norm((_\\d+)?)\\.b_2', '\\1.layer_norm\\2.bias', s)
s = re.sub('(.*)\\.layer_norm((_\\d+)?)\\.a_2', '\\1.layer_norm\\2.weight', s)
model = s
checkpoint['model'] = {fix_key(k): v for (k, v) in checkpoint['model'].items()}
if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from:
for p in decoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in encoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if model_opt.ctx_weight_param:
for (name, p) in decoder.named_parameters():
if 'ctx_weight' in name:
p.data.zero_()
if 'ctx_bias' in name:
p.data.fill_(-10)
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if 'gpt2_params' in checkpoint:
init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or (model_opt.GPT_representation_mode != 'none')
if init_something:
if model_opt.gpt2_init_zero:
for p in decoder.parameters():
p.data.zero_()
if model_opt.simple_fusion:
generator.decoder_linear.weight.data.zero_()
generator.decoder_linear.bias.data.zero_()
else:
for p in decoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if encoder is not None:
for p in encoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if model_opt.zero_bias_init:
gen_linear.bias.data.zero_()
if model_opt.ctx_weight_param:
for (name, p) in decoder.named_parameters():
if 'ctx_weight' in name:
p.data.zero_()
if 'ctx_bias' in name:
p.data.fill_(-10)
gen_linear.bias.data.zero_()
load_models = []
if model_opt.GPT_representation_mode != 'none':
load_embs = []
if model_opt.GPT_representation_loc in ['both', 'src']:
load_models.append(src_emb.gpt_model)
load_embs.append(src_emb)
if model_opt.GPT_representation_loc in ['both', 'tgt']:
load_models.append(tgt_emb.gpt_model)
load_embs.append(tgt_emb)
elif model_opt.gpt2_init_embanddec or model_opt.simple_fusion:
load_models = [load_decoder]
elif model_opt.gpt2_init_embandenc:
load_models = [encoder]
it_list = list(checkpoint['gpt2_params'])
for (lm_idx, load_model) in enumerate(load_models):
for (name, array) in it_list:
name = name[6:]
name = name.split('/')
assigned = False
if name[0] == 'wpe':
if model_opt.GPT_representation_mode != 'none':
pointer = load_embs[lm_idx].make_embedding.pe.pe.weight
else:
pointer = load_model.embeddings.make_embedding.pe.pe.weight
elif name[0] == 'wte':
if model_opt.GPT_representation_mode != 'none':
pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]
else:
pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]
if not model_opt.nopretrain_decemb:
pointer.append(gen_linear.weight)
if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:
pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)
elif name[0] == 'ln_f':
if name[1] == 'g':
pointer = load_model.layer_norm.weight
elif name[1] == 'b':
pointer = load_model.layer_norm.bias
else:
raise ValueError('I am missing something here!')
elif name[0][0] == 'h':
layer_num = name[0][1:]
pointer = getattr(load_model.transformer_layers, layer_num)
if name[1] == 'attn':
assigned = True
pointer = pointer.self_attn
full_data = torch.from_numpy(array)
if name[2] == 'c_attn':
end_size = full_data.shape[-1] // 3
assert full_data.shape[-1] % 3 == 0
if name[3] == 'b':
if init_something:
pointer.linear_query.bias.data = full_data[:end_size]
pointer.linear_keys.bias.data = full_data[end_size:end_size * 2]
pointer.linear_values.bias.data = full_data[end_size * 2:]
if model_opt.gpt2_params_std > 0:
pointer.linear_query.bias.orig = full_data[:end_size].clone()
pointer.linear_keys.bias.orig = full_data[end_size:end_size * 2].clone()
pointer.linear_values.bias.orig = full_data[end_size * 2:].clone()
elif name[3] == 'w':
if init_something:
pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()
pointer.linear_keys.weight.data = full_data[:, end_size:end_size * 2].t().contiguous()
pointer.linear_values.weight.data = full_data[:, end_size * 2:].t().contiguous()
if model_opt.gpt2_params_std > 0:
pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()
pointer.linear_keys.weight.orig = full_data[:, end_size:end_size * 2].t().contiguous().clone()
pointer.linear_values.weight.orig = full_data[:, end_size * 2:].t().contiguous().clone()
else:
raise ValueError('I am missing something here!')
elif name[2] == 'c_proj':
if name[3] == 'b':
if init_something:
pointer.final_linear.bias.data = full_data
if model_opt.gpt2_params_std > 0:
pointer.final_linear.bias.orig = full_data.clone()
elif name[3] == 'w':
if init_something:
pointer.final_linear.weight.data = full_data.t().contiguous()
if model_opt.gpt2_params_std > 0:
pointer.final_linear.weight.orig = full_data.t().contiguous().clone()
else:
raise ValueError('I am missing something here!')
elif name[1] == 'ln_1' or name[1] == 'ln_2':
num = name[1][3]
pointer = getattr(pointer, 'layer_norm_' + num)
if name[2] == 'b':
pointer = pointer.bias
elif name[2] == 'g':
pointer = pointer.weight
else:
raise ValueError('I am missing something here!')
elif name[1] == 'mlp':
pointer = pointer.feed_forward
pointer = getattr(pointer, name[2])
if name[3] == 'b':
pointer = pointer.bias
elif name[3] == 'w':
pointer = pointer.weight
else:
raise ValueError('I am missing something here!')
else:
raise ValueError('I am missing something here!')
else:
raise ValueError('I am missing something here!')
if not assigned:
if name[-1] == 'w' or name[-1] == 'g':
array = array.T
if not isinstance(pointer, list):
pointer = [pointer]
for pointer_i in pointer:
target_size = int(math.ceil(array.shape[0] / 8)) * 8
padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size
padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]
try:
assert pointer_i.shape == array.shape or padded_vocab
except AssertionError as e:
e.args += (pointer_i.shape, array.shape)
raise
if init_something:
print('Initialize PyTorch weight {}'.format(name))
if padded_vocab:
pointer_i.data[:array.shape[0]] = torch.from_numpy(array)
else:
pointer_i.data = torch.from_numpy(array)
if model_opt.gpt2_params_std > 0:
if padded_vocab:
raise NotImplementedError
else:
pointer_i.orig = torch.from_numpy(array).clone()
if 'enc_model' in checkpoint:
load_dict = {k[8:]: v for (k, v) in checkpoint['enc_model'] if 'encoder' in k}
encoder.load_state_dict(load_dict, strict=True)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') and (model.encoder.embeddings is not None):
model.encoder.embeddings.load_pretrained_vectors(model_opt.pre_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(model_opt.pre_word_vecs_dec)
if model_opt.notrain_emb or model_opt.notrain_embanddec:
if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings:
model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False
if model_opt.share_embeddings:
model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False
model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False
model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False
generator[0].weight.requires_grad = False
if model_opt.notrain_genbias:
generator[0].bias.requires_grad = False
if model_opt.notrain_embanddec:
for (name, p) in load_decoder.layer_norm.named_parameters():
p.requires_grad = False
for (name, p) in load_decoder.transformer_layers.named_parameters():
if 'context' not in name and 'ctx' not in name:
p.requires_grad = False
if model_opt.onlytrainln:
for (name, p) in model.decoder.named_parameters():
if 'layer_norm' not in name:
p.requires_grad = False
for p in generator.parameters():
p.requires_grad = False
if model_opt.onlytrainoutp:
if model_opt.share_decoder_embeddings:
raise ValueError
for p in model.decoder.parameters():
p.requires_grad = False
if model_opt.simple_fusion:
for p in lm_decoder.parameters():
p.requires_grad = False
for p in generator.lm_linear.parameters():
p.requires_grad = False
model.generator = generator
model.to(device)
if model_opt.model_dtype == 'fp16':
model.half()
for p in model.parameters():
if hasattr(p, 'orig'):
p.orig = p.orig.to(device)
if model_opt.model_dtype == 'fp16':
p.orig = p.orig.half()
model = model
</DeepExtract>
if opt.fp32:
model.float()
model.eval()
model.generator.eval()
return (fields, model, model_opt)
|
def load_test_model(opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
vocab = checkpoint['vocab']
if inputters.old_style_vocab(vocab):
fields = inputters.load_old_vocab(vocab, opt.data_type, dynamic_dict=model_opt.copy_attn)
else:
fields = vocab
if model_opt.model_type == 'text':
src_field = fields['src']
src_emb = build_embeddings(model_opt, src_field)
else:
src_emb = None
encoder = build_encoder(model_opt, src_emb)
tgt_field = fields['tgt']
tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)
if model_opt.share_embeddings:
assert src_field.base_field.vocab == tgt_field.base_field.vocab, 'preprocess with -share_vocab if you use share_embeddings'
tgt_emb.word_lut.weight = src_emb.word_lut.weight
if model_opt.share_position_embeddings:
tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight
decoder = build_decoder(model_opt, tgt_emb)
if use_gpu(opt) and opt.gpu is not None:
device = torch.device('cuda', opt.gpu)
elif use_gpu(opt) and (not opt.gpu):
device = torch.device('cuda')
elif not use_gpu(opt):
device = torch.device('cpu')
if model_opt.simple_fusion:
layers = 12
size = 768
heads = 12
lm_decoder_opt = copy.deepcopy(model_opt)
lm_decoder_opt.dec_layers = layers
lm_decoder_opt.use_GPT_version_ctxattn = False
lm_decoder_opt.use_GPT_version_psa = False
lm_decoder_opt.use_GPT_version_unconditional = True
lm_decoder_opt.tgt_word_vec_size = size
lm_decoder_opt.rnn_size = size
lm_decoder_opt.dec_rnn_size = size
lm_decoder_opt.transformer_ff = size * 4
lm_decoder_opt.dec_heads = heads
lm_decoder_opt.position_encoding_learned_dec = True
lm_decoder_opt.share_decoder_embeddings = True
lm_decoder_opt.dropout = 0
lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False)
logger.info(lm_decoder_emb)
lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb)
load_decoder = lm_decoder
model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder)
generator = SimpleFusionGenerator(model_opt.dec_rnn_size, lm_decoder_opt.dec_rnn_size, len(fields['tgt'].base_field.vocab))
generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight
if model_opt.share_decoder_embeddings:
generator.decoder_linear.weight = decoder.embeddings.word_lut.weight
gen_linear = generator.lm_linear
else:
load_decoder = decoder
if model_opt.unconditional:
model = onmt.models.UncondModel(decoder)
else:
model = onmt.models.NMTModel(encoder, decoder)
if not model_opt.copy_attn:
if model_opt.generator_function == 'sparsemax':
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
if model_opt.padded_vocab_fix_me_later:
gen_func = nn.Sequential(PadGen(), gen_func)
generator = nn.Sequential(nn.Linear(model_opt.dec_rnn_size, len(fields['tgt'].base_field.vocab)), Cast(torch.float32), gen_func)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
gen_linear = generator[0]
else:
tgt_base_field = fields['tgt'].base_field
vocab_size = len(tgt_base_field.vocab)
pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
if model_opt.share_decoder_embeddings:
generator.linear.weight = decoder.embeddings.word_lut.weight
gen_linear = generator.linear
if model_opt.encdec_share_params:
for (name, p) in decoder.named_parameters():
if 'ctx' in name or 'context' in name:
continue
pointer = encoder
attrs = name.split('.')
for attr_name in attrs[:-1]:
pointer = getattr(pointer, attr_name)
setattr(pointer, attrs[-1], p)
if checkpoint is not None:
if 'gpt2_params' not in checkpoint and 'enc_model' not in checkpoint:
def fix_key(s):
s = re.sub('(.*)\\.layer_norm((_\\d+)?)\\.b_2', '\\1.layer_norm\\2.bias', s)
s = re.sub('(.*)\\.layer_norm((_\\d+)?)\\.a_2', '\\1.layer_norm\\2.weight', s)
model = s
checkpoint['model'] = {fix_key(k): v for (k, v) in checkpoint['model'].items()}
if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from:
for p in decoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in encoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if model_opt.ctx_weight_param:
for (name, p) in decoder.named_parameters():
if 'ctx_weight' in name:
p.data.zero_()
if 'ctx_bias' in name:
p.data.fill_(-10)
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if 'gpt2_params' in checkpoint:
init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or (model_opt.GPT_representation_mode != 'none')
if init_something:
if model_opt.gpt2_init_zero:
for p in decoder.parameters():
p.data.zero_()
if model_opt.simple_fusion:
generator.decoder_linear.weight.data.zero_()
generator.decoder_linear.bias.data.zero_()
else:
for p in decoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if encoder is not None:
for p in encoder.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if model_opt.zero_bias_init:
gen_linear.bias.data.zero_()
if model_opt.ctx_weight_param:
for (name, p) in decoder.named_parameters():
if 'ctx_weight' in name:
p.data.zero_()
if 'ctx_bias' in name:
p.data.fill_(-10)
gen_linear.bias.data.zero_()
load_models = []
if model_opt.GPT_representation_mode != 'none':
load_embs = []
if model_opt.GPT_representation_loc in ['both', 'src']:
load_models.append(src_emb.gpt_model)
load_embs.append(src_emb)
if model_opt.GPT_representation_loc in ['both', 'tgt']:
load_models.append(tgt_emb.gpt_model)
load_embs.append(tgt_emb)
elif model_opt.gpt2_init_embanddec or model_opt.simple_fusion:
load_models = [load_decoder]
elif model_opt.gpt2_init_embandenc:
load_models = [encoder]
it_list = list(checkpoint['gpt2_params'])
for (lm_idx, load_model) in enumerate(load_models):
for (name, array) in it_list:
name = name[6:]
name = name.split('/')
assigned = False
if name[0] == 'wpe':
if model_opt.GPT_representation_mode != 'none':
pointer = load_embs[lm_idx].make_embedding.pe.pe.weight
else:
pointer = load_model.embeddings.make_embedding.pe.pe.weight
elif name[0] == 'wte':
if model_opt.GPT_representation_mode != 'none':
pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]
else:
pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]
if not model_opt.nopretrain_decemb:
pointer.append(gen_linear.weight)
if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:
pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)
elif name[0] == 'ln_f':
if name[1] == 'g':
pointer = load_model.layer_norm.weight
elif name[1] == 'b':
pointer = load_model.layer_norm.bias
else:
raise ValueError('I am missing something here!')
elif name[0][0] == 'h':
layer_num = name[0][1:]
pointer = getattr(load_model.transformer_layers, layer_num)
if name[1] == 'attn':
assigned = True
pointer = pointer.self_attn
full_data = torch.from_numpy(array)
if name[2] == 'c_attn':
end_size = full_data.shape[-1] // 3
assert full_data.shape[-1] % 3 == 0
if name[3] == 'b':
if init_something:
pointer.linear_query.bias.data = full_data[:end_size]
pointer.linear_keys.bias.data = full_data[end_size:end_size * 2]
pointer.linear_values.bias.data = full_data[end_size * 2:]
if model_opt.gpt2_params_std > 0:
pointer.linear_query.bias.orig = full_data[:end_size].clone()
pointer.linear_keys.bias.orig = full_data[end_size:end_size * 2].clone()
pointer.linear_values.bias.orig = full_data[end_size * 2:].clone()
elif name[3] == 'w':
if init_something:
pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()
pointer.linear_keys.weight.data = full_data[:, end_size:end_size * 2].t().contiguous()
pointer.linear_values.weight.data = full_data[:, end_size * 2:].t().contiguous()
if model_opt.gpt2_params_std > 0:
pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()
pointer.linear_keys.weight.orig = full_data[:, end_size:end_size * 2].t().contiguous().clone()
pointer.linear_values.weight.orig = full_data[:, end_size * 2:].t().contiguous().clone()
else:
raise ValueError('I am missing something here!')
elif name[2] == 'c_proj':
if name[3] == 'b':
if init_something:
pointer.final_linear.bias.data = full_data
if model_opt.gpt2_params_std > 0:
pointer.final_linear.bias.orig = full_data.clone()
elif name[3] == 'w':
if init_something:
pointer.final_linear.weight.data = full_data.t().contiguous()
if model_opt.gpt2_params_std > 0:
pointer.final_linear.weight.orig = full_data.t().contiguous().clone()
else:
raise ValueError('I am missing something here!')
elif name[1] == 'ln_1' or name[1] == 'ln_2':
num = name[1][3]
pointer = getattr(pointer, 'layer_norm_' + num)
if name[2] == 'b':
pointer = pointer.bias
elif name[2] == 'g':
pointer = pointer.weight
else:
raise ValueError('I am missing something here!')
elif name[1] == 'mlp':
pointer = pointer.feed_forward
pointer = getattr(pointer, name[2])
if name[3] == 'b':
pointer = pointer.bias
elif name[3] == 'w':
pointer = pointer.weight
else:
raise ValueError('I am missing something here!')
else:
raise ValueError('I am missing something here!')
else:
raise ValueError('I am missing something here!')
if not assigned:
if name[-1] == 'w' or name[-1] == 'g':
array = array.T
if not isinstance(pointer, list):
pointer = [pointer]
for pointer_i in pointer:
target_size = int(math.ceil(array.shape[0] / 8)) * 8
padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size
padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]
try:
assert pointer_i.shape == array.shape or padded_vocab
except AssertionError as e:
e.args += (pointer_i.shape, array.shape)
raise
if init_something:
print('Initialize PyTorch weight {}'.format(name))
if padded_vocab:
pointer_i.data[:array.shape[0]] = torch.from_numpy(array)
else:
pointer_i.data = torch.from_numpy(array)
if model_opt.gpt2_params_std > 0:
if padded_vocab:
raise NotImplementedError
else:
pointer_i.orig = torch.from_numpy(array).clone()
if 'enc_model' in checkpoint:
load_dict = {k[8:]: v for (k, v) in checkpoint['enc_model'] if 'encoder' in k}
encoder.load_state_dict(load_dict, strict=True)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') and (model.encoder.embeddings is not None):
model.encoder.embeddings.load_pretrained_vectors(model_opt.pre_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(model_opt.pre_word_vecs_dec)
if model_opt.notrain_emb or model_opt.notrain_embanddec:
if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings:
model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False
if model_opt.share_embeddings:
model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False
model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False
model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False
generator[0].weight.requires_grad = False
if model_opt.notrain_genbias:
generator[0].bias.requires_grad = False
if model_opt.notrain_embanddec:
for (name, p) in load_decoder.layer_norm.named_parameters():
p.requires_grad = False
for (name, p) in load_decoder.transformer_layers.named_parameters():
if 'context' not in name and 'ctx' not in name:
p.requires_grad = False
if model_opt.onlytrainln:
for (name, p) in model.decoder.named_parameters():
if 'layer_norm' not in name:
p.requires_grad = False
for p in generator.parameters():
p.requires_grad = False
if model_opt.onlytrainoutp:
if model_opt.share_decoder_embeddings:
raise ValueError
for p in model.decoder.parameters():
p.requires_grad = False
if model_opt.simple_fusion:
for p in lm_decoder.parameters():
p.requires_grad = False
for p in generator.lm_linear.parameters():
p.requires_grad = False
model.generator = generator
model.to(device)
if model_opt.model_dtype == 'fp16':
model.half()
for p in model.parameters():
if hasattr(p, 'orig'):
p.orig = p.orig.to(device)
if model_opt.model_dtype == 'fp16':
p.orig = p.orig.half()
model = model
if opt.fp32:
model.float()
model.eval()
model.generator.eval()
return (fields, model, model_opt)
|
encoder-agnostic-adaptation
|
positive
|
def test_register_with_features_makes_lookup_succeed(self):
<DeepExtract>
cls = type('Builder_' + '_'.join(feature_list), (object,), {'features': feature_list})
self.registry.register(cls)
builder = cls
</DeepExtract>
self.assertEqual(self.registry.lookup('foo'), builder)
self.assertEqual(self.registry.lookup('bar'), builder)
|
def test_register_with_features_makes_lookup_succeed(self):
cls = type('Builder_' + '_'.join(feature_list), (object,), {'features': feature_list})
self.registry.register(cls)
builder = cls
self.assertEqual(self.registry.lookup('foo'), builder)
self.assertEqual(self.registry.lookup('bar'), builder)
|
BeautifulSoup4
|
positive
|
def visitFrom(self, node):
<DeepExtract>
self.s.write('from ')
</DeepExtract>
<DeepExtract>
self.s.write(node.modname)
</DeepExtract>
<DeepExtract>
self.s.write(' import ')
</DeepExtract>
for (mod, as_word) in node.names:
<DeepExtract>
self.s.write(mod)
</DeepExtract>
if as_word is not None:
<DeepExtract>
self.s.write(' as ')
</DeepExtract>
<DeepExtract>
self.s.write(as_word)
</DeepExtract>
<DeepExtract>
self.s.write(', ')
</DeepExtract>
<DeepExtract>
self.s.write('\n')
self.s.write(' ' * 4 * self._i)
</DeepExtract>
|
def visitFrom(self, node):
self.s.write('from ')
self.s.write(node.modname)
self.s.write(' import ')
for (mod, as_word) in node.names:
self.s.write(mod)
if as_word is not None:
self.s.write(' as ')
self.s.write(as_word)
self.s.write(', ')
self.s.write('\n')
self.s.write(' ' * 4 * self._i)
|
cheesecake
|
positive
|
def log_prob(self, x):
<DeepExtract>
(x, ldj) = LogisticDistribution.unshift_x(x, self.mu, self.sigma, self.log_sigma)
</DeepExtract>
logp = -ldj
assert torch.isnan(logp).sum() == 0, '[!] ERROR: Found NaN values in log-prob of distribution.\n' + 'NaN logp: ' + str(torch.isnan(logp).sum().item()) + '\n' + 'NaN x: ' + str(torch.isnan(x).sum().item()) + ', X(abs) max: ' + str(x.abs().max())
return logp
|
def log_prob(self, x):
(x, ldj) = LogisticDistribution.unshift_x(x, self.mu, self.sigma, self.log_sigma)
logp = -ldj
assert torch.isnan(logp).sum() == 0, '[!] ERROR: Found NaN values in log-prob of distribution.\n' + 'NaN logp: ' + str(torch.isnan(logp).sum().item()) + '\n' + 'NaN x: ' + str(torch.isnan(x).sum().item()) + ', X(abs) max: ' + str(x.abs().max())
return logp
|
CategoricalNF
|
positive
|
def save_checkpoint_and_serving_model(self, algorithm=None, env_string=None, config=None):
<DeepExtract>
with open(os.path.join(MODEL_OUTPUT_DIR, 'params.json'), 'w') as f:
json.dump(config, f, indent=2)
print('Saved model configuration.')
</DeepExtract>
<DeepExtract>
checkpoints = []
count = 0
while not checkpoints:
count += 1
for (root, directories, filenames) in os.walk(INTERMEDIATE_DIR):
for filename in filenames:
if filename.startswith('checkpoint'):
checkpoints.append(os.path.join(root, filename))
time.sleep(5)
if count >= 6:
raise RuntimeError('Failed to find checkpoint files')
checkpoints.sort(key=natural_keys)
latest_checkpoints = checkpoints[-2:]
validation = sum((1 if x.endswith('tune_metadata') or x.endswith('extra_data') else 0 for x in latest_checkpoints))
if ray.__version__ >= '0.6.5':
if validation is not 1:
raise RuntimeError('Failed to save checkpoint files - .tune_metadata')
elif validation is not 2:
raise RuntimeError('Failed to save checkpoint files - .tune_metadata or .extra_data')
for source_path in latest_checkpoints:
(_, ext) = os.path.splitext(source_path)
destination_path = os.path.join(MODEL_OUTPUT_DIR, 'checkpoint%s' % ext)
copyfile(source_path, destination_path)
print('Saved the checkpoint file %s as %s' % (source_path, destination_path))
</DeepExtract>
<DeepExtract>
self.register_env_creator()
if ray.__version__ >= '0.6.5':
from ray.rllib.agents.registry import get_agent_class
else:
from ray.rllib.agents.agent import get_agent_class
cls = get_agent_class(algorithm)
config['monitor'] = False
config['num_workers'] = 1
config['num_gpus'] = 0
agent = cls(env=env_string, config=config)
checkpoint = os.path.join(MODEL_OUTPUT_DIR, 'checkpoint')
agent.restore(checkpoint)
export_tf_serving(agent, MODEL_OUTPUT_DIR)
</DeepExtract>
change_permissions_recursive(INTERMEDIATE_DIR, 511)
change_permissions_recursive(MODEL_OUTPUT_DIR, 511)
|
def save_checkpoint_and_serving_model(self, algorithm=None, env_string=None, config=None):
with open(os.path.join(MODEL_OUTPUT_DIR, 'params.json'), 'w') as f:
json.dump(config, f, indent=2)
print('Saved model configuration.')
checkpoints = []
count = 0
while not checkpoints:
count += 1
for (root, directories, filenames) in os.walk(INTERMEDIATE_DIR):
for filename in filenames:
if filename.startswith('checkpoint'):
checkpoints.append(os.path.join(root, filename))
time.sleep(5)
if count >= 6:
raise RuntimeError('Failed to find checkpoint files')
checkpoints.sort(key=natural_keys)
latest_checkpoints = checkpoints[-2:]
validation = sum((1 if x.endswith('tune_metadata') or x.endswith('extra_data') else 0 for x in latest_checkpoints))
if ray.__version__ >= '0.6.5':
if validation is not 1:
raise RuntimeError('Failed to save checkpoint files - .tune_metadata')
elif validation is not 2:
raise RuntimeError('Failed to save checkpoint files - .tune_metadata or .extra_data')
for source_path in latest_checkpoints:
(_, ext) = os.path.splitext(source_path)
destination_path = os.path.join(MODEL_OUTPUT_DIR, 'checkpoint%s' % ext)
copyfile(source_path, destination_path)
print('Saved the checkpoint file %s as %s' % (source_path, destination_path))
self.register_env_creator()
if ray.__version__ >= '0.6.5':
from ray.rllib.agents.registry import get_agent_class
else:
from ray.rllib.agents.agent import get_agent_class
cls = get_agent_class(algorithm)
config['monitor'] = False
config['num_workers'] = 1
config['num_gpus'] = 0
agent = cls(env=env_string, config=config)
checkpoint = os.path.join(MODEL_OUTPUT_DIR, 'checkpoint')
agent.restore(checkpoint)
export_tf_serving(agent, MODEL_OUTPUT_DIR)
change_permissions_recursive(INTERMEDIATE_DIR, 511)
change_permissions_recursive(MODEL_OUTPUT_DIR, 511)
|
deepracer-local
|
positive
|
def __init__(self, path, type):
self.datapath = path
self.type = type
self.slash = '/'
if self.slash != self.datapath[-1]:
self.datapath = self.datapath + self.slash
self.dic_wavlist_thchs30 = {}
self.dic_symbollist_thchs30 = {}
self.symbolnum = 0
self.datanum = 0
self.wavs_data = []
self.list_wavnum_thchs30 = []
self.list_symbolnum_thchs30 = []
<DeepExtract>
if self.type == 'train':
filename_wavlist_thchs30 = 'thchs30' + self.slash + 'train.wav.lst'
filename_symbollist_thchs30 = 'thchs30' + self.slash + 'train.syllable.txt'
elif self.type == 'dev':
filename_wavlist_thchs30 = 'thchs30' + self.slash + 'cv.wav.lst'
filename_symbollist_thchs30 = 'thchs30' + self.slash + 'cv.syllable.txt'
elif self.type == 'test':
filename_wavlist_thchs30 = 'thchs30' + self.slash + 'test.wav.lst'
filename_symbollist_thchs30 = 'thchs30' + self.slash + 'test.syllable.txt'
else:
pass
(self.dic_wavlist_thchs30, self.list_wavnum_thchs30) = get_wav_list(self.datapath + filename_wavlist_thchs30)
(self.dic_symbollist_thchs30, self.list_symbolnum_thchs30) = get_wav_symbol(self.datapath + filename_symbollist_thchs30)
self.datanum = self.get_datanum()
</DeepExtract>
<DeepExtract>
list_symbol = []
with open('dict.txt', 'r') as fr:
lines = fr.readlines()
for line in lines:
res = line.split()
list_symbol.append(res[0])
list_symbol.append('_')
self.symbolnum = len(list_symbol)
self.list_symbol = list_symbol
</DeepExtract>
pass
|
def __init__(self, path, type):
self.datapath = path
self.type = type
self.slash = '/'
if self.slash != self.datapath[-1]:
self.datapath = self.datapath + self.slash
self.dic_wavlist_thchs30 = {}
self.dic_symbollist_thchs30 = {}
self.symbolnum = 0
self.datanum = 0
self.wavs_data = []
self.list_wavnum_thchs30 = []
self.list_symbolnum_thchs30 = []
if self.type == 'train':
filename_wavlist_thchs30 = 'thchs30' + self.slash + 'train.wav.lst'
filename_symbollist_thchs30 = 'thchs30' + self.slash + 'train.syllable.txt'
elif self.type == 'dev':
filename_wavlist_thchs30 = 'thchs30' + self.slash + 'cv.wav.lst'
filename_symbollist_thchs30 = 'thchs30' + self.slash + 'cv.syllable.txt'
elif self.type == 'test':
filename_wavlist_thchs30 = 'thchs30' + self.slash + 'test.wav.lst'
filename_symbollist_thchs30 = 'thchs30' + self.slash + 'test.syllable.txt'
else:
pass
(self.dic_wavlist_thchs30, self.list_wavnum_thchs30) = get_wav_list(self.datapath + filename_wavlist_thchs30)
(self.dic_symbollist_thchs30, self.list_symbolnum_thchs30) = get_wav_symbol(self.datapath + filename_symbollist_thchs30)
self.datanum = self.get_datanum()
list_symbol = []
with open('dict.txt', 'r') as fr:
lines = fr.readlines()
for line in lines:
res = line.split()
list_symbol.append(res[0])
list_symbol.append('_')
self.symbolnum = len(list_symbol)
self.list_symbol = list_symbol
pass
|
ASR_Syllable
|
positive
|
def forward_x8(self, x, forward_function):
def _transform(v, op):
if self.precision != 'single':
v = v.float()
v2np = v.data.cpu().numpy()
if op == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif op == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif op == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half':
ret = ret.half()
return ret
lr_list = [x]
for tf in ('v', 'h', 't'):
lr_list.extend([_transform(t, tf) for t in lr_list])
sr_list = [forward_function(aug) for aug in lr_list]
for i in range(len(sr_list)):
if i > 3:
<DeepExtract>
if self.precision != 'single':
sr_list[i] = sr_list[i].float()
v2np = sr_list[i].data.cpu().numpy()
if 't' == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif 't' == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif 't' == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half':
ret = ret.half()
sr_list[i] = ret
</DeepExtract>
if i % 4 > 1:
<DeepExtract>
if self.precision != 'single':
sr_list[i] = sr_list[i].float()
v2np = sr_list[i].data.cpu().numpy()
if 'h' == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif 'h' == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif 'h' == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half':
ret = ret.half()
sr_list[i] = ret
</DeepExtract>
if i % 4 % 2 == 1:
<DeepExtract>
if self.precision != 'single':
sr_list[i] = sr_list[i].float()
v2np = sr_list[i].data.cpu().numpy()
if 'v' == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif 'v' == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif 'v' == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half':
ret = ret.half()
sr_list[i] = ret
</DeepExtract>
output_cat = torch.cat(sr_list, dim=0)
output = output_cat.mean(dim=0, keepdim=True)
return output
|
def forward_x8(self, x, forward_function):
def _transform(v, op):
if self.precision != 'single':
v = v.float()
v2np = v.data.cpu().numpy()
if op == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif op == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif op == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half':
ret = ret.half()
return ret
lr_list = [x]
for tf in ('v', 'h', 't'):
lr_list.extend([_transform(t, tf) for t in lr_list])
sr_list = [forward_function(aug) for aug in lr_list]
for i in range(len(sr_list)):
if i > 3:
if self.precision != 'single':
sr_list[i] = sr_list[i].float()
v2np = sr_list[i].data.cpu().numpy()
if 't' == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif 't' == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif 't' == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half':
ret = ret.half()
sr_list[i] = ret
if i % 4 > 1:
if self.precision != 'single':
sr_list[i] = sr_list[i].float()
v2np = sr_list[i].data.cpu().numpy()
if 'h' == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif 'h' == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif 'h' == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half':
ret = ret.half()
sr_list[i] = ret
if i % 4 % 2 == 1:
if self.precision != 'single':
sr_list[i] = sr_list[i].float()
v2np = sr_list[i].data.cpu().numpy()
if 'v' == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif 'v' == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif 'v' == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
if self.precision == 'half':
ret = ret.half()
sr_list[i] = ret
output_cat = torch.cat(sr_list, dim=0)
output = output_cat.mean(dim=0, keepdim=True)
return output
|
CVPR-2020-Semi-Low-Light
|
positive
|
def calculate_schedule_usage_for_period(schedule_name, start_dt, stop_dt=None, logger=None):
result = {}
def running_seconds(startdt, stopdt):
return max(int((stopdt - startdt).total_seconds()), 60)
def running_hours(startdt, stopdt):
return int(((stopdt - startdt).total_seconds() - 1) / 3600) + 1
def make_period(started_dt, stopped_dt):
running_period = {'begin': started_dt, 'end': stopped_dt, 'billing_hours': running_hours(started_dt, stopped_dt), 'billing_seconds': running_seconds(started_dt, stopped_dt)}
return running_period
self._logger = logger
stop = stop_dt or start_dt
if start_dt > stop:
raise ValueError(ERR_STOP_MUST_BE_LATER_OR_EQUAL_TO_START)
dt = start_dt if isinstance(start_dt, datetime) else datetime(start_dt.year, start_dt.month, start_dt.day)
config_data = ConfigDynamodbAdapter(self._table.name).config
while dt <= stop:
self._configuration = SchedulerConfigBuilder(logger=self._logger).build(config_data)
conf = configuration.SchedulerConfigBuilder(self._logger).build(config=config_data, dt=dt)
schedule = conf.get_schedule(schedule_name)
timeline = {dt.replace(hour=0, minute=0)}
for p in schedule.periods:
begintime = p['period'].begintime
endtime = p['period'].endtime
if begintime is None and endtime is None:
timeline.add(dt.replace(hour=0, minute=0))
timeline.add(dt.replace(hour=23, minute=59))
else:
if begintime:
timeline.add(dt.replace(hour=begintime.hour, minute=begintime.minute))
if endtime:
timeline.add(dt.replace(hour=endtime.hour, minute=endtime.minute))
running_periods = {}
started = None
starting_period = None
current_state = None
inst = as_namedtuple('Instance', {'instance_str': 'instance', 'allow_resize': False})
for tm in sorted(list(timeline)):
(desired_state, instance_type, period) = schedule.get_desired_state(inst, self._logger, tm, False)
if current_state != desired_state:
if desired_state == InstanceSchedule.STATE_RUNNING:
started = tm
current_state = InstanceSchedule.STATE_RUNNING
starting_period = period
elif desired_state == InstanceSchedule.STATE_STOPPED:
stopped = tm
(desired_state_with_adj_check, _, __) = schedule.get_desired_state(inst, self._logger, tm, True)
if desired_state_with_adj_check == InstanceSchedule.STATE_RUNNING:
stopped += timedelta(minutes=1)
if current_state == InstanceSchedule.STATE_RUNNING:
current_state = InstanceSchedule.STATE_STOPPED
<DeepExtract>
running_period = {'begin': started, 'end': stopped, 'billing_hours': running_hours(started, stopped), 'billing_seconds': running_seconds(started, stopped)}
running_periods[starting_period] = running_period
</DeepExtract>
if current_state == InstanceSchedule.STATE_RUNNING:
stopped = dt.replace(hour=23, minute=59) + timedelta(minutes=1)
<DeepExtract>
running_period = {'begin': started, 'end': stopped, 'billing_hours': running_hours(started, stopped), 'billing_seconds': running_seconds(started, stopped)}
running_periods[starting_period] = running_period
</DeepExtract>
result[str(dt.date())] = {'running_periods': running_periods, 'billing_seconds': sum([running_periods[ps]['billing_seconds'] for ps in running_periods]), 'billing_hours': sum([running_periods[ph]['billing_hours'] for ph in running_periods])}
dt += timedelta(days=1)
return {'schedule': schedule_name, 'usage': result}
|
def calculate_schedule_usage_for_period(schedule_name, start_dt, stop_dt=None, logger=None):
result = {}
def running_seconds(startdt, stopdt):
return max(int((stopdt - startdt).total_seconds()), 60)
def running_hours(startdt, stopdt):
return int(((stopdt - startdt).total_seconds() - 1) / 3600) + 1
def make_period(started_dt, stopped_dt):
running_period = {'begin': started_dt, 'end': stopped_dt, 'billing_hours': running_hours(started_dt, stopped_dt), 'billing_seconds': running_seconds(started_dt, stopped_dt)}
return running_period
self._logger = logger
stop = stop_dt or start_dt
if start_dt > stop:
raise ValueError(ERR_STOP_MUST_BE_LATER_OR_EQUAL_TO_START)
dt = start_dt if isinstance(start_dt, datetime) else datetime(start_dt.year, start_dt.month, start_dt.day)
config_data = ConfigDynamodbAdapter(self._table.name).config
while dt <= stop:
self._configuration = SchedulerConfigBuilder(logger=self._logger).build(config_data)
conf = configuration.SchedulerConfigBuilder(self._logger).build(config=config_data, dt=dt)
schedule = conf.get_schedule(schedule_name)
timeline = {dt.replace(hour=0, minute=0)}
for p in schedule.periods:
begintime = p['period'].begintime
endtime = p['period'].endtime
if begintime is None and endtime is None:
timeline.add(dt.replace(hour=0, minute=0))
timeline.add(dt.replace(hour=23, minute=59))
else:
if begintime:
timeline.add(dt.replace(hour=begintime.hour, minute=begintime.minute))
if endtime:
timeline.add(dt.replace(hour=endtime.hour, minute=endtime.minute))
running_periods = {}
started = None
starting_period = None
current_state = None
inst = as_namedtuple('Instance', {'instance_str': 'instance', 'allow_resize': False})
for tm in sorted(list(timeline)):
(desired_state, instance_type, period) = schedule.get_desired_state(inst, self._logger, tm, False)
if current_state != desired_state:
if desired_state == InstanceSchedule.STATE_RUNNING:
started = tm
current_state = InstanceSchedule.STATE_RUNNING
starting_period = period
elif desired_state == InstanceSchedule.STATE_STOPPED:
stopped = tm
(desired_state_with_adj_check, _, __) = schedule.get_desired_state(inst, self._logger, tm, True)
if desired_state_with_adj_check == InstanceSchedule.STATE_RUNNING:
stopped += timedelta(minutes=1)
if current_state == InstanceSchedule.STATE_RUNNING:
current_state = InstanceSchedule.STATE_STOPPED
running_period = {'begin': started, 'end': stopped, 'billing_hours': running_hours(started, stopped), 'billing_seconds': running_seconds(started, stopped)}
running_periods[starting_period] = running_period
if current_state == InstanceSchedule.STATE_RUNNING:
stopped = dt.replace(hour=23, minute=59) + timedelta(minutes=1)
running_period = {'begin': started, 'end': stopped, 'billing_hours': running_hours(started, stopped), 'billing_seconds': running_seconds(started, stopped)}
running_periods[starting_period] = running_period
result[str(dt.date())] = {'running_periods': running_periods, 'billing_seconds': sum([running_periods[ps]['billing_seconds'] for ps in running_periods]), 'billing_hours': sum([running_periods[ph]['billing_hours'] for ph in running_periods])}
dt += timedelta(days=1)
return {'schedule': schedule_name, 'usage': result}
|
aws-instance-scheduler
|
positive
|
def is_file_exists(self, bucket_name, key):
""" Check if file exists.
:type bucket_name: str
:type key: str
"""
<DeepExtract>
bucket = self.resource.Bucket(bucket_name)
result = [obj.key for obj in bucket.objects.all()]
keys = result
</DeepExtract>
if key in keys:
return True
|
def is_file_exists(self, bucket_name, key):
""" Check if file exists.
:type bucket_name: str
:type key: str
"""
bucket = self.resource.Bucket(bucket_name)
result = [obj.key for obj in bucket.objects.all()]
keys = result
if key in keys:
return True
|
aws-syndicate
|
positive
|
def enrich_model(base_model, pooling, dropout, reg, n_classes, params, verbose):
params = {} if params is None else params
<DeepExtract>
params['pooling'] = pooling if pooling is not None else params['pooling']
</DeepExtract>
<DeepExtract>
params['n_classes'] = n_classes if n_classes is not None else params['n_classes']
</DeepExtract>
x = base_model.layers[-1].output
if params['pooling'] == 'None':
x = Flatten()(x)
elif params['pooling'] == 'avg':
x = GlobalAveragePooling2D()(x)
elif params['pooling'] == 'max':
x = GlobalMaxPooling2D()(x)
if dropout is not None and dropout != 0.0:
x = Dropout(dropout)(x)
if verbose:
print('Adding dropout to model with rate: {}'.format(dropout))
regularizer = None
if reg is not None:
reg_l2 = reg['l2']
reg_l1 = reg['l1']
if reg_l1 != 0.0 and reg_l2 != 0.0:
regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2)
if reg_l1 == 0.0 and reg_l2 != 0.0:
regularizer = regularizers.l2(reg_l2)
if reg_l1 != 0.0 and reg_l2 == 0.0:
regularizer = regularizers.l1(reg_l1)
if verbose:
print('Using regularizer for model: {}'.format(reg))
predictions = Dense(params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x)
model = Model(input=base_model.input, output=predictions)
return (model, params)
|
def enrich_model(base_model, pooling, dropout, reg, n_classes, params, verbose):
params = {} if params is None else params
params['pooling'] = pooling if pooling is not None else params['pooling']
params['n_classes'] = n_classes if n_classes is not None else params['n_classes']
x = base_model.layers[-1].output
if params['pooling'] == 'None':
x = Flatten()(x)
elif params['pooling'] == 'avg':
x = GlobalAveragePooling2D()(x)
elif params['pooling'] == 'max':
x = GlobalMaxPooling2D()(x)
if dropout is not None and dropout != 0.0:
x = Dropout(dropout)(x)
if verbose:
print('Adding dropout to model with rate: {}'.format(dropout))
regularizer = None
if reg is not None:
reg_l2 = reg['l2']
reg_l1 = reg['l1']
if reg_l1 != 0.0 and reg_l2 != 0.0:
regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2)
if reg_l1 == 0.0 and reg_l2 != 0.0:
regularizer = regularizers.l2(reg_l2)
if reg_l1 != 0.0 and reg_l2 == 0.0:
regularizer = regularizers.l1(reg_l1)
if verbose:
print('Using regularizer for model: {}'.format(reg))
predictions = Dense(params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x)
model = Model(input=base_model.input, output=predictions)
return (model, params)
|
dataiku-contrib
|
positive
|
def test_match_device_correct_descriptors_cname_dns_suffix_override_region(mocker):
get_dns_name_mock = mocker.patch('mount_efs.get_dns_name_and_fallback_mount_target_ip_address', return_value=('fs-deadbeef.efs.cn-north-1.amazonaws.com.cn', None))
gethostbyname_ex_mock = mocker.patch('socket.gethostbyname_ex', return_value=('fs-deadbeef.efs.cn-north-1.amazonaws.com.cn', [], None))
<DeepExtract>
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
config.add_section(mount_efs.CONFIG_SECTION)
config.add_section(mount_efs.CLOUDWATCH_LOG_SECTION)
config.set(mount_efs.CONFIG_SECTION, 'dns_name_format', dns_name_format)
config.set(mount_efs.CONFIG_SECTION, 'dns_name_suffix', dns_name_suffix)
config.set(mount_efs.CLOUDWATCH_LOG_SECTION, 'enabled', cloudwatch_enabled)
if has_fallback_to_mount_target_ip_address_item:
config.set(mount_efs.CONFIG_SECTION, mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, str(fallback_to_mount_target_ip_address))
config = config
</DeepExtract>
for (device, (fs_id, path, az)) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS:
assert (fs_id, path, az) == mount_efs.match_device(config, device, DEFAULT_NFS_OPTIONS)
utils.assert_called(get_dns_name_mock)
utils.assert_called(gethostbyname_ex_mock)
|
def test_match_device_correct_descriptors_cname_dns_suffix_override_region(mocker):
get_dns_name_mock = mocker.patch('mount_efs.get_dns_name_and_fallback_mount_target_ip_address', return_value=('fs-deadbeef.efs.cn-north-1.amazonaws.com.cn', None))
gethostbyname_ex_mock = mocker.patch('socket.gethostbyname_ex', return_value=('fs-deadbeef.efs.cn-north-1.amazonaws.com.cn', [], None))
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
config.add_section(mount_efs.CONFIG_SECTION)
config.add_section(mount_efs.CLOUDWATCH_LOG_SECTION)
config.set(mount_efs.CONFIG_SECTION, 'dns_name_format', dns_name_format)
config.set(mount_efs.CONFIG_SECTION, 'dns_name_suffix', dns_name_suffix)
config.set(mount_efs.CLOUDWATCH_LOG_SECTION, 'enabled', cloudwatch_enabled)
if has_fallback_to_mount_target_ip_address_item:
config.set(mount_efs.CONFIG_SECTION, mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, str(fallback_to_mount_target_ip_address))
config = config
for (device, (fs_id, path, az)) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS:
assert (fs_id, path, az) == mount_efs.match_device(config, device, DEFAULT_NFS_OPTIONS)
utils.assert_called(get_dns_name_mock)
utils.assert_called(gethostbyname_ex_mock)
|
efs-utils
|
positive
|
def print_info(self):
import h5py
def h5_tree(h5, depth=0):
for (k, v) in h5.items():
if isinstance(v, h5py._hl.group.Group):
<DeepExtract>
for (k, v) in v.items():
if isinstance(v, h5py._hl.group.Group):
h5_tree(v, depth + 1 + 1)
else:
print(' ' * (depth + 1 * 3), k, v)
for (p, q) in v.attrs.items():
print(' ' * (depth + 1 * 3 + 3), p, q)
</DeepExtract>
else:
print(' ' * (depth * 3), k, v)
for (p, q) in v.attrs.items():
print(' ' * (depth * 3 + 3), p, q)
size = os.path.getsize(self.path)
print(f'HDF5 file {self.path}: {size:,} ({bytes(size)})')
with h5py.File(self.path, mode='r') as f:
print('Content:')
<DeepExtract>
for (k, v) in f.items():
if isinstance(v, h5py._hl.group.Group):
h5_tree(v, 1 + 1)
else:
print(' ' * (1 * 3), k, v)
for (p, q) in v.attrs.items():
print(' ' * (1 * 3 + 3), p, q)
</DeepExtract>
|
def print_info(self):
import h5py
def h5_tree(h5, depth=0):
for (k, v) in h5.items():
if isinstance(v, h5py._hl.group.Group):
for (k, v) in v.items():
if isinstance(v, h5py._hl.group.Group):
h5_tree(v, depth + 1 + 1)
else:
print(' ' * (depth + 1 * 3), k, v)
for (p, q) in v.attrs.items():
print(' ' * (depth + 1 * 3 + 3), p, q)
else:
print(' ' * (depth * 3), k, v)
for (p, q) in v.attrs.items():
print(' ' * (depth * 3 + 3), p, q)
size = os.path.getsize(self.path)
print(f'HDF5 file {self.path}: {size:,} ({bytes(size)})')
with h5py.File(self.path, mode='r') as f:
print('Content:')
for (k, v) in f.items():
if isinstance(v, h5py._hl.group.Group):
h5_tree(v, 1 + 1)
else:
print(' ' * (1 * 3), k, v)
for (p, q) in v.attrs.items():
print(' ' * (1 * 3 + 3), p, q)
|
climetlab
|
positive
|
def build_time_series_hidden_layers(self):
"""Builds hidden layers for the time series features.
Inputs:
self.time_series_features
Outputs:
self.time_series_hidden_layers
"""
time_series_hidden_layers = {}
for (name, time_series) in self.time_series_features.items():
<DeepExtract>
with tf.name_scope(name + '_hidden'):
net = time_series
if net.shape.rank == 2:
net = tf.expand_dims(net, -1)
if net.shape.rank != 3:
raise ValueError('Expected inputs to have rank 2 or 3. Got: {}'.format(time_series))
for i in range(self.hparams.time_series_hidden[name].cnn_num_blocks):
num_filters = int(self.hparams.time_series_hidden[name].cnn_initial_num_filters * self.hparams.time_series_hidden[name].cnn_block_filter_factor ** i)
with tf.name_scope('block_{}'.format(i + 1)):
for j in range(self.hparams.time_series_hidden[name].cnn_block_size):
conv_op = tf.keras.layers.Conv1D(filters=num_filters, kernel_size=int(self.hparams.time_series_hidden[name].cnn_kernel_size), padding=self.hparams.time_series_hidden[name].convolution_padding, activation=tf.nn.relu, name='conv_{}'.format(j + 1))
net = conv_op(net)
if self.hparams.time_series_hidden[name].pool_size > 1:
pool_op = tf.keras.layers.MaxPool1D(pool_size=int(self.hparams.time_series_hidden[name].pool_size), strides=int(self.hparams.time_series_hidden[name].pool_strides), name='pool')
net = pool_op(net)
net.shape.assert_has_rank(3)
net_shape = net.shape.as_list()
output_dim = net_shape[1] * net_shape[2]
net = tf.reshape(net, [-1, output_dim], name='flatten')
time_series_hidden_layers[name] = net
</DeepExtract>
self.time_series_hidden_layers = time_series_hidden_layers
|
def build_time_series_hidden_layers(self):
"""Builds hidden layers for the time series features.
Inputs:
self.time_series_features
Outputs:
self.time_series_hidden_layers
"""
time_series_hidden_layers = {}
for (name, time_series) in self.time_series_features.items():
with tf.name_scope(name + '_hidden'):
net = time_series
if net.shape.rank == 2:
net = tf.expand_dims(net, -1)
if net.shape.rank != 3:
raise ValueError('Expected inputs to have rank 2 or 3. Got: {}'.format(time_series))
for i in range(self.hparams.time_series_hidden[name].cnn_num_blocks):
num_filters = int(self.hparams.time_series_hidden[name].cnn_initial_num_filters * self.hparams.time_series_hidden[name].cnn_block_filter_factor ** i)
with tf.name_scope('block_{}'.format(i + 1)):
for j in range(self.hparams.time_series_hidden[name].cnn_block_size):
conv_op = tf.keras.layers.Conv1D(filters=num_filters, kernel_size=int(self.hparams.time_series_hidden[name].cnn_kernel_size), padding=self.hparams.time_series_hidden[name].convolution_padding, activation=tf.nn.relu, name='conv_{}'.format(j + 1))
net = conv_op(net)
if self.hparams.time_series_hidden[name].pool_size > 1:
pool_op = tf.keras.layers.MaxPool1D(pool_size=int(self.hparams.time_series_hidden[name].pool_size), strides=int(self.hparams.time_series_hidden[name].pool_strides), name='pool')
net = pool_op(net)
net.shape.assert_has_rank(3)
net_shape = net.shape.as_list()
output_dim = net_shape[1] * net_shape[2]
net = tf.reshape(net, [-1, output_dim], name='flatten')
time_series_hidden_layers[name] = net
self.time_series_hidden_layers = time_series_hidden_layers
|
exoplanet-ml
|
positive
|
def forward(self, x, init=False):
if init is True:
self.V.data.copy_(torch.randn(self.V.data.size()).type_as(self.V.data) * 0.05)
v_norm = self.V.data / self.V.data.transpose(0, 1).contiguous().view(self.out_channels, -1).norm(2, 1).view(self.in_channels, self.out_channels, *[1] * len(self.kernel_size)).expand_as(self.V.data)
x_init = F.conv_transpose2d(x, v_norm, None, self.stride, self.padding, self.output_padding, self.groups).data
        t_x_init = x_init.transpose(0, 1).contiguous().view(self.out_channels, -1)
(m_init, v_init) = (t_x_init.mean(1).squeeze(1), t_x_init.var(1).squeeze(1))
scale_init = self.init_scale / torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
scale_init_shape = scale_init.view(1, self.out_channels, *[1] * (len(x_init.size()) - 2))
m_init_shape = m_init.view(1, self.out_channels, *[1] * (len(x_init.size()) - 2))
x_init = scale_init_shape.expand_as(x_init) * (x_init - m_init_shape.expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return x_init
else:
<DeepExtract>
vars = []
for vn in ['V', 'g', 'b']:
vars.append(get_var_maybe_avg(self, vn, self.training, self.polyak_decay))
(v, g, b) = vars
</DeepExtract>
scalar = g / torch.norm(v.transpose(0, 1).contiguous().view(self.out_channels, -1), 2, 1).squeeze(1)
w = scalar.view(self.in_channels, self.out_channels, *[1] * (len(v.size()) - 2)).expand_as(v) * v
x = F.conv_transpose2d(x, w, b, self.stride, self.padding, self.output_padding, self.groups)
return x
|
def forward(self, x, init=False):
if init is True:
self.V.data.copy_(torch.randn(self.V.data.size()).type_as(self.V.data) * 0.05)
v_norm = self.V.data / self.V.data.transpose(0, 1).contiguous().view(self.out_channels, -1).norm(2, 1).view(self.in_channels, self.out_channels, *[1] * len(self.kernel_size)).expand_as(self.V.data)
x_init = F.conv_transpose2d(x, v_norm, None, self.stride, self.padding, self.output_padding, self.groups).data
        t_x_init = x_init.transpose(0, 1).contiguous().view(self.out_channels, -1)
(m_init, v_init) = (t_x_init.mean(1).squeeze(1), t_x_init.var(1).squeeze(1))
scale_init = self.init_scale / torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
scale_init_shape = scale_init.view(1, self.out_channels, *[1] * (len(x_init.size()) - 2))
m_init_shape = m_init.view(1, self.out_channels, *[1] * (len(x_init.size()) - 2))
x_init = scale_init_shape.expand_as(x_init) * (x_init - m_init_shape.expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return x_init
else:
vars = []
for vn in ['V', 'g', 'b']:
vars.append(get_var_maybe_avg(self, vn, self.training, self.polyak_decay))
(v, g, b) = vars
scalar = g / torch.norm(v.transpose(0, 1).contiguous().view(self.out_channels, -1), 2, 1).squeeze(1)
w = scalar.view(self.in_channels, self.out_channels, *[1] * (len(v.size()) - 2)).expand_as(v) * v
x = F.conv_transpose2d(x, w, b, self.stride, self.padding, self.output_padding, self.groups)
return x
|
encoder-agnostic-adaptation
|
positive
|
def test_credentials_file_helper_awsprofile_found_with_token(tmpdir):
<DeepExtract>
fake_file = os.path.join(str(tmpdir), AWS_CONFIG_FILE)
</DeepExtract>
<DeepExtract>
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
if True:
config.add_section(AWSPROFILE)
config = config
</DeepExtract>
config.set(DEFAULT_PROFILE, ACCESS_KEY_ID_KEY, WRONG_ACCESS_KEY_ID_VAL)
config.set(DEFAULT_PROFILE, SECRET_ACCESS_KEY_KEY, WRONG_SECRET_ACCESS_KEY_VAL)
config.set(DEFAULT_PROFILE, SESSION_TOKEN_KEY, WRONG_SESSION_TOKEN_VAL)
config.set(AWSPROFILE, ACCESS_KEY_ID_KEY, ACCESS_KEY_ID_VAL)
config.set(AWSPROFILE, SECRET_ACCESS_KEY_KEY, SECRET_ACCESS_KEY_VAL)
config.set(AWSPROFILE, SESSION_TOKEN_KEY, SESSION_TOKEN_VAL)
with open(fake_file, 'w') as f:
config.write(f)
credentials = mount_efs.credentials_file_helper(fake_file, AWSPROFILE)
assert credentials['AccessKeyId'] == ACCESS_KEY_ID_VAL
assert credentials['SecretAccessKey'] == SECRET_ACCESS_KEY_VAL
assert credentials['Token'] == SESSION_TOKEN_VAL
|
def test_credentials_file_helper_awsprofile_found_with_token(tmpdir):
fake_file = os.path.join(str(tmpdir), AWS_CONFIG_FILE)
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
if True:
config.add_section(AWSPROFILE)
config = config
config.set(DEFAULT_PROFILE, ACCESS_KEY_ID_KEY, WRONG_ACCESS_KEY_ID_VAL)
config.set(DEFAULT_PROFILE, SECRET_ACCESS_KEY_KEY, WRONG_SECRET_ACCESS_KEY_VAL)
config.set(DEFAULT_PROFILE, SESSION_TOKEN_KEY, WRONG_SESSION_TOKEN_VAL)
config.set(AWSPROFILE, ACCESS_KEY_ID_KEY, ACCESS_KEY_ID_VAL)
config.set(AWSPROFILE, SECRET_ACCESS_KEY_KEY, SECRET_ACCESS_KEY_VAL)
config.set(AWSPROFILE, SESSION_TOKEN_KEY, SESSION_TOKEN_VAL)
with open(fake_file, 'w') as f:
config.write(f)
credentials = mount_efs.credentials_file_helper(fake_file, AWSPROFILE)
assert credentials['AccessKeyId'] == ACCESS_KEY_ID_VAL
assert credentials['SecretAccessKey'] == SECRET_ACCESS_KEY_VAL
assert credentials['Token'] == SESSION_TOKEN_VAL
|
efs-utils
|
positive
|
def local_previous_frame_nearest_neighbor_features_per_object(prev_frame_embedding, query_embedding, prev_frame_labels, gt_ids, max_distance=15):
"""Computes nearest neighbor features while only allowing local matches.
Args:
prev_frame_embedding: Tensor of shape [height, width, embedding_dim],
the embedding vectors for the last frame.
query_embedding: Tensor of shape [height, width, embedding_dim],
the embedding vectors for the query frames.
prev_frame_labels: Tensor of shape [height, width, 1], the class labels of
the previous frame.
gt_ids: Int Tensor of shape [n_objs] of the sorted unique ground truth
ids in the first frame.
max_distance: Integer, the maximum distance allowed for local matching.
Returns:
nn_features: A float32 np.array of nearest neighbor features of shape
[1, height, width, n_objects, 1].
"""
if USE_CORRELATION_COST:
<DeepExtract>
if cfg.MODEL_LOCAL_DOWNSAMPLE:
(ori_h, ori_w, _) = query_embedding.size()
query_embedding = query_embedding.permute(2, 0, 1).unsqueeze(0)
query_embedding = F.avg_pool2d(query_embedding, (2, 2), (2, 2))
prev_frame_embedding = prev_frame_embedding.permute(2, 0, 1).unsqueeze(0)
prev_frame_embedding = F.avg_pool2d(prev_frame_embedding, (2, 2), (2, 2))
query_embedding = query_embedding.squeeze(0).permute(1, 2, 0)
prev_frame_embedding = prev_frame_embedding.squeeze(0).permute(1, 2, 0)
corr = cross_correlate(query_embedding, prev_frame_embedding, max_distance=max_distance)
xs = torch.sum(query_embedding * query_embedding, 2, keepdim=True)
ys = torch.sum(prev_frame_embedding * prev_frame_embedding, 2, keepdim=True)
ones_ys = torch.ones_like(ys)
ys = cross_correlate(ones_ys, ys, max_distance=max_distance)
d = xs + ys - 2 * corr
tmp = torch.zeros_like(d)
boundary = torch.eq(cross_correlate(ones_ys, ones_ys, max_distance=max_distance), 0)
d = torch.where(boundary, tmp.fill_(float('inf')), d)
d = (torch.sigmoid(d) - 0.5) * 2
d = d.permute(2, 0, 1).unsqueeze(0)
d = F.interpolate(d, size=(ori_h, ori_w), mode='bilinear', align_corners=True)
d = d.squeeze(0).permute(1, 2, 0)
else:
corr = cross_correlate(query_embedding, prev_frame_embedding, max_distance=max_distance)
xs = torch.sum(query_embedding * query_embedding, 2, keepdim=True)
ys = torch.sum(prev_frame_embedding * prev_frame_embedding, 2, keepdim=True)
ones_ys = torch.ones_like(ys)
ys = cross_correlate(ones_ys, ys, max_distance=max_distance)
d = xs + ys - 2 * corr
tmp = torch.zeros_like(d)
boundary = torch.eq(cross_correlate(ones_ys, ones_ys, max_distance=max_distance), 0)
d = torch.where(boundary, tmp.fill_(float('inf')), d)
d = d
</DeepExtract>
else:
<DeepExtract>
if cfg.MODEL_LOCAL_DOWNSAMPLE:
(ori_h, ori_w, _) = query_embedding.size()
query_embedding = query_embedding.permute(2, 0, 1).unsqueeze(0)
query_embedding = F.avg_pool2d(query_embedding, (2, 2), (2, 2))
prev_frame_embedding = prev_frame_embedding.permute(2, 0, 1).unsqueeze(0)
prev_frame_embedding = F.avg_pool2d(prev_frame_embedding, (2, 2), (2, 2))
(_, channels, height, width) = query_embedding.size()
padding_val = 1e+20
padded_y = F.pad(prev_frame_embedding, (max_distance, max_distance, max_distance, max_distance), mode='constant', value=padding_val)
offset_y = F.unfold(padded_y, kernel_size=(height, width)).view(1, channels, height, width, -1)
query_embedding = query_embedding.view(1, channels, height, width, 1)
minus = query_embedding - offset_y
dists = torch.sum(torch.mul(minus, minus), dim=1).view(1, height, width, -1).permute(0, 3, 1, 2)
dists = (torch.sigmoid(dists) - 0.5) * 2
dists = F.interpolate(dists, size=(ori_h, ori_w), mode='bilinear', align_corners=True)
dists = dists.squeeze(0).permute(1, 2, 0)
else:
padding_val = 1e+20
padded_y = nn.functional.pad(prev_frame_embedding, (0, 0, max_distance, max_distance, max_distance, max_distance), mode='constant', value=padding_val)
(height, width, _) = query_embedding.size()
dists = []
for y_start in range(2 * max_distance + 1):
y_end = y_start + height
y_slice = padded_y[y_start:y_end]
for x_start in range(2 * max_distance + 1):
x_end = x_start + width
offset_y = y_slice[:, x_start:x_end]
dist = torch.sum(torch.pow(query_embedding - offset_y, 2), dim=2)
dists.append(dist)
dists = torch.stack(dists, dim=2)
d = dists
</DeepExtract>
(height, width) = prev_frame_embedding.size()[:2]
if USE_CORRELATION_COST:
corr_op = SpatialCorrelationSampler(kernel_size=1, patch_size=2 * max_distance + 1, stride=1, dilation_patch=1, padding=0)
tmp_prev_frame_labels = (prev_frame_labels + 1).float().permute(2, 0, 1)
tmp_prev_frame_labels = torch.unsqueeze(tmp_prev_frame_labels, 0)
ones_ = torch.ones_like(tmp_prev_frame_labels)
offset_labels = corr_op(ones_, tmp_prev_frame_labels)
(bs, _, _, hh, ww) = offset_labels.size()
offset_labels = offset_labels.view(bs, -1, hh, ww)
offset_labels = torch.squeeze(offset_labels, 0)
offset_labels = offset_labels.permute(1, 2, 0)
offset_labels = torch.unsqueeze(offset_labels, 3)
offset_labels = torch.round(offset_labels - 1)
offset_masks = torch.eq(offset_labels, gt_ids.float().unsqueeze(0).unsqueeze(0).unsqueeze(0))
else:
masks = torch.eq(prev_frame_labels, gt_ids.unsqueeze(0).unsqueeze(0))
padded_masks = nn.functional.pad(masks, (0, 0, max_distance, max_distance, max_distance, max_distance))
offset_masks = []
for y_start in range(2 * max_distance + 1):
y_end = y_start + height
masks_slice = padded_masks[y_start:y_end]
for x_start in range(2 * max_distance + 1):
x_end = x_start + width
offset_mask = masks_slice[:, x_start:x_end]
offset_masks.append(offset_mask)
offset_masks = torch.stack(offset_masks, dim=2)
d_tiled = d.unsqueeze(-1).repeat((1, 1, 1, gt_ids.size(0)))
pad = torch.ones_like(d_tiled)
d_masked = torch.where(offset_masks, d_tiled, pad)
(dists, _) = torch.min(d_masked, dim=2)
dists = dists.view(1, height, width, gt_ids.size(0), 1)
return dists
|
def local_previous_frame_nearest_neighbor_features_per_object(prev_frame_embedding, query_embedding, prev_frame_labels, gt_ids, max_distance=15):
"""Computes nearest neighbor features while only allowing local matches.
Args:
prev_frame_embedding: Tensor of shape [height, width, embedding_dim],
the embedding vectors for the last frame.
query_embedding: Tensor of shape [height, width, embedding_dim],
the embedding vectors for the query frames.
prev_frame_labels: Tensor of shape [height, width, 1], the class labels of
the previous frame.
gt_ids: Int Tensor of shape [n_objs] of the sorted unique ground truth
ids in the first frame.
max_distance: Integer, the maximum distance allowed for local matching.
Returns:
nn_features: A float32 np.array of nearest neighbor features of shape
[1, height, width, n_objects, 1].
"""
if USE_CORRELATION_COST:
if cfg.MODEL_LOCAL_DOWNSAMPLE:
(ori_h, ori_w, _) = query_embedding.size()
query_embedding = query_embedding.permute(2, 0, 1).unsqueeze(0)
query_embedding = F.avg_pool2d(query_embedding, (2, 2), (2, 2))
prev_frame_embedding = prev_frame_embedding.permute(2, 0, 1).unsqueeze(0)
prev_frame_embedding = F.avg_pool2d(prev_frame_embedding, (2, 2), (2, 2))
query_embedding = query_embedding.squeeze(0).permute(1, 2, 0)
prev_frame_embedding = prev_frame_embedding.squeeze(0).permute(1, 2, 0)
corr = cross_correlate(query_embedding, prev_frame_embedding, max_distance=max_distance)
xs = torch.sum(query_embedding * query_embedding, 2, keepdim=True)
ys = torch.sum(prev_frame_embedding * prev_frame_embedding, 2, keepdim=True)
ones_ys = torch.ones_like(ys)
ys = cross_correlate(ones_ys, ys, max_distance=max_distance)
d = xs + ys - 2 * corr
tmp = torch.zeros_like(d)
boundary = torch.eq(cross_correlate(ones_ys, ones_ys, max_distance=max_distance), 0)
d = torch.where(boundary, tmp.fill_(float('inf')), d)
d = (torch.sigmoid(d) - 0.5) * 2
d = d.permute(2, 0, 1).unsqueeze(0)
d = F.interpolate(d, size=(ori_h, ori_w), mode='bilinear', align_corners=True)
d = d.squeeze(0).permute(1, 2, 0)
else:
corr = cross_correlate(query_embedding, prev_frame_embedding, max_distance=max_distance)
xs = torch.sum(query_embedding * query_embedding, 2, keepdim=True)
ys = torch.sum(prev_frame_embedding * prev_frame_embedding, 2, keepdim=True)
ones_ys = torch.ones_like(ys)
ys = cross_correlate(ones_ys, ys, max_distance=max_distance)
d = xs + ys - 2 * corr
tmp = torch.zeros_like(d)
boundary = torch.eq(cross_correlate(ones_ys, ones_ys, max_distance=max_distance), 0)
d = torch.where(boundary, tmp.fill_(float('inf')), d)
d = d
else:
if cfg.MODEL_LOCAL_DOWNSAMPLE:
(ori_h, ori_w, _) = query_embedding.size()
query_embedding = query_embedding.permute(2, 0, 1).unsqueeze(0)
query_embedding = F.avg_pool2d(query_embedding, (2, 2), (2, 2))
prev_frame_embedding = prev_frame_embedding.permute(2, 0, 1).unsqueeze(0)
prev_frame_embedding = F.avg_pool2d(prev_frame_embedding, (2, 2), (2, 2))
(_, channels, height, width) = query_embedding.size()
padding_val = 1e+20
padded_y = F.pad(prev_frame_embedding, (max_distance, max_distance, max_distance, max_distance), mode='constant', value=padding_val)
offset_y = F.unfold(padded_y, kernel_size=(height, width)).view(1, channels, height, width, -1)
query_embedding = query_embedding.view(1, channels, height, width, 1)
minus = query_embedding - offset_y
dists = torch.sum(torch.mul(minus, minus), dim=1).view(1, height, width, -1).permute(0, 3, 1, 2)
dists = (torch.sigmoid(dists) - 0.5) * 2
dists = F.interpolate(dists, size=(ori_h, ori_w), mode='bilinear', align_corners=True)
dists = dists.squeeze(0).permute(1, 2, 0)
else:
padding_val = 1e+20
padded_y = nn.functional.pad(prev_frame_embedding, (0, 0, max_distance, max_distance, max_distance, max_distance), mode='constant', value=padding_val)
(height, width, _) = query_embedding.size()
dists = []
for y_start in range(2 * max_distance + 1):
y_end = y_start + height
y_slice = padded_y[y_start:y_end]
for x_start in range(2 * max_distance + 1):
x_end = x_start + width
offset_y = y_slice[:, x_start:x_end]
dist = torch.sum(torch.pow(query_embedding - offset_y, 2), dim=2)
dists.append(dist)
dists = torch.stack(dists, dim=2)
d = dists
(height, width) = prev_frame_embedding.size()[:2]
if USE_CORRELATION_COST:
corr_op = SpatialCorrelationSampler(kernel_size=1, patch_size=2 * max_distance + 1, stride=1, dilation_patch=1, padding=0)
tmp_prev_frame_labels = (prev_frame_labels + 1).float().permute(2, 0, 1)
tmp_prev_frame_labels = torch.unsqueeze(tmp_prev_frame_labels, 0)
ones_ = torch.ones_like(tmp_prev_frame_labels)
offset_labels = corr_op(ones_, tmp_prev_frame_labels)
(bs, _, _, hh, ww) = offset_labels.size()
offset_labels = offset_labels.view(bs, -1, hh, ww)
offset_labels = torch.squeeze(offset_labels, 0)
offset_labels = offset_labels.permute(1, 2, 0)
offset_labels = torch.unsqueeze(offset_labels, 3)
offset_labels = torch.round(offset_labels - 1)
offset_masks = torch.eq(offset_labels, gt_ids.float().unsqueeze(0).unsqueeze(0).unsqueeze(0))
else:
masks = torch.eq(prev_frame_labels, gt_ids.unsqueeze(0).unsqueeze(0))
padded_masks = nn.functional.pad(masks, (0, 0, max_distance, max_distance, max_distance, max_distance))
offset_masks = []
for y_start in range(2 * max_distance + 1):
y_end = y_start + height
masks_slice = padded_masks[y_start:y_end]
for x_start in range(2 * max_distance + 1):
x_end = x_start + width
offset_mask = masks_slice[:, x_start:x_end]
offset_masks.append(offset_mask)
offset_masks = torch.stack(offset_masks, dim=2)
d_tiled = d.unsqueeze(-1).repeat((1, 1, 1, gt_ids.size(0)))
pad = torch.ones_like(d_tiled)
d_masked = torch.where(offset_masks, d_tiled, pad)
(dists, _) = torch.min(d_masked, dim=2)
dists = dists.view(1, height, width, gt_ids.size(0), 1)
return dists
|
CVPR2020_MANet
|
positive
|
def test_response(self):
def run():
bottle.response.bind()
bottle.response.content_type = 'test/thread'
self.assertEqual(bottle.response.headers['Content-Type'], 'test/thread')
bottle.response.bind()
bottle.response.content_type = 'test/main'
self.assertEqual(bottle.response.headers['Content-Type'], 'test/main')
<DeepExtract>
t = threading.Thread(target=run)
t.start()
t.join()
</DeepExtract>
self.assertEqual(bottle.response.headers['Content-Type'], 'test/main')
|
def test_response(self):
def run():
bottle.response.bind()
bottle.response.content_type = 'test/thread'
self.assertEqual(bottle.response.headers['Content-Type'], 'test/thread')
bottle.response.bind()
bottle.response.content_type = 'test/main'
self.assertEqual(bottle.response.headers['Content-Type'], 'test/main')
t = threading.Thread(target=run)
t.start()
t.join()
self.assertEqual(bottle.response.headers['Content-Type'], 'test/main')
|
bottle-doc-zh-cn
|
positive
|
@functools.wraps(func)
def func_with_args(*args, **kwargs):
<DeepExtract>
stack = _get_arg_stack()
current_scope = stack[-1]
</DeepExtract>
current_args = kwargs
key_func = (func.__module__, func.__name__)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
|
@functools.wraps(func)
def func_with_args(*args, **kwargs):
stack = _get_arg_stack()
current_scope = stack[-1]
current_args = kwargs
key_func = (func.__module__, func.__name__)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
|
deeplearning-benchmark
|
positive
|
def _generate_efs_driver_manifest(output_path: str, context: 'Context') -> str:
output_path = os.path.join(output_path, 'efs_driver')
os.makedirs(output_path, exist_ok=True)
<DeepExtract>
files = os.listdir(output_path)
for file in files:
if file.endswith('.yaml'):
os.remove(os.path.join(output_path, file))
</DeepExtract>
if context.account_id is None:
raise RuntimeError('context.account_id is None!')
if context.region is None:
raise RuntimeError('context.region is None!')
<DeepExtract>
os.makedirs(os.path.join(output_path, 'base'), exist_ok=True)
filenames = ('csidriver.yaml', 'kustomization.yaml', 'node.yaml')
for filename in filenames:
input = os.path.join(MODELS_PATH, 'kube-system', 'efs_driver', 'base', filename)
output = os.path.join(output_path, 'base', filename)
_logger.debug('Copying efs driver base file: %s -> %s', input, output)
shutil.copyfile(src=input, dst=output)
</DeepExtract>
overlays_path = os.path.join(output_path, 'overlays')
os.makedirs(overlays_path, exist_ok=True)
shutil.copyfile(src=os.path.join(MODELS_PATH, 'kube-system', 'efs_driver', 'overlays', 'kustomization.yaml'), dst=os.path.join(overlays_path, 'kustomization.yaml'))
return overlays_path
|
def _generate_efs_driver_manifest(output_path: str, context: 'Context') -> str:
output_path = os.path.join(output_path, 'efs_driver')
os.makedirs(output_path, exist_ok=True)
files = os.listdir(output_path)
for file in files:
if file.endswith('.yaml'):
os.remove(os.path.join(output_path, file))
if context.account_id is None:
raise RuntimeError('context.account_id is None!')
if context.region is None:
raise RuntimeError('context.region is None!')
os.makedirs(os.path.join(output_path, 'base'), exist_ok=True)
filenames = ('csidriver.yaml', 'kustomization.yaml', 'node.yaml')
for filename in filenames:
input = os.path.join(MODELS_PATH, 'kube-system', 'efs_driver', 'base', filename)
output = os.path.join(output_path, 'base', filename)
_logger.debug('Copying efs driver base file: %s -> %s', input, output)
shutil.copyfile(src=input, dst=output)
overlays_path = os.path.join(output_path, 'overlays')
os.makedirs(overlays_path, exist_ok=True)
shutil.copyfile(src=os.path.join(MODELS_PATH, 'kube-system', 'efs_driver', 'overlays', 'kustomization.yaml'), dst=os.path.join(overlays_path, 'kustomization.yaml'))
return overlays_path
|
aws-orbit-workbench
|
positive
|
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
<DeepExtract>
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = pairs
</DeepExtract>
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
(first, second) = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
<DeepExtract>
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = pairs
</DeepExtract>
word = ' '.join(word)
self.cache[token] = word
return word
|
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = pairs
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
(first, second) = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = pairs
word = ' '.join(word)
self.cache[token] = word
return word
|
DeBERTa
|
positive
|
def test(self):
print('Testing copycat with input file: {}'.format(TestCopycat.Filename))
try:
with open(TestCopycat.Filename, 'rb') as infile:
problems = pickle.load(infile)
except Exception as e:
print('Generating due to error:')
print(e)
<DeepExtract>
print('Generating distributions for new file')
iterations = 30
problems = [Problem('abc', 'abd', 'efg', iterations), Problem('abc', 'abd', 'ijk', iterations), Problem('abc', 'abd', 'xyz', iterations), Problem('abc', 'abd', 'ijkk', iterations), Problem('abc', 'abd', 'mrrjjj', iterations)]
with open(TestCopycat.Filename, 'wb') as outfile:
pickle.dump(problems, outfile)
problems = problems
</DeepExtract>
for problem in problems:
problem.test(iso_chi_squared)
|
def test(self):
print('Testing copycat with input file: {}'.format(TestCopycat.Filename))
try:
with open(TestCopycat.Filename, 'rb') as infile:
problems = pickle.load(infile)
except Exception as e:
print('Generating due to error:')
print(e)
print('Generating distributions for new file')
iterations = 30
problems = [Problem('abc', 'abd', 'efg', iterations), Problem('abc', 'abd', 'ijk', iterations), Problem('abc', 'abd', 'xyz', iterations), Problem('abc', 'abd', 'ijkk', iterations), Problem('abc', 'abd', 'mrrjjj', iterations)]
with open(TestCopycat.Filename, 'wb') as outfile:
pickle.dump(problems, outfile)
problems = problems
for problem in problems:
problem.test(iso_chi_squared)
|
copycat
|
positive
|
def cont(self):
"""Continues the execution of the target
:returns: True on success"""
try:
<DeepExtract>
try:
self.cmd_lock.acquire()
self.in_queue.put('resume')
ret = self.out_queue.get()
if 'FAILED' in ret:
raise RuntimeError("Command '%s' failed!" % 'resume')
resp = ret
except:
raise
finally:
self.cmd_lock.release()
</DeepExtract>
except:
self.log.exception('Error halting target')
return False
return True
|
def cont(self):
"""Continues the execution of the target
:returns: True on success"""
try:
try:
self.cmd_lock.acquire()
self.in_queue.put('resume')
ret = self.out_queue.get()
if 'FAILED' in ret:
raise RuntimeError("Command '%s' failed!" % 'resume')
resp = ret
except:
raise
finally:
self.cmd_lock.release()
except:
self.log.exception('Error halting target')
return False
return True
|
avatar2
|
positive
|
def parse_dataframes(self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
data = table_rename(dataframes[0], _column_adapter, remove_regex='[^\\w]', drop=True)
<DeepExtract>
data = data.copy()
data['date'] = data['date'].str.slice(0, 10)
if 'new_persons_fully_vaccinated' not in data.columns:
data['new_persons_fully_vaccinated'] = None
if 'total_persons_fully_vaccinated' not in data.columns:
data['total_persons_fully_vaccinated'] = None
data['new_vaccine_doses_administered'] = data['new_persons_vaccinated'] + data['new_persons_fully_vaccinated']
data['total_vaccine_doses_administered'] = data['total_persons_vaccinated'] + data['total_persons_fully_vaccinated']
data = data
</DeepExtract>
data['key'] = 'FR'
return data
|
def parse_dataframes(self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
data = table_rename(dataframes[0], _column_adapter, remove_regex='[^\\w]', drop=True)
data = data.copy()
data['date'] = data['date'].str.slice(0, 10)
if 'new_persons_fully_vaccinated' not in data.columns:
data['new_persons_fully_vaccinated'] = None
if 'total_persons_fully_vaccinated' not in data.columns:
data['total_persons_fully_vaccinated'] = None
data['new_vaccine_doses_administered'] = data['new_persons_vaccinated'] + data['new_persons_fully_vaccinated']
data['total_vaccine_doses_administered'] = data['total_persons_vaccinated'] + data['total_persons_fully_vaccinated']
data = data
data['key'] = 'FR'
return data
|
covid-19-open-data
|
positive
|
def test_loaded_dataset(self):
<DeepExtract>
dataset_directory = os.path.join(ydf_test_data_path(), 'dataset')
train_path = os.path.join(dataset_directory, 'adult_train.csv')
test_path = os.path.join(dataset_directory, 'adult_test.csv')
train = pd.read_csv(train_path)
test = pd.read_csv(test_path)
label = 'income'
def clean(ds):
ds[label] = np.where(ds[label] == '>50K', 1, 0)
dataset = ds
train = clean(train)
test = clean(test)
dataset = prepare_dataset(train, test, label, num_classes=2)
</DeepExtract>
<DeepExtract>
def df_to_ds(df):
(tf_train, _) = tf.data.Dataset.from_tensor_slices((dict(df.drop(dataset.label, axis=1)), df[dataset.label].values))
train_ds = df_to_ds(dataset.train).batch(1024)
test_ds = df_to_ds(dataset.test).batch(1024)
(tf_train, _) = (train_ds, test_ds)
</DeepExtract>
saved_dataset_path = os.path.join(tmp_path(), 'saved_model')
tf_train.save(saved_dataset_path)
tf_train_loaded = tf.data.Dataset.load(saved_dataset_path)
model = keras.RandomForestModel()
model.fit(tf_train_loaded)
<DeepExtract>
(tf_train, tf_test) = dataset_to_tf_dataset(dataset)
model.compile(metrics=['accuracy'])
evaluation = model.evaluate(tf_test)
logging.info('Pre-training evaluation: %s', evaluation)
predictions = model.predict(tf_test)
logging.info('Pre-training predictions: %s', predictions)
model.fit(x=tf_train)
logging.info('Trained model:')
model.summary()
plot = model_plotter.plot_model(model)
plot_path = os.path.join(self.get_temp_dir(), 'plot.html')
logging.info('Plot to %s', plot_path)
with open(plot_path, 'w') as f:
f.write(plot)
evaluation = model.evaluate(tf_test)
logging.info('Evaluation: %s', evaluation)
self.assertGreaterEqual(evaluation[1], 0.864)
predictions = model.predict(tf_test)
logging.info('Predictions: %s', predictions)
if True:
tf.keras.backend.clear_session()
saved_model_path = os.path.join(self.get_temp_dir(), 'saved_model')
new_saved_model_path = os.path.join(self.get_temp_dir(), 'saved_model_copy')
logging.info('Saving model to %s', saved_model_path)
model.save(saved_model_path)
tf.keras.backend.clear_session()
logging.info('Run model in separate binary')
process = subprocess.Popen([os.path.join(data_root_path(), 'tensorflow_decision_forests/keras/test_runner'), '--model_path', saved_model_path, '--dataset_path', os.path.join(ydf_test_data_path(), 'dataset', 'adult_test.csv')], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
logging.info('stdout:\n%s', stdout.decode('utf-8'))
logging.info('stderr:\n%s', stderr.decode('utf-8'))
logging.info('Copying model from %s to %s', saved_model_path, new_saved_model_path)
shutil.copytree(saved_model_path, new_saved_model_path)
shutil.rmtree(saved_model_path)
logging.info('Loading model from %s', new_saved_model_path)
loaded_model = models.load_model(new_saved_model_path)
loaded_model.summary()
evaluation = loaded_model.evaluate(tf_test)
logging.info('Loaded model evaluation: %s', evaluation)
self.assertGreaterEqual(evaluation[1], 0.864)
predictions = loaded_model.predict(tf_test)
logging.info('Loaded model predictions: %s', predictions)
</DeepExtract>
|
def test_loaded_dataset(self):
dataset_directory = os.path.join(ydf_test_data_path(), 'dataset')
train_path = os.path.join(dataset_directory, 'adult_train.csv')
test_path = os.path.join(dataset_directory, 'adult_test.csv')
train = pd.read_csv(train_path)
test = pd.read_csv(test_path)
label = 'income'
def clean(ds):
ds[label] = np.where(ds[label] == '>50K', 1, 0)
dataset = ds
train = clean(train)
test = clean(test)
dataset = prepare_dataset(train, test, label, num_classes=2)
def df_to_ds(df):
(tf_train, _) = tf.data.Dataset.from_tensor_slices((dict(df.drop(dataset.label, axis=1)), df[dataset.label].values))
train_ds = df_to_ds(dataset.train).batch(1024)
test_ds = df_to_ds(dataset.test).batch(1024)
(tf_train, _) = (train_ds, test_ds)
saved_dataset_path = os.path.join(tmp_path(), 'saved_model')
tf_train.save(saved_dataset_path)
tf_train_loaded = tf.data.Dataset.load(saved_dataset_path)
model = keras.RandomForestModel()
model.fit(tf_train_loaded)
(tf_train, tf_test) = dataset_to_tf_dataset(dataset)
model.compile(metrics=['accuracy'])
evaluation = model.evaluate(tf_test)
logging.info('Pre-training evaluation: %s', evaluation)
predictions = model.predict(tf_test)
logging.info('Pre-training predictions: %s', predictions)
model.fit(x=tf_train)
logging.info('Trained model:')
model.summary()
plot = model_plotter.plot_model(model)
plot_path = os.path.join(self.get_temp_dir(), 'plot.html')
logging.info('Plot to %s', plot_path)
with open(plot_path, 'w') as f:
f.write(plot)
evaluation = model.evaluate(tf_test)
logging.info('Evaluation: %s', evaluation)
self.assertGreaterEqual(evaluation[1], 0.864)
predictions = model.predict(tf_test)
logging.info('Predictions: %s', predictions)
if True:
tf.keras.backend.clear_session()
saved_model_path = os.path.join(self.get_temp_dir(), 'saved_model')
new_saved_model_path = os.path.join(self.get_temp_dir(), 'saved_model_copy')
logging.info('Saving model to %s', saved_model_path)
model.save(saved_model_path)
tf.keras.backend.clear_session()
logging.info('Run model in separate binary')
process = subprocess.Popen([os.path.join(data_root_path(), 'tensorflow_decision_forests/keras/test_runner'), '--model_path', saved_model_path, '--dataset_path', os.path.join(ydf_test_data_path(), 'dataset', 'adult_test.csv')], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
logging.info('stdout:\n%s', stdout.decode('utf-8'))
logging.info('stderr:\n%s', stderr.decode('utf-8'))
logging.info('Copying model from %s to %s', saved_model_path, new_saved_model_path)
shutil.copytree(saved_model_path, new_saved_model_path)
shutil.rmtree(saved_model_path)
logging.info('Loading model from %s', new_saved_model_path)
loaded_model = models.load_model(new_saved_model_path)
loaded_model.summary()
evaluation = loaded_model.evaluate(tf_test)
logging.info('Loaded model evaluation: %s', evaluation)
self.assertGreaterEqual(evaluation[1], 0.864)
predictions = loaded_model.predict(tf_test)
logging.info('Loaded model predictions: %s', predictions)
|
decision-forests
|
positive
|
def _create_matrix_of_two_qubit_gate(self, n_qubits, gate, control, target, subs=None):
matrix_of_target_gate = self.SYMPY_GATE[gate.uppername[1:]]
if subs:
matrix_of_target_gate = matrix_of_target_gate.subs(subs, simultaneous=True)
<DeepExtract>
[control] = [idx for idx in [control]]
gates = []
for idx in range(n_qubits):
if idx in [control]:
gates.append(self.SYMPY_GATE['_C0'])
else:
gates.append(eye(2))
c0 = reduce(TensorProduct, reversed(gates))
</DeepExtract>
<DeepExtract>
[control] = [idx for idx in [control]]
gates = []
for idx in range(n_qubits):
if idx in [control]:
gates.append(self.SYMPY_GATE['_C1'])
else:
gates.append(eye(2))
c1 = reduce(TensorProduct, reversed(gates))
</DeepExtract>
<DeepExtract>
[target] = [idx for idx in [target]]
gates = []
for idx in range(n_qubits):
if idx in [target]:
gates.append(matrix_of_target_gate)
else:
gates.append(eye(2))
tgt = reduce(TensorProduct, reversed(gates))
</DeepExtract>
return c0 + tgt * c1
|
def _create_matrix_of_two_qubit_gate(self, n_qubits, gate, control, target, subs=None):
matrix_of_target_gate = self.SYMPY_GATE[gate.uppername[1:]]
if subs:
matrix_of_target_gate = matrix_of_target_gate.subs(subs, simultaneous=True)
[control] = [idx for idx in [control]]
gates = []
for idx in range(n_qubits):
if idx in [control]:
gates.append(self.SYMPY_GATE['_C0'])
else:
gates.append(eye(2))
c0 = reduce(TensorProduct, reversed(gates))
[control] = [idx for idx in [control]]
gates = []
for idx in range(n_qubits):
if idx in [control]:
gates.append(self.SYMPY_GATE['_C1'])
else:
gates.append(eye(2))
c1 = reduce(TensorProduct, reversed(gates))
[target] = [idx for idx in [target]]
gates = []
for idx in range(n_qubits):
if idx in [target]:
gates.append(matrix_of_target_gate)
else:
gates.append(eye(2))
tgt = reduce(TensorProduct, reversed(gates))
return c0 + tgt * c1
|
Blueqat
|
positive
|
def __init__(self, C_in, C_out, expansion, stride):
assert stride in [1, 2]
self.res_connect = stride == 1 and C_in == C_out
<DeepExtract>
ret = int(C_in * expansion)
if 8 > 0 and C_in * expansion % 8 != 0:
ret = int((_py2_round(C_in * expansion / 8) or 8) * 8)
C_mid = ret
</DeepExtract>
ops = [Conv2d(C_in, C_mid, 1, 1, 0, bias=False), BatchNorm2d(C_mid), nn.ReLU(inplace=True), Shift(C_mid, 5, stride, 2), Conv2d(C_mid, C_out, 1, 1, 0, bias=False), BatchNorm2d(C_out)]
super(ShiftBlock5x5, self).__init__(*ops)
|
def __init__(self, C_in, C_out, expansion, stride):
assert stride in [1, 2]
self.res_connect = stride == 1 and C_in == C_out
ret = int(C_in * expansion)
if 8 > 0 and C_in * expansion % 8 != 0:
ret = int((_py2_round(C_in * expansion / 8) or 8) * 8)
C_mid = ret
ops = [Conv2d(C_in, C_mid, 1, 1, 0, bias=False), BatchNorm2d(C_mid), nn.ReLU(inplace=True), Shift(C_mid, 5, stride, 2), Conv2d(C_mid, C_out, 1, 1, 0, bias=False), BatchNorm2d(C_out)]
super(ShiftBlock5x5, self).__init__(*ops)
|
APNet
|
positive
|
def _initialize(self, first_init):
if not first_init:
return
<DeepExtract>
env.log('info', 'Starting Migration Import')
(status, models) = ('Import successful.', kwargs['import_export_types'])
empty_database = kwargs.get('empty_database_before_import', False)
if empty_database:
db.delete_all(*models)
fetch_instance = {'user': [current_user.name] if current_user else [], 'service': ['[Shared] Start', '[Shared] End', '[Shared] Placeholder']}
relations = defaultdict(lambda : defaultdict(dict))
for model in models:
path = vs.path / 'files' / folder / kwargs['name'] / f'{model}.yaml'
if not path.exists():
if kwargs.get('service_import') and model == 'service':
raise Exception('Invalid archive provided in service import.')
continue
with open(path, 'r') as migration_file:
instances = yaml.load(migration_file)
for instance in instances:
(type, relation_dict) = (instance.pop('type', model), {})
for (related_model, relation) in vs.relationships[type].items():
relation_dict[related_model] = instance.pop(related_model, [])
instance_private_properties = {property: env.get_password(instance.pop(property)) for property in list(instance) if property in vs.private_properties_set}
try:
existing_instance = instance['name'] in fetch_instance.get(model, [])
instance = db.factory(type, migration_import=True, no_fetch=empty_database and (not existing_instance), update_pools=kwargs.get('update_pools', False), import_mechanism=True, **instance)
if kwargs.get('service_import') and instance.type == 'workflow':
instance.edges = []
relations[type][instance.name] = relation_dict
for property in instance_private_properties.items():
setattr(instance, *property)
except Exception:
info(f'{str(instance)} could not be imported:\n{format_exc()}')
status = 'Partial import (see logs).'
db.session.commit()
for (model, instances) in relations.items():
for (instance_name, related_models) in instances.items():
for (property, value) in related_models.items():
if not value:
continue
relation = vs.relationships[model][property]
if relation['list']:
                    related_instances = (db.fetch(relation['model'], name=name, allow_none=True) for name in value)
value = list(filter(None, related_instances))
else:
value = db.fetch(relation['model'], name=value, allow_none=True)
try:
setattr(db.fetch(model, name=instance_name), property, value)
except Exception:
info('\n'.join(format_exc().splitlines()))
status = 'Partial import (see logs).'
db.session.commit()
if not kwargs.get('skip_model_update'):
for model in ('user', 'service', 'network'):
for instance in db.fetch_all(model):
instance.post_update()
if not kwargs.get('skip_pool_update'):
for pool in db.fetch_all('pool'):
pool.compute_pool()
db.session.commit()
env.log('info', status)
return status
</DeepExtract>
<DeepExtract>
env.log('info', 'Starting Git Content Update')
repo = vs.settings['app']['git_repository']
if not repo:
return
local_path = vs.path / 'network_data'
try:
if exists(local_path):
Repo(local_path).remotes.origin.pull()
else:
local_path.mkdir(parents=True, exist_ok=True)
Repo.clone_from(repo, local_path)
except Exception as exc:
env.log('error', f'Git pull failed ({str(exc)})')
try:
self.update_database_configurations_from_git()
except Exception as exc:
env.log('error', f'Update of device configurations failed ({str(exc)})')
env.log('info', 'Git Content Update Successful')
</DeepExtract>
<DeepExtract>
env.log('info', 'Starting Scan of Files')
path = env.file_path if not path else path.replace('>', '/')
folders = {Path(path)}
while folders:
folder = folders.pop()
for file in folder.iterdir():
if file.suffix in vs.settings['files']['ignored_types']:
continue
if file.is_dir():
folders.add(file)
if db.fetch('file', path=str(file), allow_none=True):
continue
db.factory('folder' if file.is_dir() else 'file', path=str(file))
db.session.commit()
env.log('info', 'Scan of Files Successful')
</DeepExtract>
|
def _initialize(self, first_init):
if not first_init:
return
env.log('info', 'Starting Migration Import')
(status, models) = ('Import successful.', kwargs['import_export_types'])
empty_database = kwargs.get('empty_database_before_import', False)
if empty_database:
db.delete_all(*models)
fetch_instance = {'user': [current_user.name] if current_user else [], 'service': ['[Shared] Start', '[Shared] End', '[Shared] Placeholder']}
relations = defaultdict(lambda : defaultdict(dict))
for model in models:
path = vs.path / 'files' / folder / kwargs['name'] / f'{model}.yaml'
if not path.exists():
if kwargs.get('service_import') and model == 'service':
raise Exception('Invalid archive provided in service import.')
continue
with open(path, 'r') as migration_file:
instances = yaml.load(migration_file)
for instance in instances:
(type, relation_dict) = (instance.pop('type', model), {})
for (related_model, relation) in vs.relationships[type].items():
relation_dict[related_model] = instance.pop(related_model, [])
instance_private_properties = {property: env.get_password(instance.pop(property)) for property in list(instance) if property in vs.private_properties_set}
try:
existing_instance = instance['name'] in fetch_instance.get(model, [])
instance = db.factory(type, migration_import=True, no_fetch=empty_database and (not existing_instance), update_pools=kwargs.get('update_pools', False), import_mechanism=True, **instance)
if kwargs.get('service_import') and instance.type == 'workflow':
instance.edges = []
relations[type][instance.name] = relation_dict
for property in instance_private_properties.items():
setattr(instance, *property)
except Exception:
info(f'{str(instance)} could not be imported:\n{format_exc()}')
status = 'Partial import (see logs).'
db.session.commit()
for (model, instances) in relations.items():
for (instance_name, related_models) in instances.items():
for (property, value) in related_models.items():
if not value:
continue
relation = vs.relationships[model][property]
if relation['list']:
related_instances = (db.fetch(relation['model'], name=vs.settings['app'].get('startup_migration', 'default'), allow_none=True) for vs.settings['app'].get('startup_migration', 'default') in value)
value = list(filter(None, related_instances))
else:
value = db.fetch(relation['model'], name=value, allow_none=True)
try:
setattr(db.fetch(model, name=instance_name), property, value)
except Exception:
info('\n'.join(format_exc().splitlines()))
status = 'Partial import (see logs).'
db.session.commit()
if not kwargs.get('skip_model_update'):
for model in ('user', 'service', 'network'):
for instance in db.fetch_all(model):
instance.post_update()
if not kwargs.get('skip_pool_update'):
for pool in db.fetch_all('pool'):
pool.compute_pool()
db.session.commit()
env.log('info', status)
return status
env.log('info', 'Starting Git Content Update')
repo = vs.settings['app']['git_repository']
if not repo:
return
local_path = vs.path / 'network_data'
try:
if exists(local_path):
Repo(local_path).remotes.origin.pull()
else:
local_path.mkdir(parents=True, exist_ok=True)
Repo.clone_from(repo, local_path)
except Exception as exc:
env.log('error', f'Git pull failed ({str(exc)})')
try:
self.update_database_configurations_from_git()
except Exception as exc:
env.log('error', f'Update of device configurations failed ({str(exc)})')
env.log('info', 'Git Content Update Successful')
env.log('info', 'Starting Scan of Files')
path = env.file_path if not path else path.replace('>', '/')
folders = {Path(path)}
while folders:
folder = folders.pop()
for file in folder.iterdir():
if file.suffix in vs.settings['files']['ignored_types']:
continue
if file.is_dir():
folders.add(file)
if db.fetch('file', path=str(file), allow_none=True):
continue
db.factory('folder' if file.is_dir() else 'file', path=str(file))
db.session.commit()
env.log('info', 'Scan of Files Successful')
|
eNMS
|
positive
|
def test(self):
self.update_exchange = Exchange('bgp-update', type='direct', durable=False, delivery_mode=1)
self.pg_amq_bridge = Exchange('amq.direct', type='direct', durable=True, delivery_mode=1)
self.update_queue = Queue('detection-testing', exchange=self.pg_amq_bridge, routing_key='update-update', durable=False, auto_delete=True, max_priority=1, consumer_arguments={'x-priority': 1})
self.hijack_db_queue = Queue('hijack-db-testing', exchange=self.pg_amq_bridge, routing_key='hijack-update', durable=False, auto_delete=True, max_priority=1, consumer_arguments={'x-priority': 1})
with Connection(RABBITMQ_URI) as connection:
<DeepExtract>
while True:
met_deps = set()
unmet_deps = set()
for service in DATA_WORKER_DEPENDENCIES:
try:
r = requests.get('http://{}:{}/health'.format(service, REST_PORT))
status = True if r.json()['status'] == 'running' else False
if not status:
unmet_deps.add(service)
else:
met_deps.add(service)
except Exception:
print("exception while waiting for service '{}'. Will retry".format(service))
if len(unmet_deps) == 0:
print('all needed data workers started: {}'.format(DATA_WORKER_DEPENDENCIES))
break
else:
print("'{}' data workers started, waiting for: '{}'".format(met_deps, unmet_deps))
time.sleep(1)
</DeepExtract>
while True:
try:
r = requests.get('http://{}:{}/config'.format(CONFIGURATION_HOST, REST_PORT))
result = r.json()
assert len(result) > 0
break
except Exception:
print('exception')
time.sleep(1)
time.sleep(1)
for testfile in os.listdir('testfiles/'):
<DeepExtract>
db_con = self.getDbConnection()
db_cur = db_con.cursor()
query = 'delete from bgp_updates; delete from hijacks;'
db_cur.execute(query)
db_con.commit()
db_cur.close()
db_con.close()
self.redis.flushall()
self.curr_idx = 0
self.send_cnt = 0
self.expected_messages = 0
</DeepExtract>
self.curr_test = testfile
self.messages = {}
with open('testfiles/{}'.format(testfile), 'r') as f:
self.messages = json.load(f)
send_len = len(self.messages)
with nested(connection.Consumer(self.hijack_db_queue, callbacks=[self.validate_message], accept=['ujson', 'txtjson'])):
send_cnt = 0
while send_cnt < send_len:
self.curr_idx = send_cnt
<DeepExtract>
with connection.Producer() as producer:
self.expected_messages = 0
for key in self.messages[self.curr_idx]:
if key != 'send':
if isinstance(self.messages[self.curr_idx][key], dict):
self.expected_messages += 1
else:
self.expected_messages += len(self.messages[self.curr_idx][key])
for key in self.messages[self.curr_idx]['send']:
if 'time' in key:
self.messages[self.curr_idx]['send'][key] += self.time_now
producer.publish(self.messages[self.curr_idx]['send'], exchange=self.update_exchange, routing_key='update', serializer='ujson')
</DeepExtract>
send_cnt += 1
while self.curr_idx != send_cnt:
time.sleep(0.1)
try:
connection.drain_events(timeout=60)
except socket.timeout:
assert False, 'Consumer timeout'
if send_cnt < send_len:
print('[+] Sleeping for 20 seconds to ensure auto-ignore works correctly')
time.sleep(20)
connection.close()
print('[+] Sleeping for 5 seconds...')
time.sleep(5)
|
def test(self):
self.update_exchange = Exchange('bgp-update', type='direct', durable=False, delivery_mode=1)
self.pg_amq_bridge = Exchange('amq.direct', type='direct', durable=True, delivery_mode=1)
self.update_queue = Queue('detection-testing', exchange=self.pg_amq_bridge, routing_key='update-update', durable=False, auto_delete=True, max_priority=1, consumer_arguments={'x-priority': 1})
self.hijack_db_queue = Queue('hijack-db-testing', exchange=self.pg_amq_bridge, routing_key='hijack-update', durable=False, auto_delete=True, max_priority=1, consumer_arguments={'x-priority': 1})
with Connection(RABBITMQ_URI) as connection:
while True:
met_deps = set()
unmet_deps = set()
for service in DATA_WORKER_DEPENDENCIES:
try:
r = requests.get('http://{}:{}/health'.format(service, REST_PORT))
status = True if r.json()['status'] == 'running' else False
if not status:
unmet_deps.add(service)
else:
met_deps.add(service)
except Exception:
print("exception while waiting for service '{}'. Will retry".format(service))
if len(unmet_deps) == 0:
print('all needed data workers started: {}'.format(DATA_WORKER_DEPENDENCIES))
break
else:
print("'{}' data workers started, waiting for: '{}'".format(met_deps, unmet_deps))
time.sleep(1)
while True:
try:
r = requests.get('http://{}:{}/config'.format(CONFIGURATION_HOST, REST_PORT))
result = r.json()
assert len(result) > 0
break
except Exception:
print('exception')
time.sleep(1)
time.sleep(1)
for testfile in os.listdir('testfiles/'):
db_con = self.getDbConnection()
db_cur = db_con.cursor()
query = 'delete from bgp_updates; delete from hijacks;'
db_cur.execute(query)
db_con.commit()
db_cur.close()
db_con.close()
self.redis.flushall()
self.curr_idx = 0
self.send_cnt = 0
self.expected_messages = 0
self.curr_test = testfile
self.messages = {}
with open('testfiles/{}'.format(testfile), 'r') as f:
self.messages = json.load(f)
send_len = len(self.messages)
with nested(connection.Consumer(self.hijack_db_queue, callbacks=[self.validate_message], accept=['ujson', 'txtjson'])):
send_cnt = 0
while send_cnt < send_len:
self.curr_idx = send_cnt
with connection.Producer() as producer:
self.expected_messages = 0
for key in self.messages[self.curr_idx]:
if key != 'send':
if isinstance(self.messages[self.curr_idx][key], dict):
self.expected_messages += 1
else:
self.expected_messages += len(self.messages[self.curr_idx][key])
for key in self.messages[self.curr_idx]['send']:
if 'time' in key:
self.messages[self.curr_idx]['send'][key] += self.time_now
producer.publish(self.messages[self.curr_idx]['send'], exchange=self.update_exchange, routing_key='update', serializer='ujson')
send_cnt += 1
while self.curr_idx != send_cnt:
time.sleep(0.1)
try:
connection.drain_events(timeout=60)
except socket.timeout:
assert False, 'Consumer timeout'
if send_cnt < send_len:
print('[+] Sleeping for 20 seconds to ensure auto-ignore works correctly')
time.sleep(20)
connection.close()
print('[+] Sleeping for 5 seconds...')
time.sleep(5)
|
artemis
|
positive
|
def _extend(self, obj):
stream = getattr(obj, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(obj)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in obj and obj['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in obj.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
<DeepExtract>
stream = getattr(v, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(v)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in v and v['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in v.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
v = dct
</DeepExtract>
v = self._addObject(v)
else:
<DeepExtract>
stream = getattr(v, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(v)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in v and v['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in v.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
v = dct
</DeepExtract>
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
<DeepExtract>
stream = getattr(va, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(va)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in va and va['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in va.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
va = dct
</DeepExtract>
va = self._addObject(va)
else:
<DeepExtract>
stream = getattr(va, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(va)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in va and va['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in va.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
va = dct
</DeepExtract>
result.append(va)
v = result
dct[k] = v
return dct
|
def _extend(self, obj):
stream = getattr(obj, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(obj)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in obj and obj['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in obj.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
stream = getattr(v, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(v)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in v and v['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in v.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
v = dct
v = self._addObject(v)
else:
stream = getattr(v, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(v)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in v and v['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in v.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
v = dct
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
stream = getattr(va, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(va)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in va and va['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in va.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
va = dct
va = self._addObject(va)
else:
stream = getattr(va, 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(va)
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in va and va['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in va.items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
va = dct
result.append(va)
v = result
dct[k] = v
return dct
|
endesive
|
positive
|
def execute(self, args, timeout):
(file, *rest) = args
cmd = [sys.executable, example_path(file)] + rest
print(cmd)
process = subprocess.Popen(cmd, shell=False, env=ENV)
<DeepExtract>
start = time.monotonic()
while time.monotonic() - start < timeout:
code = process.poll()
if code is not None:
code = code
time.sleep(poll_interval)
code = None
</DeepExtract>
if code is not None:
self.assertEqual(0, code)
return
print('Interrupting process (Control-C)')
os.kill(process.pid, signal.SIGINT)
if wait_terminated(process, timeout=5.0) is None:
print('Terminating process')
os.kill(process.pid, signal.SIGTERM)
if wait_terminated(process, timeout=5.0) is None:
print('Killing process')
os.kill(process.pid, signal.SIGKILL)
process.wait()
self.fail('Process did not finish in time: %s' % timeout)
|
def execute(self, args, timeout):
(file, *rest) = args
cmd = [sys.executable, example_path(file)] + rest
print(cmd)
process = subprocess.Popen(cmd, shell=False, env=ENV)
start = time.monotonic()
while time.monotonic() - start < timeout:
code = process.poll()
if code is not None:
code = code
time.sleep(poll_interval)
code = None
if code is not None:
self.assertEqual(0, code)
return
print('Interrupting process (Control-C)')
os.kill(process.pid, signal.SIGINT)
if wait_terminated(process, timeout=5.0) is None:
print('Terminating process')
os.kill(process.pid, signal.SIGTERM)
if wait_terminated(process, timeout=5.0) is None:
print('Killing process')
os.kill(process.pid, signal.SIGKILL)
process.wait()
self.fail('Process did not finish in time: %s' % timeout)
|
aiyprojects-raspbian
|
positive
|
def _split_node(self, split_types: List[str], split_ratio: float, shuffle: bool=True):
"""
Split the graph into len(split_ratio) graphs for node prediction.
Internally this splits node indices, and the model will only compute
loss for the embedding of
nodes in each split graph.
In node classification, the whole graph is observed in train/val/test
Only split over node_label_index
"""
if isinstance(split_types, list):
for split_type in split_types:
if split_type not in self.node_types:
raise TypeError(f'all split_type in split_types need to be in {self.node_types}, however split type: {{split_type}} is in split_types.')
elif split_types is None:
split_types = self.node_types
elif split_types not in self.node_types:
raise TypeError(f'split_types need to be in {self.node_types}, however split_types is: {split_types}.')
else:
split_types = [split_types]
for (split_type, num_node_type) in self.num_nodes(split_types).items():
if num_node_type < len(split_ratio):
raise ValueError(f'In _split_node num of nodes of node_type: {split_type} are smaller than number of splitted parts.')
split_graphs = []
for _ in range(len(split_ratio)):
graph_new = copy.copy(self)
graph_new.node_label_index = {}
graph_new.node_label = {}
split_graphs.append(graph_new)
for split_type in self.node_types:
if split_type in split_types:
<DeepExtract>
if split_type is None:
split_type = self.node_types
if isinstance(split_type, str) or isinstance(split_type, int) or isinstance(split_type, float):
if split_type in self[self._node_related_key]:
split_type_nodes_length = len(self[self._node_related_key][split_type])
else:
raise ValueError('Node type does not exist in stored node feature.')
if isinstance(split_type, list):
if not all((node_type_i in self[self._node_related_key] for node_type_i in split_type)):
raise ValueError('Some node types do not exist in stored node feature.')
else:
num_nodes_dict = {}
for node_type_i in split_type:
num_nodes_dict[node_type_i] = len(self[self._node_related_key][node_type_i])
split_type_nodes_length = num_nodes_dict
else:
raise TypeError('Node types have unexpected type.')
</DeepExtract>
if shuffle:
split_type_node = self.node_label_index[split_type][torch.randperm(split_type_nodes_length)]
else:
split_type_node = self.node_label_index[split_type]
split_empty_flag = False
nodes_split_list = []
split_offset = 0
for (i, split_ratio_i) in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = int(split_ratio_i * split_type_nodes_length)
nodes_split_i = split_type_node[split_offset:split_offset + num_split_i]
split_offset += num_split_i
else:
nodes_split_i = split_type_node[split_offset:]
if nodes_split_i.numel() == 0:
split_empty_flag = True
split_offset = 0
nodes_split_list = []
break
nodes_split_list.append(nodes_split_i)
if split_empty_flag:
for (i, split_ratio_i) in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = 1 + int(split_ratio_i * (split_type_nodes_length - len(split_ratio)))
nodes_split_i = split_type_node[split_offset:split_offset + num_split_i]
split_offset += num_split_i
else:
nodes_split_i = split_type_node[split_offset:]
nodes_split_list.append(nodes_split_i)
for (idx, nodes_split_i) in enumerate(nodes_split_list):
split_graphs[idx].node_label_index[split_type] = nodes_split_i
split_graphs[idx].node_label[split_type] = self.node_label[split_type][nodes_split_i]
else:
for (idx, graph) in enumerate(split_graphs):
graph.node_label_index[split_type] = self.node_label_index[split_type]
graph.node_label[split_type] = self.node_label[split_type]
split_graphs[idx] = graph
return split_graphs
|
def _split_node(self, split_types: List[str], split_ratio: float, shuffle: bool=True):
"""
Split the graph into len(split_ratio) graphs for node prediction.
Internally this splits node indices, and the model will only compute
loss for the embedding of
nodes in each split graph.
In node classification, the whole graph is observed in train/val/test
Only split over node_label_index
"""
if isinstance(split_types, list):
for split_type in split_types:
if split_type not in self.node_types:
raise TypeError(f'all split_type in split_types need to be in {self.node_types}, however split type: {{split_type}} is in split_types.')
elif split_types is None:
split_types = self.node_types
elif split_types not in self.node_types:
raise TypeError(f'split_types need to be in {self.node_types}, however split_types is: {split_types}.')
else:
split_types = [split_types]
for (split_type, num_node_type) in self.num_nodes(split_types).items():
if num_node_type < len(split_ratio):
raise ValueError(f'In _split_node num of nodes of node_type: {split_type} are smaller than number of splitted parts.')
split_graphs = []
for _ in range(len(split_ratio)):
graph_new = copy.copy(self)
graph_new.node_label_index = {}
graph_new.node_label = {}
split_graphs.append(graph_new)
for split_type in self.node_types:
if split_type in split_types:
if split_type is None:
split_type = self.node_types
if isinstance(split_type, str) or isinstance(split_type, int) or isinstance(split_type, float):
if split_type in self[self._node_related_key]:
split_type_nodes_length = len(self[self._node_related_key][split_type])
else:
raise ValueError('Node type does not exist in stored node feature.')
if isinstance(split_type, list):
if not all((node_type_i in self[self._node_related_key] for node_type_i in split_type)):
raise ValueError('Some node types do not exist in stored node feature.')
else:
num_nodes_dict = {}
for node_type_i in split_type:
num_nodes_dict[node_type_i] = len(self[self._node_related_key][node_type_i])
split_type_nodes_length = num_nodes_dict
else:
raise TypeError('Node types have unexpected type.')
if shuffle:
split_type_node = self.node_label_index[split_type][torch.randperm(split_type_nodes_length)]
else:
split_type_node = self.node_label_index[split_type]
split_empty_flag = False
nodes_split_list = []
split_offset = 0
for (i, split_ratio_i) in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = int(split_ratio_i * split_type_nodes_length)
nodes_split_i = split_type_node[split_offset:split_offset + num_split_i]
split_offset += num_split_i
else:
nodes_split_i = split_type_node[split_offset:]
if nodes_split_i.numel() == 0:
split_empty_flag = True
split_offset = 0
nodes_split_list = []
break
nodes_split_list.append(nodes_split_i)
if split_empty_flag:
for (i, split_ratio_i) in enumerate(split_ratio):
if i != len(split_ratio) - 1:
num_split_i = 1 + int(split_ratio_i * (split_type_nodes_length - len(split_ratio)))
nodes_split_i = split_type_node[split_offset:split_offset + num_split_i]
split_offset += num_split_i
else:
nodes_split_i = split_type_node[split_offset:]
nodes_split_list.append(nodes_split_i)
for (idx, nodes_split_i) in enumerate(nodes_split_list):
split_graphs[idx].node_label_index[split_type] = nodes_split_i
split_graphs[idx].node_label[split_type] = self.node_label[split_type][nodes_split_i]
else:
for (idx, graph) in enumerate(split_graphs):
graph.node_label_index[split_type] = self.node_label_index[split_type]
graph.node_label[split_type] = self.node_label[split_type]
split_graphs[idx] = graph
return split_graphs
|
deepsnap
|
positive
|
def apply(self, x):
<DeepExtract>
x = self.preprocess_core(x)
x = x
</DeepExtract>
<DeepExtract>
raise NotImplementedError()
</DeepExtract>
<DeepExtract>
x = self.postprocess_core(x)
x = x
</DeepExtract>
return x
|
def apply(self, x):
x = self.preprocess_core(x)
x = x
raise NotImplementedError()
x = self.postprocess_core(x)
x = x
return x
|
bayesian_unet
|
positive
|
def assert_block(value, expected):
assert value is not None
assert value['type'] == expected['type']
assert value['name'] == expected['name']
if value['type'] == 'airspace':
assert value['class'] == expected['class']
assert value['floor'] == expected['floor']
assert value['ceiling'] == expected['ceiling']
for x in zip(value['labels'], expected['labels']):
<DeepExtract>
if not isinstance(expected, list):
expected = coordinate(expected)
assert isinstance(*x, list)
assert len(*x) == 2
assert_float(*x[0], expected[0], 0.0001)
assert_float(*x[1], expected[1], 0.0001)
</DeepExtract>
elif value['type'] == 'terrain':
assert value['open'] == expected['open']
assert value['fill'] == expected['fill']
assert value['outline'] == expected['outline']
assert value['zoom'] == expected['zoom']
for (v, e) in zip(value['elements'], expected['elements']):
<DeepExtract>
assert v['type'] == e['type']
if v['type'] == 'point':
assert_location(v['location'], e['location'])
elif v['type'] == 'circle':
assert_location(v['center'], e['center'])
assert v['radius'] == e['radius']
elif v['type'] == 'arc':
assert_location(v['center'], e['center'])
assert v['clockwise'] == e['clockwise']
if 'radius' in v:
assert v['radius'] == e['radius']
assert v['start'] == e['start']
assert v['end'] == e['end']
else:
assert_location(v['start'], e['start'])
assert_location(v['end'], e['end'])
</DeepExtract>
|
def assert_block(value, expected):
assert value is not None
assert value['type'] == expected['type']
assert value['name'] == expected['name']
if value['type'] == 'airspace':
assert value['class'] == expected['class']
assert value['floor'] == expected['floor']
assert value['ceiling'] == expected['ceiling']
for x in zip(value['labels'], expected['labels']):
if not isinstance(expected, list):
expected = coordinate(expected)
assert isinstance(*x, list)
assert len(*x) == 2
assert_float(*x[0], expected[0], 0.0001)
assert_float(*x[1], expected[1], 0.0001)
elif value['type'] == 'terrain':
assert value['open'] == expected['open']
assert value['fill'] == expected['fill']
assert value['outline'] == expected['outline']
assert value['zoom'] == expected['zoom']
for (v, e) in zip(value['elements'], expected['elements']):
assert v['type'] == e['type']
if v['type'] == 'point':
assert_location(v['location'], e['location'])
elif v['type'] == 'circle':
assert_location(v['center'], e['center'])
assert v['radius'] == e['radius']
elif v['type'] == 'arc':
assert_location(v['center'], e['center'])
assert v['clockwise'] == e['clockwise']
if 'radius' in v:
assert v['radius'] == e['radius']
assert v['start'] == e['start']
assert v['end'] == e['end']
else:
assert_location(v['start'], e['start'])
assert_location(v['end'], e['end'])
|
aerofiles
|
positive
|
def compute_dr_wrt(self, wrt):
if wrt is self.a:
if False:
from scipy.sparse.linalg.interface import LinearOperator
return LinearOperator((self.size, wrt.size), lambda x: self.reorder(x.reshape(self.a.shape)).ravel())
else:
a = self.a
asz = a.size
ashape = a.shape
<DeepExtract>
key = None
</DeepExtract>
if key not in self.dr_lookup or key is None:
<DeepExtract>
JS = np.sort(np.arange(asz).reshape(ashape), self.axis, self.kind, self.order)
</DeepExtract>
IS = np.arange(JS.size)
data = np.ones_like(IS)
shape = JS.shape
self.dr_lookup[key] = sp.csc_matrix((data, (IS, JS.ravel())), shape=(self.r.size, wrt.r.size))
return self.dr_lookup[key]
|
def compute_dr_wrt(self, wrt):
if wrt is self.a:
if False:
from scipy.sparse.linalg.interface import LinearOperator
return LinearOperator((self.size, wrt.size), lambda x: self.reorder(x.reshape(self.a.shape)).ravel())
else:
a = self.a
asz = a.size
ashape = a.shape
key = None
if key not in self.dr_lookup or key is None:
JS = np.sort(np.arange(asz).reshape(ashape), self.axis, self.kind, self.order)
IS = np.arange(JS.size)
data = np.ones_like(IS)
shape = JS.shape
self.dr_lookup[key] = sp.csc_matrix((data, (IS, JS.ravel())), shape=(self.r.size, wrt.r.size))
return self.dr_lookup[key]
|
chumpy
|
positive
|
def strongly_connected_components(self):
"""
Compute the vertices in the strongly connected components
:return list of lists, one for each component's vertices:
"""
<DeepExtract>
stack = []
visited = set()
for v in self.get_vertex():
self.dfs_forward(v, stack, visited)
stack = stack
</DeepExtract>
<DeepExtract>
components = []
visited = set()
while stack:
v = stack.pop()
if v not in visited:
component = []
self.dfs_reverse(v, component, visited)
component.reverse()
components.append(component)
components = components
</DeepExtract>
return components
|
def strongly_connected_components(self):
"""
Compute the vertices in the strongly connected components
:return list of lists, one for each component's vertices:
"""
stack = []
visited = set()
for v in self.get_vertex():
self.dfs_forward(v, stack, visited)
stack = stack
components = []
visited = set()
while stack:
v = stack.pop()
if v not in visited:
component = []
self.dfs_reverse(v, component, visited)
component.reverse()
components.append(component)
components = components
return components
|
code-catalog-python
|
positive
|
def read(self):
"""Loads all lines in memory"""
<DeepExtract>
lines = list(iter(self))
</DeepExtract>
if self.metadata and 'encoding' in self.metadata:
encoding = self.metadata['encoding']
else:
encoding = 'utf-8'
return '\n'.join((str(line, 'utf-8') if isinstance(line, bytes) else line for line in lines))
|
def read(self):
"""Loads all lines in memory"""
lines = list(iter(self))
if self.metadata and 'encoding' in self.metadata:
encoding = self.metadata['encoding']
else:
encoding = 'utf-8'
return '\n'.join((str(line, 'utf-8') if isinstance(line, bytes) else line for line in lines))
|
clam
|
positive
|
def _run_navigation_episode(agent, client, time_out, target, episode_name):
"""
Run one episode of the benchmark (Pose) for a certain agent.
Args:
agent: the agent object
client: an object of the carla client to communicate
with the CARLA simulator
time_out: the time limit to complete this episode
target: the target to reach
episode_name: The name for saving images of this episode
"""
(measurements, sensor_data) = client.read_data()
client.send_control(VehicleControl())
initial_timestamp = measurements.game_timestamp
current_timestamp = initial_timestamp
measurement_vec = []
control_vec = []
frame = 0
distance = 10000
success = False
while current_timestamp - initial_timestamp < time_out * 1000 and (not success):
(measurements, sensor_data) = client.read_data()
<DeepExtract>
directions = self._planner.get_next_command((measurements.player_measurements.transform.location.x, measurements.player_measurements.transform.location.y, 0.22), (measurements.player_measurements.transform.orientation.x, measurements.player_measurements.transform.orientation.y, measurements.player_measurements.transform.orientation.z), (target.location.x, target.location.y, 0.22), (target.orientation.x, target.orientation.y, target.orientation.z))
directions = directions
</DeepExtract>
control = agent.run_step(measurements, sensor_data, directions, target)
client.send_control(control)
self._recording.save_images(sensor_data, episode_name, frame)
current_x = measurements.player_measurements.transform.location.x
current_y = measurements.player_measurements.transform.location.y
logging.info('Controller is Inputting:')
logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer, control.throttle, control.brake)
current_timestamp = measurements.game_timestamp
<DeepExtract>
distance = math.sqrt(([target.location.x, target.location.y][0] - [current_x, current_y][0]) ** 2 + ([target.location.x, target.location.y][1] - [current_x, current_y][1]) ** 2)
</DeepExtract>
logging.info('Status:')
logging.info('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f', float(distance), current_x, current_y, target.location.x, target.location.y)
if distance < self._distance_for_success:
success = True
frame += 1
measurement_vec.append(measurements.player_measurements)
control_vec.append(control)
if success:
return (1, measurement_vec, control_vec, float(current_timestamp - initial_timestamp) / 1000.0, distance)
return (0, measurement_vec, control_vec, time_out, distance)
|
def _run_navigation_episode(agent, client, time_out, target, episode_name):
"""
Run one episode of the benchmark (Pose) for a certain agent.
Args:
agent: the agent object
client: an object of the carla client to communicate
with the CARLA simulator
time_out: the time limit to complete this episode
target: the target to reach
episode_name: The name for saving images of this episode
"""
(measurements, sensor_data) = client.read_data()
client.send_control(VehicleControl())
initial_timestamp = measurements.game_timestamp
current_timestamp = initial_timestamp
measurement_vec = []
control_vec = []
frame = 0
distance = 10000
success = False
while current_timestamp - initial_timestamp < time_out * 1000 and (not success):
(measurements, sensor_data) = client.read_data()
directions = self._planner.get_next_command((measurements.player_measurements.transform.location.x, measurements.player_measurements.transform.location.y, 0.22), (measurements.player_measurements.transform.orientation.x, measurements.player_measurements.transform.orientation.y, measurements.player_measurements.transform.orientation.z), (target.location.x, target.location.y, 0.22), (target.orientation.x, target.orientation.y, target.orientation.z))
directions = directions
control = agent.run_step(measurements, sensor_data, directions, target)
client.send_control(control)
self._recording.save_images(sensor_data, episode_name, frame)
current_x = measurements.player_measurements.transform.location.x
current_y = measurements.player_measurements.transform.location.y
logging.info('Controller is Inputting:')
logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer, control.throttle, control.brake)
current_timestamp = measurements.game_timestamp
distance = math.sqrt(([target.location.x, target.location.y][0] - [current_x, current_y][0]) ** 2 + ([target.location.x, target.location.y][1] - [current_x, current_y][1]) ** 2)
logging.info('Status:')
logging.info('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f', float(distance), current_x, current_y, target.location.x, target.location.y)
if distance < self._distance_for_success:
success = True
frame += 1
measurement_vec.append(measurements.player_measurements)
control_vec.append(control)
if success:
return (1, measurement_vec, control_vec, float(current_timestamp - initial_timestamp) / 1000.0, distance)
return (0, measurement_vec, control_vec, time_out, distance)
|
carla-rl
|
positive
|
def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs):
"""
Perform a POST on a certain path.
:param path: the path
:param payload: the request payload
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
<DeepExtract>
request = Request()
request.destination = self.server
request.code = defines.Codes.POST.number
request.uri_path = path
request = request
</DeepExtract>
request.token = generate_random_token(2)
request.payload = payload
if no_response:
request.add_no_response()
request.type = defines.Types['NON']
for (k, v) in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout, no_response=no_response)
|
def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs):
"""
Perform a POST on a certain path.
:param path: the path
:param payload: the request payload
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = Request()
request.destination = self.server
request.code = defines.Codes.POST.number
request.uri_path = path
request = request
request.token = generate_random_token(2)
request.payload = payload
if no_response:
request.add_no_response()
request.type = defines.Types['NON']
for (k, v) in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout, no_response=no_response)
|
CoAPthon3
|
positive
|
def do_config(self):
<DeepExtract>
loginfile = os.path.expanduser(LOGINFILE)
configfile = os.path.expanduser(DATAFILE)
configdir = os.path.dirname(configfile)
values = {'configdir': configdir, 'value': configfile, 'loginfile': loginfile}
values = values
</DeepExtract>
<DeepExtract>
format = format or VAULT_FORMAT
field = field or VAULT_FIELD
if field:
if field not in values:
logg.error('no such field %s/%s %s', section, name, field)
raise VaultError('requested field not found')
sys.stdout.write(values[field])
elif format in ['table', '']:
for name in sorted(values):
print(name, values[name])
elif format in ['json', '']:
print(json.dumps({'data': values}))
else:
print(values['value'])
</DeepExtract>
|
def do_config(self):
loginfile = os.path.expanduser(LOGINFILE)
configfile = os.path.expanduser(DATAFILE)
configdir = os.path.dirname(configfile)
values = {'configdir': configdir, 'value': configfile, 'loginfile': loginfile}
values = values
format = format or VAULT_FORMAT
field = field or VAULT_FIELD
if field:
if field not in values:
logg.error('no such field %s/%s %s', section, name, field)
raise VaultError('requested field not found')
sys.stdout.write(values[field])
elif format in ['table', '']:
for name in sorted(values):
print(name, values[name])
elif format in ['json', '']:
print(json.dumps({'data': values}))
else:
print(values['value'])
|
docker-systemctl-images
|
positive
|
def _save(self, state_vec, other_vecs=[]):
self.state_vec = state_vec
<DeepExtract>
(inner_low, inner_high) = low_high(self.inner_diameters)
(outer_low, outer_high) = low_high(self.outer_diameters)
(inner_angle_low, inner_angle_high) = low_high(self.inner_angles)
(outer_angle_low, outer_angle_high) = low_high(self.outer_angles)
inst = self.instrument_class()
scale = self.scale
p = 0
inst.length = state_vec[0] * self.initial_length * scale
p += 1
inst.hole_positions = [item * inst.length for item in state_vec[p:p + self.n_holes]]
p += self.n_holes
signed_sqrt = lambda x: math.sqrt(abs(x)) * (1 if x >= 0 else -1)
inst.hole_diameters = [self.min_hole_diameters[i] + signed_sqrt(item) * (self.max_hole_diameters[i] - self.min_hole_diameters[i]) for (i, item) in enumerate(state_vec[p:p + self.n_holes])]
p += self.n_holes
inner_kinks = [item * inst.length for item in state_vec[p:p + len(self.inner_diameters) - 2]]
p += len(self.inner_diameters) - 2
outer_kinks = [item * inst.length for item in state_vec[p:p + len(self.outer_diameters) - 2]]
p += len(self.outer_diameters) - 2
assert p == len(state_vec)
inst.inner = profile.curved_profile([0.0] + inner_kinks + [inst.length], inner_low, inner_high, inner_angle_low, inner_angle_high)
inst.outer = profile.curved_profile([0.0] + outer_kinks + [inst.length], outer_low, outer_high, outer_angle_low, outer_angle_high)
if self.outer_add:
inst.outer = inst.outer + inst.inner
inst.hole_angles = self.hole_angles
inst.inner_hole_positions = [None] * self.n_holes
inst.hole_lengths = [None] * self.n_holes
for i in range(self.n_holes):
radians = inst.hole_angles[i] * math.pi / 180.0
thickness = (inst.outer(inst.hole_positions[i]) - inst.inner(inst.hole_positions[i])) * 0.5
shift = math.sin(radians) * thickness
inst.inner_hole_positions[i] = inst.hole_positions[i] + shift
inst.hole_lengths[i] = math.sqrt(thickness * thickness + shift * shift)
inst.inner_kinks = inner_kinks
inst.outer_kinks = outer_kinks
inst.cone_step = self.cone_step
inst.closed_top = self.closed_top
self.instrument = inst
</DeepExtract>
with open(os.path.join(self.output_dir, 'data.pickle'), 'wb') as f:
if sys.version_info.major == 3:
pickle.dump(self, f, fix_imports=True)
else:
pickle.dump(self, f)
<DeepExtract>
patched_instrument = self.instrument
</DeepExtract>
patched_instrument.prepare()
patched_instrument.prepare_phase()
diagram = svg.SVG()
for vec in random.sample(other_vecs, min(20, len(other_vecs))):
<DeepExtract>
instrument = self.unpack(vec)
instrument.prepare()
for i in range(self.n_holes):
diagram.circle(0.0, -instrument.inner_hole_positions[i], instrument.hole_diameters[i], '#ffaaaa')
diagram.circle(0.0, -instrument.hole_positions[i], instrument.hole_diameters[i], '#aaaaaa')
diagram.profile(instrument.outer, '#aaaaaa')
diagram.profile(instrument.stepped_inner, '#aaaaaa')
if self.closed_top:
d = instrument.stepped_inner(instrument.length)
diagram.line([(-0.5 * d, -instrument.length), (0.5 * d, -instrument.length)], '#aaaaaa')
tick_x = instrument.outer.maximum() * -0.625
for pos in instrument.inner_kinks:
diagram.line([(tick_x, -pos), (tick_x - 5, -pos)], '#aaaaaa')
for pos in instrument.inner.pos[1:-1]:
diagram.line([(tick_x - 2, -pos), (tick_x - 3, -pos)], '#aaaaaa')
for pos in instrument.outer_kinks:
diagram.line([(tick_x - 10, -pos), (tick_x - 15, -pos)], '#aaaaaa')
for pos in instrument.outer.pos[1:-1]:
diagram.line([(tick_x - 12, -pos), (tick_x - 13, -pos)], '#aaaaaa')
</DeepExtract>
<DeepExtract>
instrument = self.unpack(state_vec)
instrument.prepare()
for i in range(self.n_holes):
diagram.circle(0.0, -instrument.inner_hole_positions[i], instrument.hole_diameters[i], red_color)
diagram.circle(0.0, -instrument.hole_positions[i], instrument.hole_diameters[i], color)
diagram.profile(instrument.outer, color)
diagram.profile(instrument.stepped_inner, color)
if self.closed_top:
d = instrument.stepped_inner(instrument.length)
diagram.line([(-0.5 * d, -instrument.length), (0.5 * d, -instrument.length)], color)
tick_x = instrument.outer.maximum() * -0.625
for pos in instrument.inner_kinks:
diagram.line([(tick_x, -pos), (tick_x - 5, -pos)], color)
for pos in instrument.inner.pos[1:-1]:
diagram.line([(tick_x - 2, -pos), (tick_x - 3, -pos)], color)
for pos in instrument.outer_kinks:
diagram.line([(tick_x - 10, -pos), (tick_x - 15, -pos)], color)
for pos in instrument.outer.pos[1:-1]:
diagram.line([(tick_x - 12, -pos), (tick_x - 13, -pos)], color)
</DeepExtract>
text_x = diagram.max_x * 1.25
text_y = 0
for i in range(self.n_holes):
this_y = min(text_y, -self.instrument.hole_positions[i])
text_y = diagram.text(text_x, this_y, '%.1fmm' % self.instrument.hole_diameters[i])
diagram.text(text_x + 90.0, this_y, 'at %.1fmm' % self.instrument.hole_positions[i])
if i < self.n_holes - 1:
this_y = min(text_y, -0.5 * (self.instrument.hole_positions[i] + self.instrument.hole_positions[i + 1]))
diagram.text(text_x + 45.0, this_y, '%.1fmm' % (self.instrument.hole_positions[i + 1] - self.instrument.hole_positions[i]))
diagram.text(text_x + 90.0, min(text_y, -self.instrument.length), '%.1fmm' % self.instrument.length)
text_x = diagram.max_x
graph_x = text_x + 200
emit_x = graph_x + 220
text_y = 0
for item in self.fingerings:
note = item[0]
fingers = item[1]
<DeepExtract>
if isinstance(note, str):
note = SPEED_OF_SOUND / fqc(note)
w1 = note / 2 ** (self.transpose / 12.0)
</DeepExtract>
if len(item) < 3:
w2 = patched_instrument.true_wavelength_near(w1, fingers)
else:
w2 = patched_instrument.true_nth_wavelength_near(w1, fingers, item[2])
cents = int(round(log2(w2 / w1) * 1200.0))
n_probes = 301
max_cents = 2400.0
width = 200.0
step = pow(0.5, max_cents / ((n_probes - 1) * 0.5 * 1200.0))
low = w1 * pow(step, -(n_probes - 1) / 2.0)
probes = [low * pow(step, i) for i in range(n_probes)]
scores = [patched_instrument.resonance_phase(probe, fingers) for probe in probes]
points = [(graph_x + i * width / n_probes, text_y - ((score + 0.5) % 1.0 - 0.5) * 14.0) for (i, score) in enumerate(scores)]
for i in range(len(probes) - 1):
c = math.floor(scores[i] + 0.5)
if c != math.floor(scores[i + 1] + 0.5):
continue
(r, g, b) = [int((math.cos((c / 5.0 + offset) * math.pi * 2.0) * 0.5 + 0.5) * 200) for offset in [0.0, 1.0 / 3, 2.0 / 3]]
diagram.line(points[i:i + 2], '#%02x%02x%02x' % (r, g, b), 0.2)
diagram.line([(graph_x + width * 0.5, text_y + 7), (graph_x + width * 0.5, text_y - 7)], '#0000ff', 0.2)
diagram.line([(graph_x, text_y), (graph_x + width, text_y)], '#0000ff', 0.2)
diagram.text(text_x, text_y, '%5s %s %-4d cents' % (describe(w1), ' ' if cents == 0 else ' flat' if cents > 0 else 'sharp', abs(cents)))
phase = patched_instrument.resonance_phase(w2, fingers)
diagram.text(emit_x, text_y, '%f' % phase)
text_y -= 25
diagram.text(graph_x, text_y - 10, 'Nearby resonances:', color='#000000')
text_y -= 50.0 + 10.0 * max(len(self.inner_diameters), len(self.outer_diameters))
diagram.text(graph_x - 150.0, text_y, 'Outer diameters:', color='#000000')
kinks = [0.0] + self.instrument.outer_kinks + [self.instrument.length]
for (i, item) in enumerate(self.outer_diameters):
diagram.text(graph_x - 150.0, text_y + 10.0 + (len(self.outer_diameters) - i) * 10.0, describe_low_high(item) + 'mm at %.1fmm' % kinks[i])
diagram.text(graph_x, text_y, 'Inner diameters:', color='#000000')
kinks = [0.0] + self.instrument.inner_kinks + [self.instrument.length]
for (i, item) in enumerate(self.inner_diameters):
diagram.text(graph_x, text_y + 10.0 + (len(self.inner_diameters) - i) * 10.0, describe_low_high(item) + 'mm at %.1fmm' % kinks[i])
diagram.save(os.path.join(self.output_dir, 'diagram.svg'))
del self.instrument
|
def _save(self, state_vec, other_vecs=[]):
self.state_vec = state_vec
(inner_low, inner_high) = low_high(self.inner_diameters)
(outer_low, outer_high) = low_high(self.outer_diameters)
(inner_angle_low, inner_angle_high) = low_high(self.inner_angles)
(outer_angle_low, outer_angle_high) = low_high(self.outer_angles)
inst = self.instrument_class()
scale = self.scale
p = 0
inst.length = state_vec[0] * self.initial_length * scale
p += 1
inst.hole_positions = [item * inst.length for item in state_vec[p:p + self.n_holes]]
p += self.n_holes
signed_sqrt = lambda x: math.sqrt(abs(x)) * (1 if x >= 0 else -1)
inst.hole_diameters = [self.min_hole_diameters[i] + signed_sqrt(item) * (self.max_hole_diameters[i] - self.min_hole_diameters[i]) for (i, item) in enumerate(state_vec[p:p + self.n_holes])]
p += self.n_holes
inner_kinks = [item * inst.length for item in state_vec[p:p + len(self.inner_diameters) - 2]]
p += len(self.inner_diameters) - 2
outer_kinks = [item * inst.length for item in state_vec[p:p + len(self.outer_diameters) - 2]]
p += len(self.outer_diameters) - 2
assert p == len(state_vec)
inst.inner = profile.curved_profile([0.0] + inner_kinks + [inst.length], inner_low, inner_high, inner_angle_low, inner_angle_high)
inst.outer = profile.curved_profile([0.0] + outer_kinks + [inst.length], outer_low, outer_high, outer_angle_low, outer_angle_high)
if self.outer_add:
inst.outer = inst.outer + inst.inner
inst.hole_angles = self.hole_angles
inst.inner_hole_positions = [None] * self.n_holes
inst.hole_lengths = [None] * self.n_holes
for i in range(self.n_holes):
radians = inst.hole_angles[i] * math.pi / 180.0
thickness = (inst.outer(inst.hole_positions[i]) - inst.inner(inst.hole_positions[i])) * 0.5
shift = math.sin(radians) * thickness
inst.inner_hole_positions[i] = inst.hole_positions[i] + shift
inst.hole_lengths[i] = math.sqrt(thickness * thickness + shift * shift)
inst.inner_kinks = inner_kinks
inst.outer_kinks = outer_kinks
inst.cone_step = self.cone_step
inst.closed_top = self.closed_top
self.instrument = inst
with open(os.path.join(self.output_dir, 'data.pickle'), 'wb') as f:
if sys.version_info.major == 3:
pickle.dump(self, f, fix_imports=True)
else:
pickle.dump(self, f)
patched_instrument = self.instrument
patched_instrument.prepare()
patched_instrument.prepare_phase()
diagram = svg.SVG()
for vec in random.sample(other_vecs, min(20, len(other_vecs))):
instrument = self.unpack(vec)
instrument.prepare()
for i in range(self.n_holes):
diagram.circle(0.0, -instrument.inner_hole_positions[i], instrument.hole_diameters[i], '#ffaaaa')
diagram.circle(0.0, -instrument.hole_positions[i], instrument.hole_diameters[i], '#aaaaaa')
diagram.profile(instrument.outer, '#aaaaaa')
diagram.profile(instrument.stepped_inner, '#aaaaaa')
if self.closed_top:
d = instrument.stepped_inner(instrument.length)
diagram.line([(-0.5 * d, -instrument.length), (0.5 * d, -instrument.length)], '#aaaaaa')
tick_x = instrument.outer.maximum() * -0.625
for pos in instrument.inner_kinks:
diagram.line([(tick_x, -pos), (tick_x - 5, -pos)], '#aaaaaa')
for pos in instrument.inner.pos[1:-1]:
diagram.line([(tick_x - 2, -pos), (tick_x - 3, -pos)], '#aaaaaa')
for pos in instrument.outer_kinks:
diagram.line([(tick_x - 10, -pos), (tick_x - 15, -pos)], '#aaaaaa')
for pos in instrument.outer.pos[1:-1]:
diagram.line([(tick_x - 12, -pos), (tick_x - 13, -pos)], '#aaaaaa')
instrument = self.unpack(state_vec)
instrument.prepare()
for i in range(self.n_holes):
diagram.circle(0.0, -instrument.inner_hole_positions[i], instrument.hole_diameters[i], red_color)
diagram.circle(0.0, -instrument.hole_positions[i], instrument.hole_diameters[i], color)
diagram.profile(instrument.outer, color)
diagram.profile(instrument.stepped_inner, color)
if self.closed_top:
d = instrument.stepped_inner(instrument.length)
diagram.line([(-0.5 * d, -instrument.length), (0.5 * d, -instrument.length)], color)
tick_x = instrument.outer.maximum() * -0.625
for pos in instrument.inner_kinks:
diagram.line([(tick_x, -pos), (tick_x - 5, -pos)], color)
for pos in instrument.inner.pos[1:-1]:
diagram.line([(tick_x - 2, -pos), (tick_x - 3, -pos)], color)
for pos in instrument.outer_kinks:
diagram.line([(tick_x - 10, -pos), (tick_x - 15, -pos)], color)
for pos in instrument.outer.pos[1:-1]:
diagram.line([(tick_x - 12, -pos), (tick_x - 13, -pos)], color)
text_x = diagram.max_x * 1.25
text_y = 0
for i in range(self.n_holes):
this_y = min(text_y, -self.instrument.hole_positions[i])
text_y = diagram.text(text_x, this_y, '%.1fmm' % self.instrument.hole_diameters[i])
diagram.text(text_x + 90.0, this_y, 'at %.1fmm' % self.instrument.hole_positions[i])
if i < self.n_holes - 1:
this_y = min(text_y, -0.5 * (self.instrument.hole_positions[i] + self.instrument.hole_positions[i + 1]))
diagram.text(text_x + 45.0, this_y, '%.1fmm' % (self.instrument.hole_positions[i + 1] - self.instrument.hole_positions[i]))
diagram.text(text_x + 90.0, min(text_y, -self.instrument.length), '%.1fmm' % self.instrument.length)
text_x = diagram.max_x
graph_x = text_x + 200
emit_x = graph_x + 220
text_y = 0
for item in self.fingerings:
note = item[0]
fingers = item[1]
if isinstance(note, str):
note = SPEED_OF_SOUND / fqc(note)
w1 = note / 2 ** (self.transpose / 12.0)
if len(item) < 3:
w2 = patched_instrument.true_wavelength_near(w1, fingers)
else:
w2 = patched_instrument.true_nth_wavelength_near(w1, fingers, item[2])
cents = int(round(log2(w2 / w1) * 1200.0))
n_probes = 301
max_cents = 2400.0
width = 200.0
step = pow(0.5, max_cents / ((n_probes - 1) * 0.5 * 1200.0))
low = w1 * pow(step, -(n_probes - 1) / 2.0)
probes = [low * pow(step, i) for i in range(n_probes)]
scores = [patched_instrument.resonance_phase(probe, fingers) for probe in probes]
points = [(graph_x + i * width / n_probes, text_y - ((score + 0.5) % 1.0 - 0.5) * 14.0) for (i, score) in enumerate(scores)]
for i in range(len(probes) - 1):
c = math.floor(scores[i] + 0.5)
if c != math.floor(scores[i + 1] + 0.5):
continue
(r, g, b) = [int((math.cos((c / 5.0 + offset) * math.pi * 2.0) * 0.5 + 0.5) * 200) for offset in [0.0, 1.0 / 3, 2.0 / 3]]
diagram.line(points[i:i + 2], '#%02x%02x%02x' % (r, g, b), 0.2)
diagram.line([(graph_x + width * 0.5, text_y + 7), (graph_x + width * 0.5, text_y - 7)], '#0000ff', 0.2)
diagram.line([(graph_x, text_y), (graph_x + width, text_y)], '#0000ff', 0.2)
diagram.text(text_x, text_y, '%5s %s %-4d cents' % (describe(w1), ' ' if cents == 0 else ' flat' if cents > 0 else 'sharp', abs(cents)))
phase = patched_instrument.resonance_phase(w2, fingers)
diagram.text(emit_x, text_y, '%f' % phase)
text_y -= 25
diagram.text(graph_x, text_y - 10, 'Nearby resonances:', color='#000000')
text_y -= 50.0 + 10.0 * max(len(self.inner_diameters), len(self.outer_diameters))
diagram.text(graph_x - 150.0, text_y, 'Outer diameters:', color='#000000')
kinks = [0.0] + self.instrument.outer_kinks + [self.instrument.length]
for (i, item) in enumerate(self.outer_diameters):
diagram.text(graph_x - 150.0, text_y + 10.0 + (len(self.outer_diameters) - i) * 10.0, describe_low_high(item) + 'mm at %.1fmm' % kinks[i])
diagram.text(graph_x, text_y, 'Inner diameters:', color='#000000')
kinks = [0.0] + self.instrument.inner_kinks + [self.instrument.length]
for (i, item) in enumerate(self.inner_diameters):
diagram.text(graph_x, text_y + 10.0 + (len(self.inner_diameters) - i) * 10.0, describe_low_high(item) + 'mm at %.1fmm' % kinks[i])
diagram.save(os.path.join(self.output_dir, 'diagram.svg'))
del self.instrument
|
demakein
|
positive
|
def _load_data(self, normalise=True):
data = []
for (field, label) in self._fields:
if normalise:
<DeepExtract>
array = (field.to_numpy() - np.amin(field.to_numpy())) / (np.amax(field.to_numpy()) - np.amin(field.to_numpy()))
</DeepExtract>
else:
array = field.to_numpy()
data.append((array, label, field))
return data
|
def _load_data(self, normalise=True):
data = []
for (field, label) in self._fields:
if normalise:
array = (field.to_numpy() - np.amin(field.to_numpy())) / (np.amax(field.to_numpy()) - np.amin(field.to_numpy()))
else:
array = field.to_numpy()
data.append((array, label, field))
return data
|
climetlab
|
positive
|
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
if error and self._is_connection_error(error):
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
if redirect is not None:
redirect -= 1
else:
_observed_errors += 1
<DeepExtract>
params = dict(total=self.total, connect=self.connect, read=self.read, redirect=self.redirect, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.backoff_factor, raise_on_redirect=self.raise_on_redirect, _observed_errors=self._observed_errors)
    params.update(dict(total=total, connect=connect, read=read, redirect=redirect, _observed_errors=_observed_errors))
new_retry = type(self)(**params)
</DeepExtract>
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error)
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
|
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
if error and self._is_connection_error(error):
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
if redirect is not None:
redirect -= 1
else:
_observed_errors += 1
params = dict(total=self.total, connect=self.connect, read=self.read, redirect=self.redirect, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.backoff_factor, raise_on_redirect=self.raise_on_redirect, _observed_errors=self._observed_errors)
    params.update(dict(total=total, connect=connect, read=read, redirect=redirect, _observed_errors=_observed_errors))
new_retry = type(self)(**params)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error)
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
|
crunchy-xml-decoder
|
positive
|
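The pair above is a vendored copy of urllib3's Retry.increment, which never mutates the current object: it copies the counters, decrements the relevant one, rebuilds a new Retry, and raises once a counter drops below zero. The class below is a simplified, hypothetical stand-in that illustrates the same immutable-counter pattern; it is not urllib3's actual API.

class SimpleRetry:
    """Toy retry bookkeeping: each failure yields a new object with one fewer attempt."""

    def __init__(self, total=3, connect=None, read=None):
        self.total = total
        self.connect = connect
        self.read = read

    def is_exhausted(self):
        counters = [c for c in (self.total, self.connect, self.read) if c is not None]
        return bool(counters) and min(counters) < 0

    def increment(self, kind='total'):
        params = {'total': self.total, 'connect': self.connect, 'read': self.read}
        if params['total'] is not None:
            params['total'] -= 1
        if kind in ('connect', 'read') and params[kind] is not None:
            params[kind] -= 1
        new_retry = type(self)(**params)
        if new_retry.is_exhausted():
            raise RuntimeError('too many retries')
        return new_retry

retry = SimpleRetry(total=1)
retry = retry.increment()    # total goes 1 -> 0, still allowed
# retry.increment()          # would raise: total would drop below zero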
def test_valid_data(self):
<DeepExtract>
form = ProfileForm(user=self.standard_user, data={'full_name': name, 'bio': 'My bio', 'roles': [self.mentor.id, self.mentee.id]})
</DeepExtract>
self.assertTrue(form.is_valid())
|
def test_valid_data(self):
form = ProfileForm(user=self.standard_user, data={'full_name': name, 'bio': 'My bio', 'roles': [self.mentor.id, self.mentee.id]})
self.assertTrue(form.is_valid())
|
connect
|
positive
|
def _Bitand(self, t):
""" Bit and operation.
"""
for (i, node) in enumerate(t.nodes):
<DeepExtract>
self.f.write('(')
</DeepExtract>
<DeepExtract>
if isinstance(node, list):
for t in node:
self._dispatch(t)
return
meth = getattr(self, '_' + node.__class__.__name__)
if node.__class__.__name__ == 'NoneType' and (not self._do_indent):
return
meth(node)
</DeepExtract>
<DeepExtract>
self.f.write(')')
</DeepExtract>
if i != len(t.nodes) - 1:
<DeepExtract>
self.f.write(' & ')
</DeepExtract>
|
def _Bitand(self, t):
""" Bit and operation.
"""
for (i, node) in enumerate(t.nodes):
self.f.write('(')
if isinstance(node, list):
for t in node:
self._dispatch(t)
return
meth = getattr(self, '_' + node.__class__.__name__)
if node.__class__.__name__ == 'NoneType' and (not self._do_indent):
return
meth(node)
self.f.write(')')
if i != len(t.nodes) - 1:
self.f.write(' & ')
|
biom-format
|
positive
|
def startmonitoring(self, onoff):
if onoff:
if self.oktostart:
<DeepExtract>
self.framecombo.setEnabled(False)
self.stopthread()
self.snifferthread = snifferThread(self.ecu_filter, self.ecurequests.baudrate)
self.snifferthread.dataready.connect(self.callback)
self.snifferthread.start()
</DeepExtract>
else:
<DeepExtract>
if self.snifferthread:
self.snifferthread.stop()
self.snifferthread.dataready.disconnect()
self.snifferthread.quit()
self.snifferthread = None
self.framecombo.setEnabled(True)
</DeepExtract>
|
def startmonitoring(self, onoff):
if onoff:
if self.oktostart:
self.framecombo.setEnabled(False)
self.stopthread()
self.snifferthread = snifferThread(self.ecu_filter, self.ecurequests.baudrate)
self.snifferthread.dataready.connect(self.callback)
self.snifferthread.start()
else:
if self.snifferthread:
self.snifferthread.stop()
self.snifferthread.dataready.disconnect()
self.snifferthread.quit()
self.snifferthread = None
self.framecombo.setEnabled(True)
|
ddt4all
|
positive
|
def __init__(self, parent):
super().__init__(parent)
self.mp3s = []
drop_target = DropTarget(self)
self.SetDropTarget(drop_target)
main_sizer = wx.BoxSizer(wx.VERTICAL)
self.mp3_olv = ObjectListView(self, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.mp3_olv.SetEmptyListMsg('No Mp3s Found')
<DeepExtract>
self.mp3_olv.SetColumns([ColumnDefn('Artist', 'left', 100, 'artist'), ColumnDefn('Album', 'left', 100, 'album'), ColumnDefn('Title', 'left', 150, 'title'), ColumnDefn('Year', 'left', 100, 'year')])
self.mp3_olv.SetObjects(self.mp3s)
</DeepExtract>
main_sizer.Add(self.mp3_olv, 1, wx.ALL | wx.EXPAND, 5)
edit_btn = wx.Button(self, label='Edit Mp3')
edit_btn.Bind(wx.EVT_BUTTON, self.edit_mp3)
main_sizer.Add(edit_btn, 0, wx.ALL | wx.CENTER, 5)
self.SetSizer(main_sizer)
|
def __init__(self, parent):
super().__init__(parent)
self.mp3s = []
drop_target = DropTarget(self)
self.SetDropTarget(drop_target)
main_sizer = wx.BoxSizer(wx.VERTICAL)
self.mp3_olv = ObjectListView(self, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.mp3_olv.SetEmptyListMsg('No Mp3s Found')
self.mp3_olv.SetColumns([ColumnDefn('Artist', 'left', 100, 'artist'), ColumnDefn('Album', 'left', 100, 'album'), ColumnDefn('Title', 'left', 150, 'title'), ColumnDefn('Year', 'left', 100, 'year')])
self.mp3_olv.SetObjects(self.mp3s)
main_sizer.Add(self.mp3_olv, 1, wx.ALL | wx.EXPAND, 5)
edit_btn = wx.Button(self, label='Edit Mp3')
edit_btn.Bind(wx.EVT_BUTTON, self.edit_mp3)
main_sizer.Add(edit_btn, 0, wx.ALL | wx.CENTER, 5)
self.SetSizer(main_sizer)
|
applications_with_wxpython
|
positive
|
def __init__(self, objectType, instance, objectName, properties=None, description='', presentValue=None, is_commandable=False, relinquish_default=None):
self._properties = ObjectFactory.default_properties(objectType, properties, is_commandable, relinquish_default)
pv_datatype = ObjectFactory.get_pv_datatype(objectType)
if not isinstance(presentValue, pv_datatype):
try:
presentValue = pv_datatype(presentValue)
except:
raise ValueError('Wrong datatype provided for presentValue')
@bacnet_properties(self._properties)
@make_commandable()
def _create_commandable(objectType, instance, objectName, presentValue, description):
return create(objectType, instance, objectName, presentValue, description)
@bacnet_properties(self._properties)
def _create(objectType, instance, objectName, presentValue, description):
return create(objectType, instance, objectName, presentValue, description)
<DeepExtract>
name_must_be_changed = False
if objectName in self.objects.keys():
name_must_be_changed = True
instance = self.validate_instance(objectType, instance)
if name_must_be_changed:
objectName = '{}-{}'.format(objectName, instance)
self._log.warning('Name already taken, using {} instead'.format(objectName))
(objectName, instance) = (objectName, instance)
</DeepExtract>
if is_commandable:
<DeepExtract>
self.objects[objectName] = create(objectType, instance, objectName, presentValue, description)
</DeepExtract>
else:
<DeepExtract>
self.objects[objectName] = create(objectType, instance, objectName, presentValue, description)
</DeepExtract>
|
def __init__(self, objectType, instance, objectName, properties=None, description='', presentValue=None, is_commandable=False, relinquish_default=None):
self._properties = ObjectFactory.default_properties(objectType, properties, is_commandable, relinquish_default)
pv_datatype = ObjectFactory.get_pv_datatype(objectType)
if not isinstance(presentValue, pv_datatype):
try:
presentValue = pv_datatype(presentValue)
except:
raise ValueError('Wrong datatype provided for presentValue')
@bacnet_properties(self._properties)
@make_commandable()
def _create_commandable(objectType, instance, objectName, presentValue, description):
return create(objectType, instance, objectName, presentValue, description)
@bacnet_properties(self._properties)
def _create(objectType, instance, objectName, presentValue, description):
return create(objectType, instance, objectName, presentValue, description)
name_must_be_changed = False
if objectName in self.objects.keys():
name_must_be_changed = True
instance = self.validate_instance(objectType, instance)
if name_must_be_changed:
objectName = '{}-{}'.format(objectName, instance)
self._log.warning('Name already taken, using {} instead'.format(objectName))
(objectName, instance) = (objectName, instance)
if is_commandable:
self.objects[objectName] = create(objectType, instance, objectName, presentValue, description)
else:
self.objects[objectName] = create(objectType, instance, objectName, presentValue, description)
|
BAC0
|
positive
|
@bp_core.route('/<uuid(strict=False):mbid>/high-level', methods=['GET'])
@crossdomain()
@ratelimit()
def get_high_level(mbid):
"""Get high-level data for recording with a given MBID.
This endpoint returns one document at a time. If there are many submissions
for an MBID, you can browse through them by specifying an offset parameter
``n``. Documents are sorted by the submission time of their associated
low-level documents.
You can get the total number of low-level submissions using ``/<mbid>/count``
endpoint.
:query n: *Optional.* Integer specifying an offset for a document.
:query map_classes: *Optional.* If set to 'true', map class names to human-readable values
TODO: provide a link to what these mappings are
:resheader Content-Type: *application/json*
"""
offset = request.args.get('n')
<DeepExtract>
try:
normalised_mbid = str(uuid.UUID(str(mbid)))
except ValueError:
raise webserver.views.api.exceptions.APIBadRequest("'%s' is not a valid UUID" % str(mbid))
if offset:
try:
offset = int(offset)
offset = max(offset, 0)
except ValueError:
offset = 0
else:
offset = 0
    (_, mbid, offset) = (str(mbid), normalised_mbid, offset)
</DeepExtract>
<DeepExtract>
    map_classes = request.args.get('map_classes') is not None and request.args.get('map_classes').lower() == 'true'
</DeepExtract>
try:
return jsonify(db.data.load_high_level(mbid, offset, map_classes))
except NoDataFoundException:
raise webserver.views.api.exceptions.APINotFound('Not found')
|
@bp_core.route('/<uuid(strict=False):mbid>/high-level', methods=['GET'])
@crossdomain()
@ratelimit()
def get_high_level(mbid):
"""Get high-level data for recording with a given MBID.
This endpoint returns one document at a time. If there are many submissions
for an MBID, you can browse through them by specifying an offset parameter
``n``. Documents are sorted by the submission time of their associated
low-level documents.
You can get the total number of low-level submissions using ``/<mbid>/count``
endpoint.
:query n: *Optional.* Integer specifying an offset for a document.
:query map_classes: *Optional.* If set to 'true', map class names to human-readable values
TODO: provide a link to what these mappings are
:resheader Content-Type: *application/json*
"""
offset = request.args.get('n')
try:
normalised_mbid = str(uuid.UUID(str(mbid)))
except ValueError:
raise webserver.views.api.exceptions.APIBadRequest("'%s' is not a valid UUID" % str(mbid))
if offset:
try:
offset = int(offset)
offset = max(offset, 0)
except ValueError:
offset = 0
else:
offset = 0
    (_, mbid, offset) = (str(mbid), normalised_mbid, offset)
    map_classes = request.args.get('map_classes') is not None and request.args.get('map_classes').lower() == 'true'
try:
return jsonify(db.data.load_high_level(mbid, offset, map_classes))
except NoDataFoundException:
raise webserver.views.api.exceptions.APINotFound('Not found')
|
acousticbrainz-server
|
positive
|
def render(self, defaults, errors, error_wrapper, stag_end=' />'):
lst = []
attrs = dict(self.attrs)
error = errors and self.name in errors
if error:
<DeepExtract>
classes = attrs.get('class', '').split()
if 'error' not in classes:
attrs['class'] = ' '.join(classes + ['error'])
</DeepExtract>
lst.append(error_wrapper[0])
lst.append(html_escape(errors[self.name]))
lst.append(error_wrapper[1])
if self.name in defaults:
attrs['value'] = str(defaults[self.name])
attrs['name'] = self.name
lst.append('<input type="%s" %s%s' % (self.tpe, dict_to_attrs(attrs), stag_end))
return ''.join(lst)
|
def render(self, defaults, errors, error_wrapper, stag_end=' />'):
lst = []
attrs = dict(self.attrs)
error = errors and self.name in errors
if error:
classes = attrs.get('class', '').split()
if 'error' not in classes:
attrs['class'] = ' '.join(classes + ['error'])
lst.append(error_wrapper[0])
lst.append(html_escape(errors[self.name]))
lst.append(error_wrapper[1])
if self.name in defaults:
attrs['value'] = str(defaults[self.name])
attrs['name'] = self.name
lst.append('<input type="%s" %s%s' % (self.tpe, dict_to_attrs(attrs), stag_end))
return ''.join(lst)
|
aws-serverless-workshop-greater-china-region
|
positive
|
@retry(**RETRY_POLICY)
def delete_version(self, project_id, service_id, version_id):
""" Deletes a version.
Args:
project_id: A string specifying the project ID.
service_id: A string specifying the service ID.
version_id: A string specifying the version ID.
Returns:
A dictionary containing the delete version operation details.
Raises:
AdminError if the response is formatted incorrectly.
"""
version_url = '{prefix}/{project}/services/{service}/versions/{version}'.format(prefix=self.prefix, project=project_id, service=service_id, version=version_id)
headers = {'AppScale-Secret': self.secret}
response = requests.delete(version_url, headers=headers, verify=False)
<DeepExtract>
try:
content = response.json()
except ValueError:
raise AdminError('Invalid response: {}'.format(response.content))
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
try:
message = content['error']['message']
except KeyError:
message = 'AdminServer returned: {}'.format(response.status_code)
raise AdminError(message)
operation = content
</DeepExtract>
try:
operation_id = operation['name'].split('/')[-1]
except (KeyError, IndexError):
raise AdminError('Invalid operation: {}'.format(operation))
return operation_id
|
@retry(**RETRY_POLICY)
def delete_version(self, project_id, service_id, version_id):
""" Deletes a version.
Args:
project_id: A string specifying the project ID.
service_id: A string specifying the service ID.
version_id: A string specifying the version ID.
Returns:
A dictionary containing the delete version operation details.
Raises:
AdminError if the response is formatted incorrectly.
"""
version_url = '{prefix}/{project}/services/{service}/versions/{version}'.format(prefix=self.prefix, project=project_id, service=service_id, version=version_id)
headers = {'AppScale-Secret': self.secret}
response = requests.delete(version_url, headers=headers, verify=False)
try:
content = response.json()
except ValueError:
raise AdminError('Invalid response: {}'.format(response.content))
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
try:
message = content['error']['message']
except KeyError:
message = 'AdminServer returned: {}'.format(response.status_code)
raise AdminError(message)
operation = content
try:
operation_id = operation['name'].split('/')[-1]
except (KeyError, IndexError):
raise AdminError('Invalid operation: {}'.format(operation))
return operation_id
|
appscale-tools
|
positive
|