| before (string, lengths 0–955k) | after (string, lengths 0–877k) | repo (string, lengths 1–74) | type (stringclasses, 1 value) |
|---|---|---|---|
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
out = {}
if len(preds_n) > 0:
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
cache_path = os.path.join('eval_results/', '.cache_' + model_id + '_' + split + '.json')
<DeepExtract>
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_coco_LN_test.json'
elif 'flickr30k' in dataset or 'f30k' in dataset or 'flk30k' in dataset:
annFile = 'coco-caption/annotations/captions_flk30k_LN_test.json'
elif 'ade20k' in dataset:
annFile = 'coco-caption/annotations/captions_ade20k_LN_test.json'
elif 'openimg' in dataset:
annFile = 'coco-caption/annotations/captions_openimg_LN_test.json'
coco = COCO(annFile)
</DeepExtract>
valids = coco.getImgIds()
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w'))
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for (metric, score) in cocoEval.eval.items():
out[metric] = score
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_' + k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_' + k] = out['SPICE_' + k][out['SPICE_' + k] == out['SPICE_' + k]].mean()
for p in preds_filt:
(image_id, caption) = (p['image_id'], p['caption'])
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_' + model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
|
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
out = {}
if len(preds_n) > 0:
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
cache_path = os.path.join('eval_results/', '.cache_' + model_id + '_' + split + '.json')
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_coco_LN_test.json'
elif 'flickr30k' in dataset or 'f30k' in dataset or 'flk30k' in dataset:
annFile = 'coco-caption/annotations/captions_flk30k_LN_test.json'
elif 'ade20k' in dataset:
annFile = 'coco-caption/annotations/captions_ade20k_LN_test.json'
elif 'openimg' in dataset:
annFile = 'coco-caption/annotations/captions_openimg_LN_test.json'
coco = COCO(annFile)
valids = coco.getImgIds()
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w'))
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for (metric, score) in cocoEval.eval.items():
out[metric] = score
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_' + k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_' + k] = out['SPICE_' + k][out['SPICE_' + k] == out['SPICE_' + k]].mean()
for p in preds_filt:
(image_id, caption) = (p['image_id'], p['caption'])
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_' + model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
|
connect-caption-and-trace
|
positive
|
def parse_dataframes(self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
data = dataframes[0].rename(columns={'date': 'date', 'state': 'subregion1_name', 'fips': 'subregion2_code', 'cases': 'total_confirmed', 'deaths': 'total_deceased'}).dropna(subset=['subregion2_code'])
<DeepExtract>
us_meta = aux['metadata']
us_meta = us_meta[us_meta['country_code'] == 'US']
us_meta = us_meta.set_index('subregion1_name')['subregion1_code'].drop_duplicates()
country_map = {idx: code for (idx, code) in us_meta.iteritems()}
data['subregion1_code'] = data['subregion1_name'].apply(country_map.get)
data = data.dropna(subset=['subregion1_code'])
data = data
</DeepExtract>
data['subregion2_code'] = data['subregion2_code'].apply(lambda x: '{0:05d}'.format(int(x)))
data['key'] = 'US_' + data['subregion1_code'] + '_' + data['subregion2_code']
return data
|
def parse_dataframes(self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
data = dataframes[0].rename(columns={'date': 'date', 'state': 'subregion1_name', 'fips': 'subregion2_code', 'cases': 'total_confirmed', 'deaths': 'total_deceased'}).dropna(subset=['subregion2_code'])
us_meta = aux['metadata']
us_meta = us_meta[us_meta['country_code'] == 'US']
us_meta = us_meta.set_index('subregion1_name')['subregion1_code'].drop_duplicates()
country_map = {idx: code for (idx, code) in us_meta.iteritems()}
data['subregion1_code'] = data['subregion1_name'].apply(country_map.get)
data = data.dropna(subset=['subregion1_code'])
data = data
data['subregion2_code'] = data['subregion2_code'].apply(lambda x: '{0:05d}'.format(int(x)))
data['key'] = 'US_' + data['subregion1_code'] + '_' + data['subregion2_code']
return data
|
covid-19-open-data
|
positive
|
def send_tms0110():
<DeepExtract>
if 0:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 1:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 1:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 0:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
</DeepExtract>
|
def send_tms0110():
if 0:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
if 1:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
if 1:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
if 0:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
|
esp32ecp5
|
positive
|
def is_empty(self, stack_num):
<DeepExtract>
if stack_num >= self.number_of_stacks:
raise StackDoesNotExistError(f'Stack #{stack_num} does not exist')
</DeepExtract>
return self.sizes[stack_num] == 0
|
def is_empty(self, stack_num):
if stack_num >= self.number_of_stacks:
raise StackDoesNotExistError(f'Stack #{stack_num} does not exist')
return self.sizes[stack_num] == 0
|
CtCI-6th-Edition-Python
|
positive
|
def close_tab(self, tab):
"""Close a tab (after asking to save if needed)."""
index = self.notebook.page_num(tab)
if not self.notebook.get_nth_page(index).is_saved():
self.notebook.set_current_page(index)
is_saved = self.saving_manager.confirm_save_modifs()
if not is_saved:
return False
self.notebook.remove_page(index)
<DeepExtract>
controls_hidden = self.lookup_action('hide_controls').get_state()
should_show = self.notebook.get_n_pages() > 1 and (not controls_hidden)
self.notebook.set_show_tabs(should_show)
</DeepExtract>
return True
|
def close_tab(self, tab):
"""Close a tab (after asking to save if needed)."""
index = self.notebook.page_num(tab)
if not self.notebook.get_nth_page(index).is_saved():
self.notebook.set_current_page(index)
is_saved = self.saving_manager.confirm_save_modifs()
if not is_saved:
return False
self.notebook.remove_page(index)
controls_hidden = self.lookup_action('hide_controls').get_state()
should_show = self.notebook.get_n_pages() > 1 and (not controls_hidden)
self.notebook.set_show_tabs(should_show)
return True
|
drawing
|
positive
|
def write_instance_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
<DeepExtract>
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(input_ids)))
features['input_ids'] = feature
</DeepExtract>
<DeepExtract>
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(input_mask)))
features['input_mask'] = feature
</DeepExtract>
<DeepExtract>
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(segment_ids)))
features['segment_ids'] = feature
</DeepExtract>
<DeepExtract>
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(masked_lm_positions)))
features['masked_lm_positions'] = feature
</DeepExtract>
<DeepExtract>
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(masked_lm_ids)))
features['masked_lm_ids'] = feature
</DeepExtract>
<DeepExtract>
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(masked_lm_weights)))
features['masked_lm_weights'] = feature
</DeepExtract>
<DeepExtract>
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([next_sentence_label])))
features['next_sentence_labels'] = feature
</DeepExtract>
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info('*** Example ***')
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info('Wrote %d total instances', total_written)
|
def write_instance_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(input_ids)))
features['input_ids'] = feature
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(input_mask)))
features['input_mask'] = feature
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(segment_ids)))
features['segment_ids'] = feature
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(masked_lm_positions)))
features['masked_lm_positions'] = feature
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(masked_lm_ids)))
features['masked_lm_ids'] = feature
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(masked_lm_weights)))
features['masked_lm_weights'] = feature
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([next_sentence_label])))
features['next_sentence_labels'] = feature
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info('*** Example ***')
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info('Wrote %d total instances', total_written)
|
BERT4doc-Classification
|
positive
|
def vat_number_validation(vat_number):
"""
Validate Italian VAT number. Used also for entities SSN validation.
``ValueError`` is raised if validation fails.
"""
vat_number = str(int(vat_number)).zfill(11)
<DeepExtract>
normalized_vat_number = force_str(vat_number[0:10]).zfill(10)
total = 0
for i in range(0, 10, 2):
total += int(normalized_vat_number[i])
for i in range(1, 11, 2):
(quotient, remainder) = divmod(int(normalized_vat_number[i]) * 2, 10)
total += quotient + remainder
check_digit = force_str((10 - total % 10) % 10)
</DeepExtract>
if vat_number[10] != check_digit:
raise ValueError(_('Check digit does not match.'))
return force_str(vat_number)
|
def vat_number_validation(vat_number):
"""
Validate Italian VAT number. Used also for entities SSN validation.
``ValueError`` is raised if validation fails.
"""
vat_number = str(int(vat_number)).zfill(11)
normalized_vat_number = force_str(vat_number[0:10]).zfill(10)
total = 0
for i in range(0, 10, 2):
total += int(normalized_vat_number[i])
for i in range(1, 11, 2):
(quotient, remainder) = divmod(int(normalized_vat_number[i]) * 2, 10)
total += quotient + remainder
check_digit = force_str((10 - total % 10) % 10)
if vat_number[10] != check_digit:
raise ValueError(_('Check digit does not match.'))
return force_str(vat_number)
|
django-localflavor
|
positive
|
def map_func(idx):
image_info = {'image_idx': idx, 'pointcloud_num_features': 4}
annotations = None
if velodyne:
<DeepExtract>
image_info['velodyne_path'] = get_kitti_info_path(idx, path, 'velodyne', '.bin', training, relative_path, exist_check)
</DeepExtract>
<DeepExtract>
image_info['img_path'] = get_kitti_info_path(idx, path, 'image_2', '.png', training, relative_path, exist_check)
</DeepExtract>
if with_imageshape:
img_path = image_info['img_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['img_shape'] = np.array(io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
<DeepExtract>
label_path = get_kitti_info_path(idx, path, 'label_2', '.txt', training, relative_path, exist_check)
</DeepExtract>
if relative_path:
label_path = str(root_path / label_path)
<DeepExtract>
annotations = {}
annotations.update({'name': [], 'truncated': [], 'occluded': [], 'alpha': [], 'bbox': [], 'dimensions': [], 'location': [], 'rotation_y': []})
with open(label_path, 'r') as f:
lines = f.readlines()
content = [line.strip().split(' ') for line in lines]
num_objects = len([x[0] for x in content if x[0] != 'DontCare'])
annotations['name'] = np.array([x[0] for x in content])
num_gt = len(annotations['name'])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array([[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
annotations['dimensions'] = np.array([[float(info) for info in x[8:11]] for x in content]).reshape(-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array([[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array([float(x[14]) for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16:
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros((annotations['bbox'].shape[0],))
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
annotations['group_ids'] = np.arange(num_gt, dtype=np.int32)
annotations = annotations
</DeepExtract>
if calib:
<DeepExtract>
calib_path = get_kitti_info_path(idx, path, 'calib', '.txt', training, False, exist_check)
</DeepExtract>
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]]).reshape([3, 4])
P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]]).reshape([3, 4])
P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]]).reshape([3, 4])
P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]]).reshape([3, 4])
if extend_matrix:
<DeepExtract>
P0 = np.concatenate([P0, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
P0 = P0
</DeepExtract>
<DeepExtract>
P1 = np.concatenate([P1, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
P1 = P1
</DeepExtract>
<DeepExtract>
P2 = np.concatenate([P2, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
P2 = P2
</DeepExtract>
<DeepExtract>
P3 = np.concatenate([P3, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
P3 = P3
</DeepExtract>
image_info['calib/P0'] = P0
image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
image_info['calib/P3'] = P3
R0_rect = np.array([float(info) for info in lines[4].split(' ')[1:10]]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.0
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([float(info) for info in lines[5].split(' ')[1:13]]).reshape([3, 4])
Tr_imu_to_velo = np.array([float(info) for info in lines[6].split(' ')[1:13]]).reshape([3, 4])
if extend_matrix:
<DeepExtract>
Tr_velo_to_cam = np.concatenate([Tr_velo_to_cam, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
Tr_velo_to_cam = Tr_velo_to_cam
</DeepExtract>
<DeepExtract>
Tr_imu_to_velo = np.concatenate([Tr_imu_to_velo, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
Tr_imu_to_velo = Tr_imu_to_velo
</DeepExtract>
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
if annotations is not None:
image_info['annos'] = annotations
<DeepExtract>
min_height = [40, 25, 25]
max_occlusion = [0, 1, 2]
max_trunc = [0.15, 0.3, 0.5]
annos = image_info['annos']
dims = annos['dimensions']
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
easy_mask = np.ones((len(dims),), dtype=np.bool)
moderate_mask = np.ones((len(dims),), dtype=np.bool)
hard_mask = np.ones((len(dims),), dtype=np.bool)
i = 0
for (h, o, t) in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos['difficulty'] = np.array(diff, np.int32)
return diff
</DeepExtract>
return image_info
|
def map_func(idx):
image_info = {'image_idx': idx, 'pointcloud_num_features': 4}
annotations = None
if velodyne:
image_info['velodyne_path'] = get_kitti_info_path(idx, path, 'velodyne', '.bin', training, relative_path, exist_check)
image_info['img_path'] = get_kitti_info_path(idx, path, 'image_2', '.png', training, relative_path, exist_check)
if with_imageshape:
img_path = image_info['img_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['img_shape'] = np.array(io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_kitti_info_path(idx, path, 'label_2', '.txt', training, relative_path, exist_check)
if relative_path:
label_path = str(root_path / label_path)
annotations = {}
annotations.update({'name': [], 'truncated': [], 'occluded': [], 'alpha': [], 'bbox': [], 'dimensions': [], 'location': [], 'rotation_y': []})
with open(label_path, 'r') as f:
lines = f.readlines()
content = [line.strip().split(' ') for line in lines]
num_objects = len([x[0] for x in content if x[0] != 'DontCare'])
annotations['name'] = np.array([x[0] for x in content])
num_gt = len(annotations['name'])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array([[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
annotations['dimensions'] = np.array([[float(info) for info in x[8:11]] for x in content]).reshape(-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array([[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array([float(x[14]) for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16:
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros((annotations['bbox'].shape[0],))
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
annotations['group_ids'] = np.arange(num_gt, dtype=np.int32)
annotations = annotations
if calib:
calib_path = get_kitti_info_path(idx, path, 'calib', '.txt', training, False, exist_check)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]]).reshape([3, 4])
P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]]).reshape([3, 4])
P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]]).reshape([3, 4])
P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]]).reshape([3, 4])
if extend_matrix:
P0 = np.concatenate([P0, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
P0 = P0
P1 = np.concatenate([P1, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
P1 = P1
P2 = np.concatenate([P2, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
P2 = P2
P3 = np.concatenate([P3, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
P3 = P3
image_info['calib/P0'] = P0
image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
image_info['calib/P3'] = P3
R0_rect = np.array([float(info) for info in lines[4].split(' ')[1:10]]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.0
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([float(info) for info in lines[5].split(' ')[1:13]]).reshape([3, 4])
Tr_imu_to_velo = np.array([float(info) for info in lines[6].split(' ')[1:13]]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = np.concatenate([Tr_velo_to_cam, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
Tr_velo_to_cam = Tr_velo_to_cam
Tr_imu_to_velo = np.concatenate([Tr_imu_to_velo, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
Tr_imu_to_velo = Tr_imu_to_velo
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
if annotations is not None:
image_info['annos'] = annotations
min_height = [40, 25, 25]
max_occlusion = [0, 1, 2]
max_trunc = [0.15, 0.3, 0.5]
annos = image_info['annos']
dims = annos['dimensions']
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
easy_mask = np.ones((len(dims),), dtype=np.bool)
moderate_mask = np.ones((len(dims),), dtype=np.bool)
hard_mask = np.ones((len(dims),), dtype=np.bool)
i = 0
for (h, o, t) in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos['difficulty'] = np.array(diff, np.int32)
return diff
return image_info
|
CLOCs
|
positive
|
@click.command(context_settings=CONTEXT_SETTINGS, short_help="Gets a pipeline's current settings and status.")
@click.option('--pipeline-id', default=None, type=PipelineIdClickType(), help=PipelineIdClickType.help)
@debug_option
@profile_option
@pipelines_exception_eater
@provide_api_client
def get_cli(api_client, pipeline_id):
"""
Gets a pipeline's current settings and status.
Usage:
databricks pipelines get --pipeline-id 1234
"""
<DeepExtract>
if pipeline_id is None or len(pipeline_id) == 0:
error_and_quit(u'Empty pipeline ID provided')
</DeepExtract>
click.echo(pretty_format(PipelinesApi(api_client).get(pipeline_id)))
|
@click.command(context_settings=CONTEXT_SETTINGS, short_help="Gets a pipeline's current settings and status.")
@click.option('--pipeline-id', default=None, type=PipelineIdClickType(), help=PipelineIdClickType.help)
@debug_option
@profile_option
@pipelines_exception_eater
@provide_api_client
def get_cli(api_client, pipeline_id):
"""
Gets a pipeline's current settings and status.
Usage:
databricks pipelines get --pipeline-id 1234
"""
if pipeline_id is None or len(pipeline_id) == 0:
error_and_quit(u'Empty pipeline ID provided')
click.echo(pretty_format(PipelinesApi(api_client).get(pipeline_id)))
|
databricks-cli
|
positive
|
def step(self, action):
for _ in range(self.simrate):
<DeepExtract>
target = action
self.u = pd_in_t()
for i in range(5):
self.u.leftLeg.motorPd.pGain[i] = self.P[i]
self.u.rightLeg.motorPd.pGain[i] = self.P[i]
self.u.leftLeg.motorPd.dGain[i] = self.D[i]
self.u.rightLeg.motorPd.dGain[i] = self.D[i]
self.u.leftLeg.motorPd.torque[i] = 0
self.u.rightLeg.motorPd.torque[i] = 0
self.u.leftLeg.motorPd.pTarget[i] = target[i]
self.u.rightLeg.motorPd.pTarget[i] = target[i + 5]
self.u.leftLeg.motorPd.dTarget[i] = 0
self.u.rightLeg.motorPd.dTarget[i] = 0
self.sim.step_pd(self.u)
</DeepExtract>
height = self.sim.qpos()[2]
self.time += 1
self.phase += 1
if self.phase > self.phaselen:
self.phase = 0
self.counter += 1
done = not (height > 0.5 and height < 3.0)
<DeepExtract>
qpos = np.copy(self.sim.qpos())
com_pos = qpos[0:2]
foot_pos = np.zeros(6)
self.sim.foot_pos(foot_pos)
target = self.get_ref_state(self.phase)
foot_target = target[0:6]
com_target = target[6:9]
foot_error = 0
for i in range(len(foot_pos)):
foot_error += 3 * (foot_pos[i] - foot_target[i]) ** 2
com_error = 0
for i in range(len(com_pos)):
com_error += 10 * (com_pos[i] - com_target[i]) ** 2
orientation_error = np.arccos(2 * np.inner(qpos[3:7], self.qpos0[3:7]) ** 2 - 1)
reward = 0.4 * np.exp(-foot_error) + 0.3 * np.exp(-com_error) + 0.3 * np.exp(-orientation_error)
reward = reward
</DeepExtract>
if reward < 0.3:
done = True
return (self.get_full_state(), reward, done, {})
|
def step(self, action):
for _ in range(self.simrate):
target = action
self.u = pd_in_t()
for i in range(5):
self.u.leftLeg.motorPd.pGain[i] = self.P[i]
self.u.rightLeg.motorPd.pGain[i] = self.P[i]
self.u.leftLeg.motorPd.dGain[i] = self.D[i]
self.u.rightLeg.motorPd.dGain[i] = self.D[i]
self.u.leftLeg.motorPd.torque[i] = 0
self.u.rightLeg.motorPd.torque[i] = 0
self.u.leftLeg.motorPd.pTarget[i] = target[i]
self.u.rightLeg.motorPd.pTarget[i] = target[i + 5]
self.u.leftLeg.motorPd.dTarget[i] = 0
self.u.rightLeg.motorPd.dTarget[i] = 0
self.sim.step_pd(self.u)
height = self.sim.qpos()[2]
self.time += 1
self.phase += 1
if self.phase > self.phaselen:
self.phase = 0
self.counter += 1
done = not (height > 0.5 and height < 3.0)
qpos = np.copy(self.sim.qpos())
com_pos = qpos[0:2]
foot_pos = np.zeros(6)
self.sim.foot_pos(foot_pos)
target = self.get_ref_state(self.phase)
foot_target = target[0:6]
com_target = target[6:9]
foot_error = 0
for i in range(len(foot_pos)):
foot_error += 3 * (foot_pos[i] - foot_target[i]) ** 2
com_error = 0
for i in range(len(com_pos)):
com_error += 10 * (com_pos[i] - com_target[i]) ** 2
orientation_error = np.arccos(2 * np.inner(qpos[3:7], self.qpos0[3:7]) ** 2 - 1)
reward = 0.4 * np.exp(-foot_error) + 0.3 * np.exp(-com_error) + 0.3 * np.exp(-orientation_error)
reward = reward
if reward < 0.3:
done = True
return (self.get_full_state(), reward, done, {})
|
apex
|
positive
|
def generate(self):
<DeepExtract>
account_id = self.map_params['default_providers']['source'].get('properties', {}).get('account_id', '')
if self.provider == 'GitHub':
pipeline_role = None
if self.provider == 'CodeStarSourceConnection':
pipeline_role = None
if self.provider == 'CodeBuild':
pipeline_role = None
if self.provider == 'CodeCommit':
pipeline_role = f'arn:{ADF_DEPLOYMENT_PARTITION}:iam::{account_id}:role/adf-codecommit-role'
if self.provider == 'S3' and self.category == 'Source':
pipeline_role = f'arn:{ADF_DEPLOYMENT_PARTITION}:iam::{account_id}:role/adf-codecommit-role'
if self.provider == 'S3' and self.category == 'Deploy':
pipeline_role = f"arn:{ADF_DEPLOYMENT_PARTITION}:iam::{self.target['id']}:role/adf-cloudformation-role"
if self.provider == 'ServiceCatalog':
pipeline_role = f"arn:{ADF_DEPLOYMENT_PARTITION}:iam::{self.target['id']}:role/adf-cloudformation-role"
if self.provider == 'CodeDeploy':
pipeline_role = f"arn:{ADF_DEPLOYMENT_PARTITION}:iam::{self.target['id']}:role/adf-cloudformation-role"
if self.provider == 'Lambda':
pipeline_role = None
if self.provider == 'CloudFormation':
pipeline_role = f"arn:{ADF_DEPLOYMENT_PARTITION}:iam::{self.target['id']}:role/adf-cloudformation-role"
if self.provider == 'Manual':
pipeline_role = None
raise Exception(f'Invalid Provider {self.provider}')
</DeepExtract>
action_props = {'action_type_id': _codepipeline.CfnPipeline.ActionTypeIdProperty(version=Action._version, owner=self.owner, provider=self.provider, category=self.category), 'configuration': self.configuration, 'name': self.action_name, 'region': self.region or ADF_DEPLOYMENT_REGION, 'run_order': self.run_order}
<DeepExtract>
if self.category not in ['Build', 'Deploy']:
input_artifacts = []
input_artifacts = [_codepipeline.CfnPipeline.InputArtifactProperty(name=self._get_base_input_artifact_name())]
if self.category == 'Deploy':
for override in self.target.get('properties', {}).get('param_overrides', []):
override_input = _codepipeline.CfnPipeline.InputArtifactProperty(name=override.get('inputs', ''))
requires_input_override = self.provider == 'CloudFormation' and override.get('inputs') and (self.action_mode != 'CHANGE_SET_EXECUTE') and (override_input not in input_artifacts)
if requires_input_override:
input_artifacts.append(override_input)
input_artifacts = input_artifacts
</DeepExtract>
if input_artifacts:
action_props['input_artifacts'] = input_artifacts
<DeepExtract>
output_artifact_name = self._get_base_output_artifact_name()
if output_artifact_name:
output_artifacts = [_codepipeline.CfnPipeline.OutputArtifactProperty(name=output_artifact_name)]
output_artifacts = []
</DeepExtract>
if output_artifacts:
action_props['output_artifacts'] = output_artifacts
if pipeline_role:
action_props['role_arn'] = pipeline_role
if self.category == 'Manual':
del action_props['region']
return _codepipeline.CfnPipeline.ActionDeclarationProperty(**action_props)
|
def generate(self):
account_id = self.map_params['default_providers']['source'].get('properties', {}).get('account_id', '')
if self.provider == 'GitHub':
pipeline_role = None
if self.provider == 'CodeStarSourceConnection':
pipeline_role = None
if self.provider == 'CodeBuild':
pipeline_role = None
if self.provider == 'CodeCommit':
pipeline_role = f'arn:{ADF_DEPLOYMENT_PARTITION}:iam::{account_id}:role/adf-codecommit-role'
if self.provider == 'S3' and self.category == 'Source':
pipeline_role = f'arn:{ADF_DEPLOYMENT_PARTITION}:iam::{account_id}:role/adf-codecommit-role'
if self.provider == 'S3' and self.category == 'Deploy':
pipeline_role = f"arn:{ADF_DEPLOYMENT_PARTITION}:iam::{self.target['id']}:role/adf-cloudformation-role"
if self.provider == 'ServiceCatalog':
pipeline_role = f"arn:{ADF_DEPLOYMENT_PARTITION}:iam::{self.target['id']}:role/adf-cloudformation-role"
if self.provider == 'CodeDeploy':
pipeline_role = f"arn:{ADF_DEPLOYMENT_PARTITION}:iam::{self.target['id']}:role/adf-cloudformation-role"
if self.provider == 'Lambda':
pipeline_role = None
if self.provider == 'CloudFormation':
pipeline_role = f"arn:{ADF_DEPLOYMENT_PARTITION}:iam::{self.target['id']}:role/adf-cloudformation-role"
if self.provider == 'Manual':
pipeline_role = None
raise Exception(f'Invalid Provider {self.provider}')
action_props = {'action_type_id': _codepipeline.CfnPipeline.ActionTypeIdProperty(version=Action._version, owner=self.owner, provider=self.provider, category=self.category), 'configuration': self.configuration, 'name': self.action_name, 'region': self.region or ADF_DEPLOYMENT_REGION, 'run_order': self.run_order}
if self.category not in ['Build', 'Deploy']:
input_artifacts = []
input_artifacts = [_codepipeline.CfnPipeline.InputArtifactProperty(name=self._get_base_input_artifact_name())]
if self.category == 'Deploy':
for override in self.target.get('properties', {}).get('param_overrides', []):
override_input = _codepipeline.CfnPipeline.InputArtifactProperty(name=override.get('inputs', ''))
requires_input_override = self.provider == 'CloudFormation' and override.get('inputs') and (self.action_mode != 'CHANGE_SET_EXECUTE') and (override_input not in input_artifacts)
if requires_input_override:
input_artifacts.append(override_input)
input_artifacts = input_artifacts
if input_artifacts:
action_props['input_artifacts'] = input_artifacts
output_artifact_name = self._get_base_output_artifact_name()
if output_artifact_name:
output_artifacts = [_codepipeline.CfnPipeline.OutputArtifactProperty(name=output_artifact_name)]
output_artifacts = []
if output_artifacts:
action_props['output_artifacts'] = output_artifacts
if pipeline_role:
action_props['role_arn'] = pipeline_role
if self.category == 'Manual':
del action_props['region']
return _codepipeline.CfnPipeline.ActionDeclarationProperty(**action_props)
|
aws-deployment-framework
|
positive
|
def ensemble_doubled(middle_in, middle_out, forecast_in, forecast_out):
third_merge_in = pd.merge(middle_in.drop(['Target'], axis=1), forecast_in, left_index=True, right_index=True, how='left')
third_merge_out = pd.merge(middle_out, forecast_out, left_index=True, right_index=True, how='left')
df_perf = ensemble_performance(third_merge_in).drop('Target', axis=1)
def inner_ensemble(df_perf, third_merge):
df_ensemble = pd.DataFrame(index=third_merge.index)
many = len(df_perf.iloc[0, :].sort_values())
if many == 1:
ValueError('You need more than one model to ensemble.')
if many >= 2:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:2].index.values))] = third_merge[list(df_perf.iloc[0, :].sort_values()[:2].index)].mean(axis=1)
if many >= 3:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:3].index.values))] = third_merge[list(df_perf.iloc[0, :].sort_values()[:3].index)].mean(axis=1)
if many >= 5:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:5].index.values))] = third_merge[list(df_perf.iloc[0, :].sort_values()[:5].index)].mean(axis=1)
if many >= 7:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:7].index.values))] = third_merge[list(df_perf.iloc[0, :].sort_values()[:7].index)].mean(axis=1)
return df_ensemble
<DeepExtract>
df_ensemble = pd.DataFrame(index=third_merge_in.index)
many = len(df_perf.iloc[0, :].sort_values())
if many == 1:
ValueError('You need more than one model to ensemble.')
if many >= 2:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:2].index.values))] = third_merge_in[list(df_perf.iloc[0, :].sort_values()[:2].index)].mean(axis=1)
if many >= 3:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:3].index.values))] = third_merge_in[list(df_perf.iloc[0, :].sort_values()[:3].index)].mean(axis=1)
if many >= 5:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:5].index.values))] = third_merge_in[list(df_perf.iloc[0, :].sort_values()[:5].index)].mean(axis=1)
if many >= 7:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:7].index.values))] = third_merge_in[list(df_perf.iloc[0, :].sort_values()[:7].index)].mean(axis=1)
df_ensembled_in = df_ensemble
</DeepExtract>
<DeepExtract>
df_ensemble = pd.DataFrame(index=third_merge_out.index)
many = len(df_perf.iloc[0, :].sort_values())
if many == 1:
ValueError('You need more than one model to ensemble.')
if many >= 2:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:2].index.values))] = third_merge_out[list(df_perf.iloc[0, :].sort_values()[:2].index)].mean(axis=1)
if many >= 3:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:3].index.values))] = third_merge_out[list(df_perf.iloc[0, :].sort_values()[:3].index)].mean(axis=1)
if many >= 5:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:5].index.values))] = third_merge_out[list(df_perf.iloc[0, :].sort_values()[:5].index)].mean(axis=1)
if many >= 7:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:7].index.values))] = third_merge_out[list(df_perf.iloc[0, :].sort_values()[:7].index)].mean(axis=1)
df_ensembled_out = df_ensemble
</DeepExtract>
last_merge_in = pd.merge(third_merge_in, df_ensembled_in, left_index=True, right_index=True, how='left')
last_merge_out = pd.merge(third_merge_out, df_ensembled_out, left_index=True, right_index=True, how='left')
df_perf_last = ensemble_performance(last_merge_in).drop('Target', axis=1)
return (last_merge_in, last_merge_out, df_perf_last)
|
def ensemble_doubled(middle_in, middle_out, forecast_in, forecast_out):
third_merge_in = pd.merge(middle_in.drop(['Target'], axis=1), forecast_in, left_index=True, right_index=True, how='left')
third_merge_out = pd.merge(middle_out, forecast_out, left_index=True, right_index=True, how='left')
df_perf = ensemble_performance(third_merge_in).drop('Target', axis=1)
def inner_ensemble(df_perf, third_merge):
df_ensemble = pd.DataFrame(index=third_merge.index)
many = len(df_perf.iloc[0, :].sort_values())
if many == 1:
ValueError('You need more than one model to ensemble.')
if many >= 2:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:2].index.values))] = third_merge[list(df_perf.iloc[0, :].sort_values()[:2].index)].mean(axis=1)
if many >= 3:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:3].index.values))] = third_merge[list(df_perf.iloc[0, :].sort_values()[:3].index)].mean(axis=1)
if many >= 5:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:5].index.values))] = third_merge[list(df_perf.iloc[0, :].sort_values()[:5].index)].mean(axis=1)
if many >= 7:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:7].index.values))] = third_merge[list(df_perf.iloc[0, :].sort_values()[:7].index)].mean(axis=1)
return df_ensemble
df_ensemble = pd.DataFrame(index=third_merge_in.index)
many = len(df_perf.iloc[0, :].sort_values())
if many == 1:
ValueError('You need more than one model to ensemble.')
if many >= 2:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:2].index.values))] = third_merge_in[list(df_perf.iloc[0, :].sort_values()[:2].index)].mean(axis=1)
if many >= 3:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:3].index.values))] = third_merge_in[list(df_perf.iloc[0, :].sort_values()[:3].index)].mean(axis=1)
if many >= 5:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:5].index.values))] = third_merge_in[list(df_perf.iloc[0, :].sort_values()[:5].index)].mean(axis=1)
if many >= 7:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:7].index.values))] = third_merge_in[list(df_perf.iloc[0, :].sort_values()[:7].index)].mean(axis=1)
df_ensembled_in = df_ensemble
df_ensemble = pd.DataFrame(index=third_merge_out.index)
many = len(df_perf.iloc[0, :].sort_values())
if many == 1:
ValueError('You need more than one model to ensemble.')
if many >= 2:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:2].index.values))] = third_merge_out[list(df_perf.iloc[0, :].sort_values()[:2].index)].mean(axis=1)
if many >= 3:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:3].index.values))] = third_merge_out[list(df_perf.iloc[0, :].sort_values()[:3].index)].mean(axis=1)
if many >= 5:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:5].index.values))] = third_merge_out[list(df_perf.iloc[0, :].sort_values()[:5].index)].mean(axis=1)
if many >= 7:
df_ensemble['__X__'.join(list(df_perf.iloc[0, :].sort_values()[:7].index.values))] = third_merge_out[list(df_perf.iloc[0, :].sort_values()[:7].index)].mean(axis=1)
df_ensembled_out = df_ensemble
last_merge_in = pd.merge(third_merge_in, df_ensembled_in, left_index=True, right_index=True, how='left')
last_merge_out = pd.merge(third_merge_out, df_ensembled_out, left_index=True, right_index=True, how='left')
df_perf_last = ensemble_performance(last_merge_in).drop('Target', axis=1)
return (last_merge_in, last_merge_out, df_perf_last)
|
atspy
|
positive
|
def __init__(self, working_dir, settings, rpc_server=None, app_name=None):
self.dir_ = working_dir
self.settings = settings
self.rpc_server = rpc_server
self.app_name = app_name
self.budgets = settings.job.size
self.limit = self.budgets >= 0
self.applied = 0
self.finished = 0
self.lock = threading.Lock()
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
<DeepExtract>
save_file = os.path.join(self.dir_, BUDGET_APPLY_STATUS_FILENAME)
if os.path.exists(save_file):
with open(save_file) as f:
(self.applied, self.finished) = pickle.load(f)
</DeepExtract>
<DeepExtract>
assert self.finished <= self.applied
if not self.limit or self.applied < self.budgets:
self.status = SUFFICIENT
elif self.applied >= self.budgets and self.finished < self.budgets:
self.status = NOAPPLIED
elif self.finished >= self.budgets:
self.status = ALLFINISHED
else:
raise RuntimeError('size of applied and finished is impossible')
</DeepExtract>
<DeepExtract>
if self.rpc_server is not None:
self.register_rpc(self, self.rpc_server, app_name=self.app_name)
</DeepExtract>
|
def __init__(self, working_dir, settings, rpc_server=None, app_name=None):
self.dir_ = working_dir
self.settings = settings
self.rpc_server = rpc_server
self.app_name = app_name
self.budgets = settings.job.size
self.limit = self.budgets >= 0
self.applied = 0
self.finished = 0
self.lock = threading.Lock()
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
save_file = os.path.join(self.dir_, BUDGET_APPLY_STATUS_FILENAME)
if os.path.exists(save_file):
with open(save_file) as f:
(self.applied, self.finished) = pickle.load(f)
assert self.finished <= self.applied
if not self.limit or self.applied < self.budgets:
self.status = SUFFICIENT
elif self.applied >= self.budgets and self.finished < self.budgets:
self.status = NOAPPLIED
elif self.finished >= self.budgets:
self.status = ALLFINISHED
else:
raise RuntimeError('size of applied and finished is impossible')
if self.rpc_server is not None:
self.register_rpc(self, self.rpc_server, app_name=self.app_name)
|
cola
|
positive
|
def get_entities_for_surface(self, surface):
"""Return all entities for the surface form.
:param surface:
:return:
"""
surface = normalize_entity_name(surface).encode('utf-8')
LOG.debug('Looking up %s', surface)
line = self.entity_db.get(SURFACE_PREFIX + surface)
if not line:
return []
cols = line.split(b'\t')
mids_dedup = set()
result = []
for i in range(0, len(cols), 2):
surface_score = float(cols[i])
mid = cols[i + 1].decode('utf-8')
if mid in mids_dedup:
continue
mids_dedup.add(mid)
<DeepExtract>
mid_bytes = mid.encode('utf-8')
line = self.entity_db.get(ENTITY_PREFIX + mid_bytes)
if not line:
LOG.info("No entity for mid: '%s'.", mid)
entity = None
entity = EntityIndex._bytes_to_entity(line)
entity = entity
</DeepExtract>
if entity:
result.append((entity, surface_score))
return result
|
def get_entities_for_surface(self, surface):
"""Return all entities for the surface form.
:param surface:
:return:
"""
surface = normalize_entity_name(surface).encode('utf-8')
LOG.debug('Looking up %s', surface)
line = self.entity_db.get(SURFACE_PREFIX + surface)
if not line:
return []
cols = line.split(b'\t')
mids_dedup = set()
result = []
for i in range(0, len(cols), 2):
surface_score = float(cols[i])
mid = cols[i + 1].decode('utf-8')
if mid in mids_dedup:
continue
mids_dedup.add(mid)
mid_bytes = mid.encode('utf-8')
line = self.entity_db.get(ENTITY_PREFIX + mid_bytes)
if not line:
LOG.info("No entity for mid: '%s'.", mid)
entity = None
entity = EntityIndex._bytes_to_entity(line)
entity = entity
if entity:
result.append((entity, surface_score))
return result
|
aqqu
|
positive
|
def _doPaint(self, event):
"""Process the drawing event."""
self.canvas.SetCurrent()
if not self.GLinitialized:
<DeepExtract>
glClearColor(1, 1, 1, 1)
glDisable(GL_DEPTH_TEST)
</DeepExtract>
self.GLinitialized = True
<DeepExtract>
glClear(GL_COLOR_BUFFER_BIT)
glBegin(GL_TRIANGLES)
glColor(1, 0, 0)
glVertex(-0.25, -0.25)
glColor(0, 1, 0)
glVertex(0.25, -0.25)
glColor(0, 0, 1)
glVertex(0, 0.25)
glEnd()
self.SwapBuffers()
</DeepExtract>
event.Skip()
|
def _doPaint(self, event):
"""Process the drawing event."""
self.canvas.SetCurrent()
if not self.GLinitialized:
glClearColor(1, 1, 1, 1)
glDisable(GL_DEPTH_TEST)
self.GLinitialized = True
glClear(GL_COLOR_BUFFER_BIT)
glBegin(GL_TRIANGLES)
glColor(1, 0, 0)
glVertex(-0.25, -0.25)
glColor(0, 1, 0)
glVertex(0.25, -0.25)
glColor(0, 0, 1)
glVertex(0, 0.25)
glEnd()
self.SwapBuffers()
event.Skip()
|
calfem-python
|
positive
|
def __init__(self, arguments):
self.args = arguments
<DeepExtract>
lvl = '0' if arguments.verbose else '2'
set_system_verbosity(lvl)
</DeepExtract>
<DeepExtract>
dest_format = None
if hasattr(self.args, 'alignment_format') and self.args.alignment_format:
dest_format = self.args.alignment_format
dest_format = dest_format
</DeepExtract>
self.alignments = AlignmentData(self.args.alignments_file, dest_format, self.args.verbose)
|
def __init__(self, arguments):
self.args = arguments
lvl = '0' if arguments.verbose else '2'
set_system_verbosity(lvl)
dest_format = None
if hasattr(self.args, 'alignment_format') and self.args.alignment_format:
dest_format = self.args.alignment_format
dest_format = dest_format
self.alignments = AlignmentData(self.args.alignments_file, dest_format, self.args.verbose)
|
DeepFakeTutorial
|
positive
|
def test_submission_template_used(self):
<DeepExtract>
response = self.client_post_report_prep()
</DeepExtract>
self.assertTemplateUsed(response, 'callisto_core/reporting/submission.html')
|
def test_submission_template_used(self):
response = self.client_post_report_prep()
self.assertTemplateUsed(response, 'callisto_core/reporting/submission.html')
|
callisto-core
|
positive
|
def load_message(com_i_pdu):
interval = None
senders = []
comments = None
name = com_i_pdu.find(SHORT_NAME_XPATH, NAMESPACES).text
direction = None
for (parameter, value) in self.iter_parameter_values(com_i_pdu):
if parameter == 'ComIPduDirection':
direction = value
break
com_pdu_id_ref = None
for (reference, value) in self.iter_reference_values(com_i_pdu):
if reference == 'ComPduIdRef':
com_pdu_id_ref = value
break
if com_pdu_id_ref is None:
raise ValueError('No ComPduIdRef reference found.')
if direction == 'SEND':
<DeepExtract>
(frame_id, length, is_extended_frame) = self.load_message_rx_tx(com_pdu_id_ref, 'CanIfTxPduCanId', 'CanIfTxPduDlc', 'CanIfTxPduCanIdType')
</DeepExtract>
elif direction == 'RECEIVE':
<DeepExtract>
(frame_id, length, is_extended_frame) = self.load_message_rx_tx(com_pdu_id_ref, 'CanIfRxPduCanId', 'CanIfRxPduDlc', 'CanIfRxPduCanIdType')
</DeepExtract>
else:
raise NotImplementedError(f'Direction {direction} not supported.')
if frame_id is None:
LOGGER.warning('No frame id found for message %s.', name)
return None
if is_extended_frame is None:
LOGGER.warning('No frame type found for message %s.', name)
return None
if length is None:
LOGGER.warning('No length found for message %s.', name)
return None
signals = []
values = com_i_pdu.iterfind(ECUC_REFERENCE_VALUE_XPATH, NAMESPACES)
for value in values:
definition_ref = value.find(DEFINITION_REF_XPATH, NAMESPACES).text
if not definition_ref.endswith('ComIPduSignalRef'):
continue
value_ref = value.find(VALUE_REF_XPATH, NAMESPACES)
<DeepExtract>
ecuc_container_value = self.find_value(value_ref.text)
if ecuc_container_value is None:
signal = None
name = ecuc_container_value.find(SHORT_NAME_XPATH, NAMESPACES).text
is_signed = False
is_float = False
minimum = None
maximum = None
factor = 1.0
offset = 0.0
unit = None
choices = None
comments = None
receivers = []
decimal = SignalDecimal(Decimal(factor), Decimal(offset))
bit_position = None
length = None
byte_order = None
for (parameter, value) in self.iter_parameter_values(ecuc_container_value):
if parameter == 'ComBitPosition':
bit_position = int(value)
elif parameter == 'ComBitSize':
length = int(value)
elif parameter == 'ComSignalEndianness':
byte_order = value.lower()
elif parameter == 'ComSignalType':
if value in ['SINT8', 'SINT16', 'SINT32']:
is_signed = True
elif value in ['FLOAT32', 'FLOAT64']:
is_float = True
if bit_position is None:
LOGGER.warning('No bit position found for signal %s.', name)
signal = None
if length is None:
LOGGER.warning('No bit size found for signal %s.', name)
signal = None
if byte_order is None:
LOGGER.warning('No endianness found for signal %s.', name)
signal = None
signal = Signal(name=name, start=bit_position, length=length, receivers=receivers, byte_order=byte_order, is_signed=is_signed, scale=factor, offset=offset, minimum=minimum, maximum=maximum, unit=unit, choices=choices, comment=comments, is_float=is_float, decimal=decimal)
</DeepExtract>
if signal is not None:
signals.append(signal)
return Message(frame_id=frame_id, is_extended_frame=is_extended_frame, name=name, length=length, senders=senders, send_type=None, cycle_time=interval, signals=signals, comment=comments, bus_name=None, strict=self.strict, sort_signals=self.sort_signals)
|
def load_message(com_i_pdu):
interval = None
senders = []
comments = None
name = com_i_pdu.find(SHORT_NAME_XPATH, NAMESPACES).text
direction = None
for (parameter, value) in self.iter_parameter_values(com_i_pdu):
if parameter == 'ComIPduDirection':
direction = value
break
com_pdu_id_ref = None
for (reference, value) in self.iter_reference_values(com_i_pdu):
if reference == 'ComPduIdRef':
com_pdu_id_ref = value
break
if com_pdu_id_ref is None:
raise ValueError('No ComPduIdRef reference found.')
if direction == 'SEND':
(frame_id, length, is_extended_frame) = self.load_message_rx_tx(com_pdu_id_ref, 'CanIfTxPduCanId', 'CanIfTxPduDlc', 'CanIfTxPduCanIdType')
elif direction == 'RECEIVE':
(frame_id, length, is_extended_frame) = self.load_message_rx_tx(com_pdu_id_ref, 'CanIfRxPduCanId', 'CanIfRxPduDlc', 'CanIfRxPduCanIdType')
else:
raise NotImplementedError(f'Direction {direction} not supported.')
if frame_id is None:
LOGGER.warning('No frame id found for message %s.', name)
return None
if is_extended_frame is None:
LOGGER.warning('No frame type found for message %s.', name)
return None
if length is None:
LOGGER.warning('No length found for message %s.', name)
return None
signals = []
values = com_i_pdu.iterfind(ECUC_REFERENCE_VALUE_XPATH, NAMESPACES)
for value in values:
definition_ref = value.find(DEFINITION_REF_XPATH, NAMESPACES).text
if not definition_ref.endswith('ComIPduSignalRef'):
continue
value_ref = value.find(VALUE_REF_XPATH, NAMESPACES)
ecuc_container_value = self.find_value(value_ref.text)
if ecuc_container_value is None:
signal = None
name = ecuc_container_value.find(SHORT_NAME_XPATH, NAMESPACES).text
is_signed = False
is_float = False
minimum = None
maximum = None
factor = 1.0
offset = 0.0
unit = None
choices = None
comments = None
receivers = []
decimal = SignalDecimal(Decimal(factor), Decimal(offset))
bit_position = None
length = None
byte_order = None
for (parameter, value) in self.iter_parameter_values(ecuc_container_value):
if parameter == 'ComBitPosition':
bit_position = int(value)
elif parameter == 'ComBitSize':
length = int(value)
elif parameter == 'ComSignalEndianness':
byte_order = value.lower()
elif parameter == 'ComSignalType':
if value in ['SINT8', 'SINT16', 'SINT32']:
is_signed = True
elif value in ['FLOAT32', 'FLOAT64']:
is_float = True
if bit_position is None:
LOGGER.warning('No bit position found for signal %s.', name)
signal = None
if length is None:
LOGGER.warning('No bit size found for signal %s.', name)
signal = None
if byte_order is None:
LOGGER.warning('No endianness found for signal %s.', name)
signal = None
signal = Signal(name=name, start=bit_position, length=length, receivers=receivers, byte_order=byte_order, is_signed=is_signed, scale=factor, offset=offset, minimum=minimum, maximum=maximum, unit=unit, choices=choices, comment=comments, is_float=is_float, decimal=decimal)
if signal is not None:
signals.append(signal)
return Message(frame_id=frame_id, is_extended_frame=is_extended_frame, name=name, length=length, senders=senders, send_type=None, cycle_time=interval, signals=signals, comment=comments, bus_name=None, strict=self.strict, sort_signals=self.sort_signals)
|
cantools
|
positive
|
def get_refs_from(self, obj_name):
"""
Find all object names which `obj_name` references, and return
a list of those objects. Requires a database connection to the refs
database.
"""
<DeepExtract>
if self.db is None:
self._enforce_config_section('database')
if not os.path.exists(self.config['database']['dbfile']):
raise RuntimeError('Database file not found: {}'.format(self.config['database']['dbfile']))
self.db = sqlite3.connect(self.config['database']['dbfile'])
self.curs = self.db.cursor()
</DeepExtract>
self.curs.execute("select o.name\n from bl3object o, bl3refs r, bl3object o2\n where\n o.name like '%{}%'\n and o.id=r.from_obj\n and o2.id=r.to_obj\n ".format(obj_name))
return [row[0] for row in self.curs.fetchall()]
|
def get_refs_from(self, obj_name):
"""
Find all object names which `obj_name` references, and return
a list of those objects. Requires a database connection to the refs
database.
"""
if self.db is None:
self._enforce_config_section('database')
if not os.path.exists(self.config['database']['dbfile']):
raise RuntimeError('Database file not found: {}'.format(self.config['database']['dbfile']))
self.db = sqlite3.connect(self.config['database']['dbfile'])
self.curs = self.db.cursor()
self.curs.execute("select o.name\n from bl3object o, bl3refs r, bl3object o2\n where\n o.name like '%{}%'\n and o.id=r.from_obj\n and o2.id=r.to_obj\n ".format(obj_name))
return [row[0] for row in self.curs.fetchall()]
|
bl3mods
|
positive
|
def decode_stream_one(stream: BytesIO) -> int:
"""Read a varint from `stream`"""
shift = 0
result = 0
while True:
<DeepExtract>
c = stream.read(1)
if not c:
raise EOFError('Unexpected EOF while reading bytes')
i = ord(c)
</DeepExtract>
result |= (i & 127) << shift
shift += 7
if not i & 128:
break
return result
|
def decode_stream_one(stream: BytesIO) -> int:
"""Read a varint from `stream`"""
shift = 0
result = 0
while True:
c = stream.read(1)
if not c:
raise EOFError('Unexpected EOF while reading bytes')
i = ord(c)
result |= (i & 127) << shift
shift += 7
if not i & 128:
break
return result
|
Amulet-Core
|
positive
|
def transformer(cur_definition, rest_definitions):
<DeepExtract>
if INSTANCE_REG.search(cur_definition['statements'][0]):
names = tuple()
elif CANONICAL_STRUCTURE_REG.search(cur_definition['statements'][0]):
names = tuple()
else:
names = cur_definition.get('terms_defined', tuple())
</DeepExtract>
if len(names) == 0:
return cur_definition
section_level = 0
for future_definition in rest_definitions:
if section_level < 0:
break
if SECTION_BEGIN_REG.match(future_definition['statement']):
section_level += 1
elif SECTION_END_REG.match(future_definition['statement']):
section_level -= 1
elif any((re_search("(?<![\\w'])%s(?![\\w'])" % re.escape(name), future_definition['statement']) for name in names)):
return cur_definition
return None
|
def transformer(cur_definition, rest_definitions):
if INSTANCE_REG.search(cur_definition['statements'][0]):
names = tuple()
elif CANONICAL_STRUCTURE_REG.search(cur_definition['statements'][0]):
names = tuple()
else:
names = cur_definition.get('terms_defined', tuple())
if len(names) == 0:
return cur_definition
section_level = 0
for future_definition in rest_definitions:
if section_level < 0:
break
if SECTION_BEGIN_REG.match(future_definition['statement']):
section_level += 1
elif SECTION_END_REG.match(future_definition['statement']):
section_level -= 1
elif any((re_search("(?<![\\w'])%s(?![\\w'])" % re.escape(name), future_definition['statement']) for name in names)):
return cur_definition
return None
|
coq-tools
|
positive
|
def test_any_positional_with_return(self):
<DeepExtract>
wrapped = apply_enforcer(self.func_any_args__none())
enforcer = wrapped.__enforcer__
func_type = enforcer.callable_signature
func_type = func_type
</DeepExtract>
self.assertEqual(func_type, Callable[..., None])
|
def test_any_positional_with_return(self):
wrapped = apply_enforcer(self.func_any_args__none())
enforcer = wrapped.__enforcer__
func_type = enforcer.callable_signature
func_type = func_type
self.assertEqual(func_type, Callable[..., None])
|
enforce
|
positive
|
def test_bootstrap_cloudwatch_log_create_log_stream_failed(mocker):
<DeepExtract>
def config_get_side_effect(section, field):
if section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'log_group_name':
config = DEFAULT_CLOUDWATCH_LOG_GROUP
elif section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'retention_in_days':
config = DEFAULT_RETENTION_DAYS
else:
raise ValueError('Unexpected arguments')
def config_getboolean_side_effect(section, field):
if section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'enabled':
config = True if DEFAULT_CLOUDWATCH_ENABLED == 'true' else False
else:
raise ValueError('Unexpected arguments')
mock_config = MagicMock()
mock_config.get.side_effect = config_get_side_effect
mock_config.getboolean.side_effect = config_getboolean_side_effect
config = mock_config
</DeepExtract>
mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=INSTANCE)
get_botocore_client_mock = mocker.patch('mount_efs.get_botocore_client', return_value='fake-agent')
create_log_group_mock = mocker.patch('mount_efs.create_cloudwatch_log_group', return_value=True)
put_retention_policy_mock = mocker.patch('mount_efs.put_cloudwatch_log_retention_policy', return_value=True)
create_log_stream_mock = mocker.patch('mount_efs.create_cloudwatch_log_stream', return_value=False)
cloudwatchlog_agent = mount_efs.bootstrap_cloudwatch_logging(config, {}, FS_ID)
utils.assert_called_once(get_botocore_client_mock)
utils.assert_called_once(create_log_group_mock)
utils.assert_called_once(put_retention_policy_mock)
utils.assert_called_once(create_log_stream_mock)
assert cloudwatchlog_agent == None
|
def test_bootstrap_cloudwatch_log_create_log_stream_failed(mocker):
def config_get_side_effect(section, field):
if section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'log_group_name':
config = DEFAULT_CLOUDWATCH_LOG_GROUP
elif section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'retention_in_days':
config = DEFAULT_RETENTION_DAYS
else:
raise ValueError('Unexpected arguments')
def config_getboolean_side_effect(section, field):
if section == mount_efs.CLOUDWATCH_LOG_SECTION and field == 'enabled':
config = True if DEFAULT_CLOUDWATCH_ENABLED == 'true' else False
else:
raise ValueError('Unexpected arguments')
mock_config = MagicMock()
mock_config.get.side_effect = config_get_side_effect
mock_config.getboolean.side_effect = config_getboolean_side_effect
config = mock_config
mocker.patch('mount_efs.get_instance_identity_info_from_instance_metadata', return_value=INSTANCE)
get_botocore_client_mock = mocker.patch('mount_efs.get_botocore_client', return_value='fake-agent')
create_log_group_mock = mocker.patch('mount_efs.create_cloudwatch_log_group', return_value=True)
put_retention_policy_mock = mocker.patch('mount_efs.put_cloudwatch_log_retention_policy', return_value=True)
create_log_stream_mock = mocker.patch('mount_efs.create_cloudwatch_log_stream', return_value=False)
cloudwatchlog_agent = mount_efs.bootstrap_cloudwatch_logging(config, {}, FS_ID)
utils.assert_called_once(get_botocore_client_mock)
utils.assert_called_once(create_log_group_mock)
utils.assert_called_once(put_retention_policy_mock)
utils.assert_called_once(create_log_stream_mock)
assert cloudwatchlog_agent == None
|
efs-utils
|
positive
|
def render_shape(self, drawer, _, **kwargs):
<DeepExtract>
fill = kwargs.get('fill')
m = self.metrics.cell(self.node)
r = self.metrics.cellsize * 2
box = m.box
ellipses = [Box(box[0], box[1], box[0] + r * 2, box[3]), Box(box[2] - r * 2, box[1], box[2], box[3])]
for e in ellipses:
if kwargs.get('shadow'):
e = self.shift_shadow(e)
if kwargs.get('style') == 'blur':
drawer.ellipse(e, fill=fill, outline=fill, filter='transp-blur')
else:
drawer.ellipse(e, fill=fill, outline=fill)
else:
drawer.ellipse(e, fill=self.node.color, outline=self.node.linecolor, style=self.node.style)
rect = Box(box[0] + r, box[1], box[2] - r, box[3])
if kwargs.get('shadow'):
rect = self.shift_shadow(rect)
if kwargs.get('style') == 'blur':
drawer.rectangle(rect, fill=fill, outline=fill, filter='transp-blur')
else:
drawer.rectangle(rect, fill=fill, outline=fill)
else:
drawer.rectangle(rect, fill=self.node.color, outline=self.node.color)
lines = [(XY(box[0] + r, box[1]), XY(box[2] - r, box[1])), (XY(box[0] + r, box[3]), XY(box[2] - r, box[3]))]
for line in lines:
if not kwargs.get('shadow'):
drawer.line(line, fill=self.node.linecolor, style=self.node.style)
</DeepExtract>
if not kwargs.get('shadow') and self.node.background:
drawer.image(self.textbox, self.node.background)
|
def render_shape(self, drawer, _, **kwargs):
fill = kwargs.get('fill')
m = self.metrics.cell(self.node)
r = self.metrics.cellsize * 2
box = m.box
ellipses = [Box(box[0], box[1], box[0] + r * 2, box[3]), Box(box[2] - r * 2, box[1], box[2], box[3])]
for e in ellipses:
if kwargs.get('shadow'):
e = self.shift_shadow(e)
if kwargs.get('style') == 'blur':
drawer.ellipse(e, fill=fill, outline=fill, filter='transp-blur')
else:
drawer.ellipse(e, fill=fill, outline=fill)
else:
drawer.ellipse(e, fill=self.node.color, outline=self.node.linecolor, style=self.node.style)
rect = Box(box[0] + r, box[1], box[2] - r, box[3])
if kwargs.get('shadow'):
rect = self.shift_shadow(rect)
if kwargs.get('style') == 'blur':
drawer.rectangle(rect, fill=fill, outline=fill, filter='transp-blur')
else:
drawer.rectangle(rect, fill=fill, outline=fill)
else:
drawer.rectangle(rect, fill=self.node.color, outline=self.node.color)
lines = [(XY(box[0] + r, box[1]), XY(box[2] - r, box[1])), (XY(box[0] + r, box[3]), XY(box[2] - r, box[3]))]
for line in lines:
if not kwargs.get('shadow'):
drawer.line(line, fill=self.node.linecolor, style=self.node.style)
if not kwargs.get('shadow') and self.node.background:
drawer.image(self.textbox, self.node.background)
|
blockdiag
|
positive
|
def get_ecs_assumerole_policy(region: str='') -> Policy:
"""Helper function for building the ECS AssumeRole Policy."""
<DeepExtract>
tld = '.com.cn' if region == 'cn-north-1' else '.com'
    service = '{}.amazonaws{}'.format('ecs', tld)
</DeepExtract>
return make_simple_assume_policy(service)
|
def get_ecs_assumerole_policy(region: str='') -> Policy:
"""Helper function for building the ECS AssumeRole Policy."""
tld = '.com.cn' if region == 'cn-north-1' else '.com'
    service = '{}.amazonaws{}'.format('ecs', tld)
return make_simple_assume_policy(service)
|
awacs
|
positive
|
def _delete_db_set(key):
<DeepExtract>
db_value = self._db.get(KEY_CODEC.encode_set(key))
(key_id, _) = KEY_CODEC.decode_key_id_and_length(key, db_value)
</DeepExtract>
with self._db.write_batch() as batch:
batch.delete(KEY_CODEC.encode_set(key))
batch.put(KEY_CODEC.encode_deleted_set(key_id), bytes(''))
|
def _delete_db_set(key):
db_value = self._db.get(KEY_CODEC.encode_set(key))
(key_id, _) = KEY_CODEC.decode_key_id_and_length(key, db_value)
with self._db.write_batch() as batch:
batch.delete(KEY_CODEC.encode_set(key))
batch.put(KEY_CODEC.encode_deleted_set(key_id), bytes(''))
|
dredis
|
positive
|
@pytest.mark.parametrize('knn_methods', knn_methods)
def test_ola(knn_methods):
<DeepExtract>
rng = np.random.RandomState(123456)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = load_dataset(encode_labels, rng)
model = LogisticRegression(C=1, random_state=rng)
pool_classifiers = BaggingClassifier(model, n_estimators=100, n_jobs=-1, random_state=rng)
pool_classifiers.fit(X_train, y_train)
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = (pool_classifiers, X_dsel, y_dsel, X_test, y_test)
</DeepExtract>
ola = OLA(pool_classifiers, knn_classifier=knn_methods)
ola.fit(X_dsel, y_dsel)
assert np.isclose(ola.score(X_test, y_test), 0.9787234042553191)
|
@pytest.mark.parametrize('knn_methods', knn_methods)
def test_ola(knn_methods):
rng = np.random.RandomState(123456)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = load_dataset(encode_labels, rng)
model = LogisticRegression(C=1, random_state=rng)
pool_classifiers = BaggingClassifier(model, n_estimators=100, n_jobs=-1, random_state=rng)
pool_classifiers.fit(X_train, y_train)
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = (pool_classifiers, X_dsel, y_dsel, X_test, y_test)
ola = OLA(pool_classifiers, knn_classifier=knn_methods)
ola.fit(X_dsel, y_dsel)
assert np.isclose(ola.score(X_test, y_test), 0.9787234042553191)
|
DESlib
|
positive
|
def _figure_output_format_hms(parsed_data: ParsedTargetFormat, remain_tokens: List[str]) -> Tuple[ParsedTargetFormat, List[str]]:
"""
This function figure hour, minute and second token in target format
Parameters
----------
parsed_data
parsed target format
remain_tokens
remained tokens after figuring tokens
"""
if len(remain_tokens) > 0:
remain_str = ''
for token in remain_tokens:
if not token in TARGET_MONTH and (not token in TARGET_WEEKDAY) and (not token in AM) and (not token in PM):
remain_str = token
<DeepExtract>
if 'z' in remain_str:
parsed_data.timezone_token = 'z'
hms_tokens = split(remain_str, [':', parsed_data.timezone_token])
elif 'Z' in remain_str:
parsed_data.timezone_token = 'Z'
hms_tokens = split(remain_str, [':', parsed_data.timezone_token])
else:
hms_tokens = split(remain_str, [':'])
for token in AM:
if token in remain_str:
hms_tokens = split(remain_str, AM)
break
for token in PM:
if token in remain_str:
hms_tokens = split(remain_str, PM)
break
if len(hms_tokens) == 0:
hms_tokens = split(remain_str, [':'])
(parsed_data, hms_tokens) = (parsed_data, hms_tokens)
</DeepExtract>
for token in hms_tokens:
if token in TARGET_HOUR:
parsed_data.set_hour_token(token)
if token in TARGET_MINUTE:
parsed_data.set_minute_token(token)
if token in TARGET_SECOND:
parsed_data.set_second_token(token)
if len(remain_str) > 0:
remain_tokens.remove(remain_str)
return (parsed_data, remain_tokens)
|
def _figure_output_format_hms(parsed_data: ParsedTargetFormat, remain_tokens: List[str]) -> Tuple[ParsedTargetFormat, List[str]]:
"""
This function figure hour, minute and second token in target format
Parameters
----------
parsed_data
parsed target format
remain_tokens
remained tokens after figuring tokens
"""
if len(remain_tokens) > 0:
remain_str = ''
for token in remain_tokens:
if not token in TARGET_MONTH and (not token in TARGET_WEEKDAY) and (not token in AM) and (not token in PM):
remain_str = token
if 'z' in remain_str:
parsed_data.timezone_token = 'z'
hms_tokens = split(remain_str, [':', parsed_data.timezone_token])
elif 'Z' in remain_str:
parsed_data.timezone_token = 'Z'
hms_tokens = split(remain_str, [':', parsed_data.timezone_token])
else:
hms_tokens = split(remain_str, [':'])
for token in AM:
if token in remain_str:
hms_tokens = split(remain_str, AM)
break
for token in PM:
if token in remain_str:
hms_tokens = split(remain_str, PM)
break
if len(hms_tokens) == 0:
hms_tokens = split(remain_str, [':'])
(parsed_data, hms_tokens) = (parsed_data, hms_tokens)
for token in hms_tokens:
if token in TARGET_HOUR:
parsed_data.set_hour_token(token)
if token in TARGET_MINUTE:
parsed_data.set_minute_token(token)
if token in TARGET_SECOND:
parsed_data.set_second_token(token)
if len(remain_str) > 0:
remain_tokens.remove(remain_str)
return (parsed_data, remain_tokens)
|
dataprep
|
positive
|
def loss_L2Net(anchor, positive, anchor_swap=False, margin=1.0, loss_type='triplet_margin'):
"""L2Net losses: using whole batch as negatives, not only hardest.
"""
assert anchor.size() == positive.size(), 'Input sizes between positive and negative must be equal.'
    assert anchor.dim() == 2, 'Input must be a 2D matrix.'
eps = 1e-08
<DeepExtract>
d1_sq = torch.sum(anchor * anchor, dim=1).unsqueeze(-1)
d2_sq = torch.sum(positive * positive, dim=1).unsqueeze(-1)
eps = 1e-06
dist_matrix = torch.sqrt(d1_sq.repeat(1, positive.size(0)) + torch.t(d2_sq.repeat(1, anchor.size(0))) - 2.0 * torch.bmm(anchor.unsqueeze(0), torch.t(positive).unsqueeze(0)).squeeze(0) + eps)
</DeepExtract>
eye = torch.autograd.Variable(torch.eye(dist_matrix.size(1))).cuda()
pos1 = torch.diag(dist_matrix)
dist_without_min_on_diag = dist_matrix + eye * 10
mask = (dist_without_min_on_diag.ge(0.008) - 1) * -1
mask = mask.type_as(dist_without_min_on_diag) * 10
dist_without_min_on_diag = dist_without_min_on_diag + mask
if loss_type == 'softmax':
exp_pos = torch.exp(2.0 - pos1)
exp_den = torch.sum(torch.exp(2.0 - dist_matrix), 1) + eps
loss = -torch.log(exp_pos / exp_den)
if anchor_swap:
exp_den1 = torch.sum(torch.exp(2.0 - dist_matrix), 0) + eps
loss += -torch.log(exp_pos / exp_den1)
else:
print('Only softmax loss works with L2Net sampling')
sys.exit(1)
loss = torch.mean(loss)
return loss
|
def loss_L2Net(anchor, positive, anchor_swap=False, margin=1.0, loss_type='triplet_margin'):
"""L2Net losses: using whole batch as negatives, not only hardest.
"""
assert anchor.size() == positive.size(), 'Input sizes between positive and negative must be equal.'
    assert anchor.dim() == 2, 'Input must be a 2D matrix.'
eps = 1e-08
d1_sq = torch.sum(anchor * anchor, dim=1).unsqueeze(-1)
d2_sq = torch.sum(positive * positive, dim=1).unsqueeze(-1)
eps = 1e-06
dist_matrix = torch.sqrt(d1_sq.repeat(1, positive.size(0)) + torch.t(d2_sq.repeat(1, anchor.size(0))) - 2.0 * torch.bmm(anchor.unsqueeze(0), torch.t(positive).unsqueeze(0)).squeeze(0) + eps)
eye = torch.autograd.Variable(torch.eye(dist_matrix.size(1))).cuda()
pos1 = torch.diag(dist_matrix)
dist_without_min_on_diag = dist_matrix + eye * 10
mask = (dist_without_min_on_diag.ge(0.008) - 1) * -1
mask = mask.type_as(dist_without_min_on_diag) * 10
dist_without_min_on_diag = dist_without_min_on_diag + mask
if loss_type == 'softmax':
exp_pos = torch.exp(2.0 - pos1)
exp_den = torch.sum(torch.exp(2.0 - dist_matrix), 1) + eps
loss = -torch.log(exp_pos / exp_den)
if anchor_swap:
exp_den1 = torch.sum(torch.exp(2.0 - dist_matrix), 0) + eps
loss += -torch.log(exp_pos / exp_den1)
else:
print('Only softmax loss works with L2Net sampling')
sys.exit(1)
loss = torch.mean(loss)
return loss
|
affnet
|
positive
|
def __init__(self, num_spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, ...]], stride: Union[int, Tuple[int, ...]]=1, padding: Union[str, int, Tuple[int, int], Sequence[Tuple[int, int]]]='SAME', lhs_dilation: Union[int, Tuple[int, ...]]=1, rhs_dilation: Union[int, Tuple[int, ...]]=1, groups: int=1, w_initializer: Union[Callable, ArrayType, Initializer]=XavierNormal(), b_initializer: Optional[Union[Callable, ArrayType, Initializer]]=ZeroInit(), mask: Optional[ArrayType]=None, mode: bm.Mode=None, name: str=None):
super(_GeneralConv, self).__init__(name=name, mode=mode)
check.is_subclass(self.mode, (bm.TrainingMode, bm.BatchingMode), self.name)
self.num_spatial_dims = num_spatial_dims
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = tools.replicate(stride, num_spatial_dims, 'stride')
self.kernel_size = tools.replicate(kernel_size, num_spatial_dims, 'kernel_size')
self.lhs_dilation = tools.replicate(lhs_dilation, num_spatial_dims, 'lhs_dilation')
self.rhs_dilation = tools.replicate(rhs_dilation, num_spatial_dims, 'rhs_dilation')
self.groups = groups
self.w_initializer = w_initializer
self.b_initializer = b_initializer
self.mask = mask
<DeepExtract>
num_dims = num_spatial_dims + 2
if True:
spatial_dims = tuple(range(1, num_dims - 1))
image_dn = (0, num_dims - 1) + spatial_dims
else:
spatial_dims = tuple(range(2, num_dims))
image_dn = (0, 1) + spatial_dims
if False:
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
else:
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
self.dimension_numbers = lax.ConvDimensionNumbers(lhs_spec=image_dn, rhs_spec=kernel_dn, out_spec=image_dn)
</DeepExtract>
if isinstance(padding, str):
assert padding in ['SAME', 'VALID']
elif isinstance(padding, int):
padding = tuple(((padding, padding) for _ in range(num_spatial_dims)))
elif isinstance(padding, (tuple, list)):
if isinstance(padding[0], int):
padding = (padding,) * num_spatial_dims
elif isinstance(padding[0], (tuple, list)):
if len(padding) == 1:
padding = tuple(padding) * num_spatial_dims
else:
if len(padding) != num_spatial_dims:
raise ValueError(f'Padding {padding} must be a Tuple[int, int], or sequence of Tuple[int, int] with length 1, or sequence of Tuple[int, int] with length {num_spatial_dims}.')
padding = tuple(padding)
else:
raise ValueError
self.padding = padding
assert self.out_channels % self.groups == 0, '"out_channels" should be divisible by groups'
assert self.in_channels % self.groups == 0, '"in_channels" should be divisible by groups'
kernel_shape = tuple(self.kernel_size) + (self.in_channels // self.groups, self.out_channels)
bias_shape = (1,) * len(self.kernel_size) + (self.out_channels,)
self.w = parameter(self.w_initializer, kernel_shape, allow_none=False)
self.b = parameter(self.b_initializer, bias_shape, allow_none=True)
if isinstance(self.mode, bm.TrainingMode):
self.w = bm.TrainVar(self.w)
if self.b is not None:
self.b = bm.TrainVar(self.b)
|
def __init__(self, num_spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, ...]], stride: Union[int, Tuple[int, ...]]=1, padding: Union[str, int, Tuple[int, int], Sequence[Tuple[int, int]]]='SAME', lhs_dilation: Union[int, Tuple[int, ...]]=1, rhs_dilation: Union[int, Tuple[int, ...]]=1, groups: int=1, w_initializer: Union[Callable, ArrayType, Initializer]=XavierNormal(), b_initializer: Optional[Union[Callable, ArrayType, Initializer]]=ZeroInit(), mask: Optional[ArrayType]=None, mode: bm.Mode=None, name: str=None):
super(_GeneralConv, self).__init__(name=name, mode=mode)
check.is_subclass(self.mode, (bm.TrainingMode, bm.BatchingMode), self.name)
self.num_spatial_dims = num_spatial_dims
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = tools.replicate(stride, num_spatial_dims, 'stride')
self.kernel_size = tools.replicate(kernel_size, num_spatial_dims, 'kernel_size')
self.lhs_dilation = tools.replicate(lhs_dilation, num_spatial_dims, 'lhs_dilation')
self.rhs_dilation = tools.replicate(rhs_dilation, num_spatial_dims, 'rhs_dilation')
self.groups = groups
self.w_initializer = w_initializer
self.b_initializer = b_initializer
self.mask = mask
num_dims = num_spatial_dims + 2
if True:
spatial_dims = tuple(range(1, num_dims - 1))
image_dn = (0, num_dims - 1) + spatial_dims
else:
spatial_dims = tuple(range(2, num_dims))
image_dn = (0, 1) + spatial_dims
if False:
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
else:
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
self.dimension_numbers = lax.ConvDimensionNumbers(lhs_spec=image_dn, rhs_spec=kernel_dn, out_spec=image_dn)
if isinstance(padding, str):
assert padding in ['SAME', 'VALID']
elif isinstance(padding, int):
padding = tuple(((padding, padding) for _ in range(num_spatial_dims)))
elif isinstance(padding, (tuple, list)):
if isinstance(padding[0], int):
padding = (padding,) * num_spatial_dims
elif isinstance(padding[0], (tuple, list)):
if len(padding) == 1:
padding = tuple(padding) * num_spatial_dims
else:
if len(padding) != num_spatial_dims:
raise ValueError(f'Padding {padding} must be a Tuple[int, int], or sequence of Tuple[int, int] with length 1, or sequence of Tuple[int, int] with length {num_spatial_dims}.')
padding = tuple(padding)
else:
raise ValueError
self.padding = padding
assert self.out_channels % self.groups == 0, '"out_channels" should be divisible by groups'
assert self.in_channels % self.groups == 0, '"in_channels" should be divisible by groups'
kernel_shape = tuple(self.kernel_size) + (self.in_channels // self.groups, self.out_channels)
bias_shape = (1,) * len(self.kernel_size) + (self.out_channels,)
self.w = parameter(self.w_initializer, kernel_shape, allow_none=False)
self.b = parameter(self.b_initializer, bias_shape, allow_none=True)
if isinstance(self.mode, bm.TrainingMode):
self.w = bm.TrainVar(self.w)
if self.b is not None:
self.b = bm.TrainVar(self.b)
|
BrainPy
|
positive
|
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource('s3')
<DeepExtract>
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError('bad s3 path {}'.format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
if s3_path.startswith('/'):
s3_path = s3_path[1:]
(bucket_name, s3_path) = (bucket_name, s3_path)
</DeepExtract>
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
|
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource('s3')
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError('bad s3 path {}'.format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
if s3_path.startswith('/'):
s3_path = s3_path[1:]
(bucket_name, s3_path) = (bucket_name, s3_path)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
|
Chinese-clinical-NER
|
positive
|
def train(self, mode=True):
super(ResNet, self).train(mode)
<DeepExtract>
if self.frozen_stages >= 0:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
</DeepExtract>
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
|
def train(self, mode=True):
super(ResNet, self).train(mode)
if self.frozen_stages >= 0:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
|
DenseCL
|
positive
|
def validate_stack_combos(self, data: QueryRuleData, meta: RuleMeta) -> None:
"""Validate the query against ECS and beats schemas across stack combinations."""
for (stack_version, mapping) in meta.get_validation_stack_versions().items():
beats_version = mapping['beats']
ecs_version = mapping['ecs']
endgame_version = mapping['endgame']
err_trailer = f'stack: {stack_version}, beats: {beats_version},ecs: {ecs_version}, endgame: {endgame_version}'
(beat_types, beat_schema, schema) = self.get_beats_schema(data.index or [], beats_version, ecs_version)
endgame_schema = self.get_endgame_schema(data.index, endgame_version)
eql_schema = ecs.KqlSchema2Eql(schema)
<DeepExtract>
try:
with eql_schema, eql.parser.elasticsearch_syntax, eql.parser.ignore_missing_functions:
eql.parse_query(self.query)
except eql.EqlParseError as exc:
message = exc.error_msg
trailer = err_trailer
if 'Unknown field' in message and beat_types:
trailer = f'\nTry adding event.module or event.dataset to specify beats module\n\n{trailer}'
elif 'Field not recognized' in message:
text_fields = self.text_fields(eql_schema)
if text_fields:
fields_str = ', '.join(text_fields)
trailer = f'\neql does not support text fields: {fields_str}\n\n{trailer}'
raise exc.__class__(exc.error_msg, exc.line, exc.column, exc.source, len(exc.caret.lstrip()), trailer=trailer) from None
except Exception:
print(err_trailer)
raise
</DeepExtract>
if endgame_schema:
<DeepExtract>
try:
with endgame_schema, eql.parser.elasticsearch_syntax, eql.parser.ignore_missing_functions:
eql.parse_query(self.query)
except eql.EqlParseError as exc:
message = exc.error_msg
trailer = err_trailer
if 'Unknown field' in message and beat_types:
trailer = f'\nTry adding event.module or event.dataset to specify beats module\n\n{trailer}'
elif 'Field not recognized' in message:
text_fields = self.text_fields(endgame_schema)
if text_fields:
fields_str = ', '.join(text_fields)
trailer = f'\neql does not support text fields: {fields_str}\n\n{trailer}'
raise exc.__class__(exc.error_msg, exc.line, exc.column, exc.source, len(exc.caret.lstrip()), trailer=trailer) from None
except Exception:
print(err_trailer)
raise
</DeepExtract>
|
def validate_stack_combos(self, data: QueryRuleData, meta: RuleMeta) -> None:
"""Validate the query against ECS and beats schemas across stack combinations."""
for (stack_version, mapping) in meta.get_validation_stack_versions().items():
beats_version = mapping['beats']
ecs_version = mapping['ecs']
endgame_version = mapping['endgame']
err_trailer = f'stack: {stack_version}, beats: {beats_version},ecs: {ecs_version}, endgame: {endgame_version}'
(beat_types, beat_schema, schema) = self.get_beats_schema(data.index or [], beats_version, ecs_version)
endgame_schema = self.get_endgame_schema(data.index, endgame_version)
eql_schema = ecs.KqlSchema2Eql(schema)
try:
with eql_schema, eql.parser.elasticsearch_syntax, eql.parser.ignore_missing_functions:
eql.parse_query(self.query)
except eql.EqlParseError as exc:
message = exc.error_msg
trailer = err_trailer
if 'Unknown field' in message and beat_types:
trailer = f'\nTry adding event.module or event.dataset to specify beats module\n\n{trailer}'
elif 'Field not recognized' in message:
text_fields = self.text_fields(eql_schema)
if text_fields:
fields_str = ', '.join(text_fields)
trailer = f'\neql does not support text fields: {fields_str}\n\n{trailer}'
raise exc.__class__(exc.error_msg, exc.line, exc.column, exc.source, len(exc.caret.lstrip()), trailer=trailer) from None
except Exception:
print(err_trailer)
raise
if endgame_schema:
try:
with endgame_schema, eql.parser.elasticsearch_syntax, eql.parser.ignore_missing_functions:
eql.parse_query(self.query)
except eql.EqlParseError as exc:
message = exc.error_msg
trailer = err_trailer
if 'Unknown field' in message and beat_types:
trailer = f'\nTry adding event.module or event.dataset to specify beats module\n\n{trailer}'
elif 'Field not recognized' in message:
text_fields = self.text_fields(endgame_schema)
if text_fields:
fields_str = ', '.join(text_fields)
trailer = f'\neql does not support text fields: {fields_str}\n\n{trailer}'
raise exc.__class__(exc.error_msg, exc.line, exc.column, exc.source, len(exc.caret.lstrip()), trailer=trailer) from None
except Exception:
print(err_trailer)
raise
|
detection-rules
|
positive
|
def update(dbdef, vnums, connector):
for vnum in vnums:
with connector.connection() as c:
<DeepExtract>
try:
script = dbdef[str(vnum)]['update.sql']
except KeyError:
if missing_ok:
return
raise
lno = 1
for stmt in split_sqlscript(script):
linecount = stmt.count('\n')
try:
cursor = c.cursor()
cursor.execute(stmt.strip())
except sqlite3.Error as e:
if stmt.splitlines() and (not stmt.splitlines()[0].strip()):
lno += 1
linecount -= 1
msg = '{br}{script}:{br}{listing}{br}{br}{error}'.format(script='{0}:{1}:{2}'.format(vnum, 'update.sql', lno), listing=os.linesep.join(script_lines(script, lno, linecount + 1)), error=e, br=os.linesep)
raise AssertionError(msg)
else:
lno += linecount
finally:
cursor.close()
</DeepExtract>
<DeepExtract>
try:
script = dbdef[str(vnum)]['after.sql']
except KeyError:
if True:
return
raise
lno = 1
for stmt in split_sqlscript(script):
linecount = stmt.count('\n')
try:
cursor = c.cursor()
cursor.execute(stmt.strip())
except sqlite3.Error as e:
if stmt.splitlines() and (not stmt.splitlines()[0].strip()):
lno += 1
linecount -= 1
msg = '{br}{script}:{br}{listing}{br}{br}{error}'.format(script='{0}:{1}:{2}'.format(vnum, 'after.sql', lno), listing=os.linesep.join(script_lines(script, lno, linecount + 1)), error=e, br=os.linesep)
raise AssertionError(msg)
else:
lno += linecount
finally:
cursor.close()
</DeepExtract>
|
def update(dbdef, vnums, connector):
for vnum in vnums:
with connector.connection() as c:
try:
script = dbdef[str(vnum)]['update.sql']
except KeyError:
if missing_ok:
return
raise
lno = 1
for stmt in split_sqlscript(script):
linecount = stmt.count('\n')
try:
cursor = c.cursor()
cursor.execute(stmt.strip())
except sqlite3.Error as e:
if stmt.splitlines() and (not stmt.splitlines()[0].strip()):
lno += 1
linecount -= 1
msg = '{br}{script}:{br}{listing}{br}{br}{error}'.format(script='{0}:{1}:{2}'.format(vnum, 'update.sql', lno), listing=os.linesep.join(script_lines(script, lno, linecount + 1)), error=e, br=os.linesep)
raise AssertionError(msg)
else:
lno += linecount
finally:
cursor.close()
try:
script = dbdef[str(vnum)]['after.sql']
except KeyError:
if True:
return
raise
lno = 1
for stmt in split_sqlscript(script):
linecount = stmt.count('\n')
try:
cursor = c.cursor()
cursor.execute(stmt.strip())
except sqlite3.Error as e:
if stmt.splitlines() and (not stmt.splitlines()[0].strip()):
lno += 1
linecount -= 1
msg = '{br}{script}:{br}{listing}{br}{br}{error}'.format(script='{0}:{1}:{2}'.format(vnum, 'after.sql', lno), listing=os.linesep.join(script_lines(script, lno, linecount + 1)), error=e, br=os.linesep)
raise AssertionError(msg)
else:
lno += linecount
finally:
cursor.close()
|
cherrymusic
|
positive
|
def compute_targets(image_group, annotations_group, num_classes):
""" Compute target outputs for the network using images and their annotations.
"""
max_shape = tuple((max((image.shape[x] for image in image_group)) for x in range(3)))
<DeepExtract>
anchor_params = None
pyramid_levels = None
anchors = anchors_for_shape(max_shape, anchor_params=anchor_params, pyramid_levels=pyramid_levels, shapes_callback=guess_shapes)
</DeepExtract>
<DeepExtract>
assert len(image_group) == len(annotations_group), 'The length of the images and annotations need to be equal.'
assert len(annotations_group) > 0, 'No data received to compute anchor targets for.'
for annotations in annotations_group:
assert 'bboxes' in annotations, 'Annotations should contain bboxes.'
assert 'labels' in annotations, 'Annotations should contain labels.'
batch_size = len(image_group)
regression_batch = np.zeros((batch_size, anchors.shape[0], 4 + 1), dtype=keras.backend.floatx())
labels_batch = np.zeros((batch_size, anchors.shape[0], num_classes + 1), dtype=keras.backend.floatx())
for (index, (image, annotations)) in enumerate(zip(image_group, annotations_group)):
if annotations['bboxes'].shape[0]:
(positive_indices, ignore_indices, argmax_overlaps_inds) = compute_gt_annotations(anchors, annotations['bboxes'], negative_overlap, positive_overlap)
labels_batch[index, ignore_indices, -1] = -1
labels_batch[index, positive_indices, -1] = 1
regression_batch[index, ignore_indices, -1] = -1
regression_batch[index, positive_indices, -1] = 1
labels_batch[index, positive_indices, annotations['labels'][argmax_overlaps_inds[positive_indices]].astype(int)] = 1
regression_batch[index, :, :-1] = bbox_transform(anchors, annotations['bboxes'][argmax_overlaps_inds, :])
if image.shape:
anchors_centers = np.vstack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]).T
indices = np.logical_or(anchors_centers[:, 0] >= image.shape[1], anchors_centers[:, 1] >= image.shape[0])
labels_batch[index, indices, -1] = -1
regression_batch[index, indices, -1] = -1
batches = (regression_batch, labels_batch)
</DeepExtract>
return list(batches)
|
def compute_targets(image_group, annotations_group, num_classes):
""" Compute target outputs for the network using images and their annotations.
"""
max_shape = tuple((max((image.shape[x] for image in image_group)) for x in range(3)))
anchor_params = None
pyramid_levels = None
anchors = anchors_for_shape(max_shape, anchor_params=anchor_params, pyramid_levels=pyramid_levels, shapes_callback=guess_shapes)
assert len(image_group) == len(annotations_group), 'The length of the images and annotations need to be equal.'
assert len(annotations_group) > 0, 'No data received to compute anchor targets for.'
for annotations in annotations_group:
assert 'bboxes' in annotations, 'Annotations should contain bboxes.'
assert 'labels' in annotations, 'Annotations should contain labels.'
batch_size = len(image_group)
regression_batch = np.zeros((batch_size, anchors.shape[0], 4 + 1), dtype=keras.backend.floatx())
labels_batch = np.zeros((batch_size, anchors.shape[0], num_classes + 1), dtype=keras.backend.floatx())
for (index, (image, annotations)) in enumerate(zip(image_group, annotations_group)):
if annotations['bboxes'].shape[0]:
(positive_indices, ignore_indices, argmax_overlaps_inds) = compute_gt_annotations(anchors, annotations['bboxes'], negative_overlap, positive_overlap)
labels_batch[index, ignore_indices, -1] = -1
labels_batch[index, positive_indices, -1] = 1
regression_batch[index, ignore_indices, -1] = -1
regression_batch[index, positive_indices, -1] = 1
labels_batch[index, positive_indices, annotations['labels'][argmax_overlaps_inds[positive_indices]].astype(int)] = 1
regression_batch[index, :, :-1] = bbox_transform(anchors, annotations['bboxes'][argmax_overlaps_inds, :])
if image.shape:
anchors_centers = np.vstack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]).T
indices = np.logical_or(anchors_centers[:, 0] >= image.shape[1], anchors_centers[:, 1] >= image.shape[0])
labels_batch[index, indices, -1] = -1
regression_batch[index, indices, -1] = -1
batches = (regression_batch, labels_batch)
return list(batches)
|
AutoML
|
positive
|
def testlen(self, option=None):
<DeepExtract>
n = self.n
small = 1e-09
tiny = 1e-15
bleu_list = [[] for _ in range(n)]
if self._score is not None:
return self._score
if option is None:
option = 'average' if len(self.crefs) == 1 else 'closest'
self._testlen = 0
self._reflen = 0
totalcomps = {'testlen': 0, 'reflen': 0, 'guess': [0] * n, 'correct': [0] * n}
for comps in self.ctest:
testlen = comps['testlen']
self._testlen += testlen
if self.special_reflen is None:
reflen = self._single_reflen(comps['reflen'], option, testlen)
else:
reflen = self.special_reflen
self._reflen += reflen
for key in ['guess', 'correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
bleu = 1.0
for k in range(n):
bleu *= (float(comps['correct'][k]) + tiny) / (float(comps['guess'][k]) + small)
bleu_list[k].append(bleu ** (1.0 / (k + 1)))
ratio = (testlen + tiny) / (reflen + small)
if ratio < 1:
for k in range(n):
bleu_list[k][-1] *= math.exp(1 - 1 / ratio)
if verbose > 1:
print(comps, reflen)
totalcomps['reflen'] = self._reflen
totalcomps['testlen'] = self._testlen
bleus = []
bleu = 1.0
for k in range(n):
bleu *= float(totalcomps['correct'][k] + tiny) / (totalcomps['guess'][k] + small)
bleus.append(bleu ** (1.0 / (k + 1)))
ratio = (self._testlen + tiny) / (self._reflen + small)
if ratio < 1:
for k in range(n):
bleus[k] *= math.exp(1 - 1 / ratio)
if verbose > 0:
print(totalcomps)
print('ratio:', ratio)
self._score = bleus
return (self._score, bleu_list)
</DeepExtract>
return self._testlen
|
def testlen(self, option=None):
n = self.n
small = 1e-09
tiny = 1e-15
bleu_list = [[] for _ in range(n)]
if self._score is not None:
return self._score
if option is None:
option = 'average' if len(self.crefs) == 1 else 'closest'
self._testlen = 0
self._reflen = 0
totalcomps = {'testlen': 0, 'reflen': 0, 'guess': [0] * n, 'correct': [0] * n}
for comps in self.ctest:
testlen = comps['testlen']
self._testlen += testlen
if self.special_reflen is None:
reflen = self._single_reflen(comps['reflen'], option, testlen)
else:
reflen = self.special_reflen
self._reflen += reflen
for key in ['guess', 'correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
bleu = 1.0
for k in range(n):
bleu *= (float(comps['correct'][k]) + tiny) / (float(comps['guess'][k]) + small)
bleu_list[k].append(bleu ** (1.0 / (k + 1)))
ratio = (testlen + tiny) / (reflen + small)
if ratio < 1:
for k in range(n):
bleu_list[k][-1] *= math.exp(1 - 1 / ratio)
if verbose > 1:
print(comps, reflen)
totalcomps['reflen'] = self._reflen
totalcomps['testlen'] = self._testlen
bleus = []
bleu = 1.0
for k in range(n):
bleu *= float(totalcomps['correct'][k] + tiny) / (totalcomps['guess'][k] + small)
bleus.append(bleu ** (1.0 / (k + 1)))
ratio = (self._testlen + tiny) / (self._reflen + small)
if ratio < 1:
for k in range(n):
bleus[k] *= math.exp(1 - 1 / ratio)
if verbose > 0:
print(totalcomps)
print('ratio:', ratio)
self._score = bleus
return (self._score, bleu_list)
return self._testlen
|
ALBEF
|
positive
|
def get_cdn_name_by_ip(ip):
from . import get_logger
logger = get_logger()
try:
<DeepExtract>
from . import load_file
global cdn_ip_cidr_list, cdn_cname_list, cdn_info
if not cdn_info:
cdn_ip_cidr_list = []
cdn_cname_list = []
data = '\n'.join(load_file(Config.CDN_JSON_PATH))
cdn_info = json.loads(data)
for item in cdn_info:
cdn_cname_list.extend(item['cname_domain'])
if item.get('ip_cidr'):
cdn_ip_cidr_list.extend(item['ip_cidr'])
</DeepExtract>
if not _ip_in_cidr_list(ip):
return ''
for item in cdn_info:
if item.get('ip_cidr'):
for ip_cidr in item['ip_cidr']:
if IP(ip) in IP(ip_cidr):
return item['name']
except Exception as e:
logger.warning('{} {}'.format(e, ip))
return ''
|
def get_cdn_name_by_ip(ip):
from . import get_logger
logger = get_logger()
try:
from . import load_file
global cdn_ip_cidr_list, cdn_cname_list, cdn_info
if not cdn_info:
cdn_ip_cidr_list = []
cdn_cname_list = []
data = '\n'.join(load_file(Config.CDN_JSON_PATH))
cdn_info = json.loads(data)
for item in cdn_info:
cdn_cname_list.extend(item['cname_domain'])
if item.get('ip_cidr'):
cdn_ip_cidr_list.extend(item['ip_cidr'])
if not _ip_in_cidr_list(ip):
return ''
for item in cdn_info:
if item.get('ip_cidr'):
for ip_cidr in item['ip_cidr']:
if IP(ip) in IP(ip_cidr):
return item['name']
except Exception as e:
logger.warning('{} {}'.format(e, ip))
return ''
|
ARL
|
positive
|
def get_work_private_ips(ec2, tag=WORKER_INSTANCE_NAME, include_head=True):
"""get the internal IPs of the workers, including the head by default"""
<DeepExtract>
filters = [{'Name': 'tag:type', 'Values': [tag]}, {'Name': 'instance-state-name', 'Values': ['running']}]
instances = list(ec2.instances.filter(Filters=filters))
</DeepExtract>
if include_head:
<DeepExtract>
filters = [{'Name': 'tag:type', 'Values': [HEAD_INSTANCE_NAME]}, {'Name': 'instance-state-name', 'Values': ['running']}]
head_instance = list(ec2.instances.filter(Filters=filters))
</DeepExtract>
assert len(head_instance) == 1, 'Only expect one head node'
head_ip = head_instance[0].private_ip_address
return [head_ip] + [i.private_ip_address for i in instances]
|
def get_work_private_ips(ec2, tag=WORKER_INSTANCE_NAME, include_head=True):
"""get the internal IPs of the workers, including the head by default"""
filters = [{'Name': 'tag:type', 'Values': [tag]}, {'Name': 'instance-state-name', 'Values': ['running']}]
instances = list(ec2.instances.filter(Filters=filters))
if include_head:
filters = [{'Name': 'tag:type', 'Values': [HEAD_INSTANCE_NAME]}, {'Name': 'instance-state-name', 'Values': ['running']}]
head_instance = list(ec2.instances.filter(Filters=filters))
assert len(head_instance) == 1, 'Only expect one head node'
head_ip = head_instance[0].private_ip_address
return [head_ip] + [i.private_ip_address for i in instances]
|
BluePyOpt
|
positive
|
def test_submit_r_assignment_answer_with_submission(self):
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
<DeepExtract>
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
</DeepExtract>
response = client.post('/course/1/assignment/1/submit_r_assignment_answer', {'question_id': 4, 'answer': 'Because of Global Cooling caused by abnormal solar hibernation.'}, **kwargs)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(array['status'], 'success')
self.assertEqual(array['message'], 'submitted')
|
def test_submit_r_assignment_answer_with_submission(self):
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
response = client.post('/course/1/assignment/1/submit_r_assignment_answer', {'question_id': 4, 'answer': 'Because of Global Cooling caused by abnormal solar hibernation.'}, **kwargs)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(array['status'], 'success')
self.assertEqual(array['message'], 'submitted')
|
academicstoday-django
|
positive
|
def install(arguments):
<DeepExtract>
fullname = arguments['<package>']
if fullname is None or '/' not in fullname:
warning('Incorrect package parameter. Should be namespace/package_name.')
raise ValueError("Incorrect package parameter's value")
fullname = fullname
</DeepExtract>
maybe_vsn = arguments['<version>']
controller = Controller()
return controller.install(fullname, maybe_vsn)
|
def install(arguments):
fullname = arguments['<package>']
if fullname is None or '/' not in fullname:
warning('Incorrect package parameter. Should be namespace/package_name.')
raise ValueError("Incorrect package parameter's value")
fullname = fullname
maybe_vsn = arguments['<version>']
controller = Controller()
return controller.install(fullname, maybe_vsn)
|
enot
|
positive
|
def disassemble16_to_string(fmt, opcode, addr):
<DeepExtract>
state = DecodeState()
state.opcode = opcode
state.addr = addr
state.addr_size = 2
state.op_size = 2
state.using64 = False
if len(state.opcode) > 15:
state.opcode = state.opcode[0:15]
state.orig_len = len(state.opcode)
process_prefixes(state)
process_opcode(state, MainOpcodeMap, read8(state))
finish_disassemble(state)
state.result.addr_size = state.addr_size
instr = state.result
</DeepExtract>
return format_instruction_string(fmt, opcode, addr, instr)
|
def disassemble16_to_string(fmt, opcode, addr):
state = DecodeState()
state.opcode = opcode
state.addr = addr
state.addr_size = 2
state.op_size = 2
state.using64 = False
if len(state.opcode) > 15:
state.opcode = state.opcode[0:15]
state.orig_len = len(state.opcode)
process_prefixes(state)
process_opcode(state, MainOpcodeMap, read8(state))
finish_disassemble(state)
state.result.addr_size = state.addr_size
instr = state.result
return format_instruction_string(fmt, opcode, addr, instr)
|
deprecated-binaryninja-python
|
positive
|
def forward(self, src, src_lengths=None, sent_position_tuple=None):
"""See :obj:`EncoderBase.forward()`"""
assert src_lengths is not None
assert isinstance(sent_position_tuple, tuple), 'The sent_position for seqHREncoder should be a tuple.'
(sent_p, sent_nums) = sent_position_tuple
(src_len, batch, f_num) = src.size()
batch_1 = src_lengths.size(0)
(batch_2, s_num, _) = sent_p.size()
batch_3 = sent_nums.size(0)
aeq(batch, batch_1, batch_2, batch_3)
emb = self.embeddings(src)
lengths_list = src_lengths.view(-1).tolist()
packed_emb = pack(emb, lengths_list)
(word_memory_bank, word_encoder_final) = self.word_rnn(packed_emb)
word_memory_bank = unpack(word_memory_bank)[0]
sent_p = sent_p.transpose(0, 1)
f_index = sent_p[:, :, 0].unsqueeze(-1).expand(-1, -1, self.hidden_size)
b_index = sent_p[:, :, 1].unsqueeze(-1).expand(-1, -1, self.hidden_size)
gather_index = torch.cat([f_index, b_index], dim=-1)
sent_vector = word_memory_bank.gather(dim=0, index=gather_index)
sent_vector = self.dropout(sent_vector)
(sorted_sent_nums, idx_sort) = torch.sort(sent_nums, dim=0, descending=True)
(_, idx_unsort) = torch.sort(idx_sort, dim=0)
sent_vector = sent_vector.index_select(1, idx_sort)
sorted_sent_nums_list = sorted_sent_nums.view(-1).tolist()
packed_emb = pack(sent_vector, sorted_sent_nums_list)
(sent_memory_bank, sent_encoder_final) = self.sent_rnn(packed_emb)
sent_memory_bank = unpack(sent_memory_bank)[0]
sent_memory_bank = sent_memory_bank.index_select(1, idx_unsort)
sent_encoder_final = sent_encoder_final.index_select(1, idx_unsort)
out_final = sent_encoder_final
if self.output_word_final:
out_final = word_encoder_final
if self.use_bridge:
<DeepExtract>
def bottle_hidden(linear, states):
"""
Transform from 3D to 2D, apply linear and return initial size
"""
size = states.size()
result = linear(states.view(-1, self.total_hidden_dim))
out_final = F.tanh(result).view(size)
if isinstance(out_final, tuple):
outs = tuple([bottle_hidden(layer, out_final[ix]) for (ix, layer) in enumerate(self.bridge)])
else:
outs = bottle_hidden(self.bridge[0], out_final)
out_final = outs
</DeepExtract>
return (out_final, (sent_memory_bank, word_memory_bank), src_lengths)
|
def forward(self, src, src_lengths=None, sent_position_tuple=None):
"""See :obj:`EncoderBase.forward()`"""
assert src_lengths is not None
assert isinstance(sent_position_tuple, tuple), 'The sent_position for seqHREncoder should be a tuple.'
(sent_p, sent_nums) = sent_position_tuple
(src_len, batch, f_num) = src.size()
batch_1 = src_lengths.size(0)
(batch_2, s_num, _) = sent_p.size()
batch_3 = sent_nums.size(0)
aeq(batch, batch_1, batch_2, batch_3)
emb = self.embeddings(src)
lengths_list = src_lengths.view(-1).tolist()
packed_emb = pack(emb, lengths_list)
(word_memory_bank, word_encoder_final) = self.word_rnn(packed_emb)
word_memory_bank = unpack(word_memory_bank)[0]
sent_p = sent_p.transpose(0, 1)
f_index = sent_p[:, :, 0].unsqueeze(-1).expand(-1, -1, self.hidden_size)
b_index = sent_p[:, :, 1].unsqueeze(-1).expand(-1, -1, self.hidden_size)
gather_index = torch.cat([f_index, b_index], dim=-1)
sent_vector = word_memory_bank.gather(dim=0, index=gather_index)
sent_vector = self.dropout(sent_vector)
(sorted_sent_nums, idx_sort) = torch.sort(sent_nums, dim=0, descending=True)
(_, idx_unsort) = torch.sort(idx_sort, dim=0)
sent_vector = sent_vector.index_select(1, idx_sort)
sorted_sent_nums_list = sorted_sent_nums.view(-1).tolist()
packed_emb = pack(sent_vector, sorted_sent_nums_list)
(sent_memory_bank, sent_encoder_final) = self.sent_rnn(packed_emb)
sent_memory_bank = unpack(sent_memory_bank)[0]
sent_memory_bank = sent_memory_bank.index_select(1, idx_unsort)
sent_encoder_final = sent_encoder_final.index_select(1, idx_unsort)
out_final = sent_encoder_final
if self.output_word_final:
out_final = word_encoder_final
if self.use_bridge:
def bottle_hidden(linear, states):
"""
Transform from 3D to 2D, apply linear and return initial size
"""
size = states.size()
result = linear(states.view(-1, self.total_hidden_dim))
out_final = F.tanh(result).view(size)
if isinstance(out_final, tuple):
outs = tuple([bottle_hidden(layer, out_final[ix]) for (ix, layer) in enumerate(self.bridge)])
else:
outs = bottle_hidden(self.bridge[0], out_final)
out_final = outs
return (out_final, (sent_memory_bank, word_memory_bank), src_lengths)
|
ExHiRD-DKG
|
positive
|
def write(self):
<DeepExtract>
assert len(_CURRENT_STORAGE_STACK), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!"
storage = _CURRENT_STORAGE_STACK[-1]
</DeepExtract>
for (k, v) in storage.latest_with_smoothing_hint(self._window_size).items():
self._writer.add_scalar(k, v, storage.iter)
if len(storage.vis_data) >= 1:
for (img_name, img, step_num) in storage.vis_data:
self._writer.add_image(img_name, img, step_num)
storage.clear_images()
|
def write(self):
assert len(_CURRENT_STORAGE_STACK), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!"
storage = _CURRENT_STORAGE_STACK[-1]
for (k, v) in storage.latest_with_smoothing_hint(self._window_size).items():
self._writer.add_scalar(k, v, storage.iter)
if len(storage.vis_data) >= 1:
for (img_name, img, step_num) in storage.vis_data:
self._writer.add_image(img_name, img, step_num)
storage.clear_images()
|
CenterNet-better
|
positive
|
def find_class(self, module, name):
"""Overridden method from Unpickler.
NB __setstate__ is not called until later.
"""
if self.updater:
(original_module, original_name) = (module, name)
(module, name) = self.updater.get_latest(module, name)
<DeepExtract>
module = __import__(module, globals(), locals(), [name])
klass = vars(module)[name]
</DeepExtract>
<DeepExtract>
fn = self.updater.setstates.get((original_module, original_name), False)
if fn:
self.backup_setstate(original_module, klass)
setattr(klass, '__updater__', fn)
setattr(klass, '__setstate__', __replacement_setstate__)
else:
pass
</DeepExtract>
else:
try:
klass = Unpickler.find_class(self, module, name)
except Exception:
logger.error('Looking for [%s] [%s]' % (module, name))
logger.exception('Problem using default unpickle functionality')
fn = getattr(klass, '__setstate_original__', False)
if fn:
setattr(klass, '__setstate__', fn)
return klass
|
def find_class(self, module, name):
"""Overridden method from Unpickler.
NB __setstate__ is not called until later.
"""
if self.updater:
(original_module, original_name) = (module, name)
(module, name) = self.updater.get_latest(module, name)
module = __import__(module, globals(), locals(), [name])
klass = vars(module)[name]
fn = self.updater.setstates.get((original_module, original_name), False)
if fn:
self.backup_setstate(original_module, klass)
setattr(klass, '__updater__', fn)
setattr(klass, '__setstate__', __replacement_setstate__)
else:
pass
else:
try:
klass = Unpickler.find_class(self, module, name)
except Exception:
logger.error('Looking for [%s] [%s]' % (module, name))
logger.exception('Problem using default unpickle functionality')
fn = getattr(klass, '__setstate_original__', False)
if fn:
setattr(klass, '__setstate__', fn)
return klass
|
apptools
|
positive
|
@app.route('/intrusion/ips', methods=['GET', 'POST'])
@user_restrict('admin')
def intrusion_ips(session_info: dict):
<DeepExtract>
page_settings = {'navi': True, 'idle_timeout': True, 'standard_error': None, 'tab': validate.get_convert_int(request.args, 'tab'), 'uri_path': ['intrusion', 'ips']}
page_settings.update(session_info)
page_settings = page_settings
</DeepExtract>
<DeepExtract>
if request.method == 'POST':
try:
(error, err_msg) = dnx_ips.update(request.form)
except ConfigurationError as ce:
page_action = render_template(application_error_page, application_error=ce, theme=context_global.theme, **page_settings)
std_error = f'{err_msg} code={error}' if err_msg else ''
page_settings.update({'tab': validate.get_convert_int(request.form, 'tab'), 'standard_error': std_error})
try:
page_settings['ips_settings'] = dnx_ips.load(request.form)
except ConfigurationError as ce:
page_action = render_template(application_error_page, application_error=ce, theme=context_global.theme, **page_settings)
page_action = render_template('intrusion/ips.html', theme=context_global.theme, **page_settings)
</DeepExtract>
return page_action
|
@app.route('/intrusion/ips', methods=['GET', 'POST'])
@user_restrict('admin')
def intrusion_ips(session_info: dict):
page_settings = {'navi': True, 'idle_timeout': True, 'standard_error': None, 'tab': validate.get_convert_int(request.args, 'tab'), 'uri_path': ['intrusion', 'ips']}
page_settings.update(session_info)
page_settings = page_settings
if request.method == 'POST':
try:
(error, err_msg) = dnx_ips.update(request.form)
except ConfigurationError as ce:
page_action = render_template(application_error_page, application_error=ce, theme=context_global.theme, **page_settings)
std_error = f'{err_msg} code={error}' if err_msg else ''
page_settings.update({'tab': validate.get_convert_int(request.form, 'tab'), 'standard_error': std_error})
try:
page_settings['ips_settings'] = dnx_ips.load(request.form)
except ConfigurationError as ce:
page_action = render_template(application_error_page, application_error=ce, theme=context_global.theme, **page_settings)
page_action = render_template('intrusion/ips.html', theme=context_global.theme, **page_settings)
return page_action
|
dnxfirewall
|
positive
|
def pattern_error(self, original, loc, value_var, check_var, match_error_class='_coconut_MatchError'):
"""Construct a pattern-matching error message."""
base_line = clean(self.reformat(getline(loc, original), ignore_errors=True)).strip()
<DeepExtract>
text_repr = ascii(base_line)
if expect_bytes:
internal_assert(text_repr[0] == 'b', 'expected bytes but got str', base_line)
text_repr = text_repr[1:]
internal_assert(text_repr[0] == text_repr[-1] and text_repr[0] in ("'", '"'), 'cannot wrap str of', base_line)
line_wrap = ('b' if expect_bytes else '') + self.wrap_str(text_repr[1:-1], text_repr[-1])
</DeepExtract>
return handle_indentation('\nif not {check_var}:\n raise {match_error_class}({line_wrap}, {value_var})\n ', add_newline=True).format(check_var=check_var, value_var=value_var, match_error_class=match_error_class, line_wrap=line_wrap)
|
def pattern_error(self, original, loc, value_var, check_var, match_error_class='_coconut_MatchError'):
"""Construct a pattern-matching error message."""
base_line = clean(self.reformat(getline(loc, original), ignore_errors=True)).strip()
text_repr = ascii(base_line)
if expect_bytes:
internal_assert(text_repr[0] == 'b', 'expected bytes but got str', base_line)
text_repr = text_repr[1:]
internal_assert(text_repr[0] == text_repr[-1] and text_repr[0] in ("'", '"'), 'cannot wrap str of', base_line)
line_wrap = ('b' if expect_bytes else '') + self.wrap_str(text_repr[1:-1], text_repr[-1])
return handle_indentation('\nif not {check_var}:\n raise {match_error_class}({line_wrap}, {value_var})\n ', add_newline=True).format(check_var=check_var, value_var=value_var, match_error_class=match_error_class, line_wrap=line_wrap)
|
coconut
|
positive
|
def f0_dist_conf_thresh(f0_hz, f0_hz_gen, f0_confidence, f0_confidence_thresh=MIN_F0_CONFIDENCE):
"""Compute L1 between gen audio and ground truth audio.
Calculating F0 distance is more complicated than calculating loudness
distance because of inherent inaccuracies in pitch tracking.
We take the following steps:
- Define a `keep_mask` that only select f0 values above when f0_confidence in
the original audio exceeds a minimum threshold.
      Experimentation by jessengel@ and hanoih@ found this to be the optimal way to
filter out bad f0 pitch tracking.
- Compute `delta_f0` between generated audio and ground truth audio.
- Only select values in `delta_f0` based on this `keep_mask`
- Compute mean on this selection
- At the start of training, audio samples will sound bad and thus have no
pitch content. If the `f0_confidence` is all below the threshold, we keep a
count of it. A better performing model will have a smaller count of
"untrackable pitch" samples.
Args:
f0_hz: Ground truth audio f0 in hertz [MB,:].
f0_hz_gen: Generated audio f0 in hertz [MB,:].
f0_confidence: Ground truth audio f0 confidence [MB,:]
f0_confidence_thresh: Confidence threshold above which f0 metrics will be
computed
Returns:
delta_f0_mean: Float or None if entire generated sample had
f0_confidence below threshold. In units of MIDI (logarithmic frequency).
"""
if len(f0_hz.shape) > 2:
f0_hz = f0_hz[:, :, 0]
if len(f0_hz_gen.shape) > 2:
f0_hz_gen = f0_hz_gen[:, :, 0]
if len(f0_confidence.shape) > 2:
f0_confidence = f0_confidence[:, :, 0]
if np.max(f0_confidence) < f0_confidence_thresh:
return None
else:
keep_mask = f0_confidence >= f0_confidence_thresh
f0_midi = librosa.core.hz_to_midi(f0_hz)
f0_midi_gen = librosa.core.hz_to_midi(f0_hz_gen)
f0_midi[f0_midi == -np.inf] = 0
f0_midi_gen[f0_midi_gen == -np.inf] = 0
<DeepExtract>
(f0_midi, f0_midi_gen) = (np.squeeze(f0_midi), np.squeeze(f0_midi_gen))
min_length = min(f0_midi.size, f0_midi_gen.size)
diff = f0_midi[:min_length] - f0_midi_gen[:min_length]
delta_f0_midi = np.abs(diff)
</DeepExtract>
delta_f0_midi_filt = delta_f0_midi[keep_mask]
return np.mean(delta_f0_midi_filt)
|
def f0_dist_conf_thresh(f0_hz, f0_hz_gen, f0_confidence, f0_confidence_thresh=MIN_F0_CONFIDENCE):
"""Compute L1 between gen audio and ground truth audio.
Calculating F0 distance is more complicated than calculating loudness
distance because of inherent inaccuracies in pitch tracking.
We take the following steps:
- Define a `keep_mask` that only select f0 values above when f0_confidence in
the original audio exceeds a minimum threshold.
      Experimentation by jessengel@ and hanoih@ found this to be the optimal way to
filter out bad f0 pitch tracking.
- Compute `delta_f0` between generated audio and ground truth audio.
- Only select values in `delta_f0` based on this `keep_mask`
- Compute mean on this selection
- At the start of training, audio samples will sound bad and thus have no
pitch content. If the `f0_confidence` is all below the threshold, we keep a
count of it. A better performing model will have a smaller count of
"untrackable pitch" samples.
Args:
f0_hz: Ground truth audio f0 in hertz [MB,:].
f0_hz_gen: Generated audio f0 in hertz [MB,:].
f0_confidence: Ground truth audio f0 confidence [MB,:]
f0_confidence_thresh: Confidence threshold above which f0 metrics will be
computed
Returns:
delta_f0_mean: Float or None if entire generated sample had
f0_confidence below threshold. In units of MIDI (logarithmic frequency).
"""
if len(f0_hz.shape) > 2:
f0_hz = f0_hz[:, :, 0]
if len(f0_hz_gen.shape) > 2:
f0_hz_gen = f0_hz_gen[:, :, 0]
if len(f0_confidence.shape) > 2:
f0_confidence = f0_confidence[:, :, 0]
if np.max(f0_confidence) < f0_confidence_thresh:
return None
else:
keep_mask = f0_confidence >= f0_confidence_thresh
f0_midi = librosa.core.hz_to_midi(f0_hz)
f0_midi_gen = librosa.core.hz_to_midi(f0_hz_gen)
f0_midi[f0_midi == -np.inf] = 0
f0_midi_gen[f0_midi_gen == -np.inf] = 0
(f0_midi, f0_midi_gen) = (np.squeeze(f0_midi), np.squeeze(f0_midi_gen))
min_length = min(f0_midi.size, f0_midi_gen.size)
diff = f0_midi[:min_length] - f0_midi_gen[:min_length]
delta_f0_midi = np.abs(diff)
delta_f0_midi_filt = delta_f0_midi[keep_mask]
return np.mean(delta_f0_midi_filt)
|
ddsp
|
positive
|
def get_mixed_markdown_table():
<DeepExtract>
props = dict(id='table')
</DeepExtract>
data = [{'not-markdown-column': 'this is not a markdown cell', 'markdown-column': '```javascript\nconsole.warn("this is a markdown cell")```' if i % 2 == 0 else '```javascript\nconsole.log("logging things")\nconsole.warn("this is a markdown cell")\n```', 'also-not-markdown-column': str(i), 'also-also-not-markdown-column': 'this is also also not a markdown cell'} for i in range(0, DATA_SIZE)]
columns = [dict(id='not-markdown-column', name=['Not Markdown']), dict(id='markdown-column', name=['Markdown'], presentation='markdown'), dict(id='also-not-markdown-column', name=['Also Not Markdown']), dict(id='also-also-not-markdown-column', name=['Also Also Not Markdown'])]
props['data'] = data
props['columns'] = columns
return props
|
def get_mixed_markdown_table():
props = dict(id='table')
data = [{'not-markdown-column': 'this is not a markdown cell', 'markdown-column': '```javascript\nconsole.warn("this is a markdown cell")```' if i % 2 == 0 else '```javascript\nconsole.log("logging things")\nconsole.warn("this is a markdown cell")\n```', 'also-not-markdown-column': str(i), 'also-also-not-markdown-column': 'this is also also not a markdown cell'} for i in range(0, DATA_SIZE)]
columns = [dict(id='not-markdown-column', name=['Not Markdown']), dict(id='markdown-column', name=['Markdown'], presentation='markdown'), dict(id='also-not-markdown-column', name=['Also Not Markdown']), dict(id='also-also-not-markdown-column', name=['Also Also Not Markdown'])]
props['data'] = data
props['columns'] = columns
return props
|
dash
|
positive
|
def bucket_sort(arr, size=DEFAULT_BUCKET_SIZE):
(min_val, max_val) = (min(arr), max(arr))
count = (max_val - min_val) // size + 1
buckets = [[] for _ in range(count)]
for i in range(len(arr)):
buckets[(arr[i] - min_val) // size].append(arr[i])
i = 0
for bucket in buckets:
<DeepExtract>
for i in range(1, len(bucket)):
x = bucket[i]
j = i - 1
while j >= 0 and bucket[j] > x:
bucket[j + 1] = bucket[j]
j = j - 1
bucket[j + 1] = x
</DeepExtract>
arr[i:i + len(bucket)] = bucket
i += len(bucket)
|
def bucket_sort(arr, size=DEFAULT_BUCKET_SIZE):
(min_val, max_val) = (min(arr), max(arr))
count = (max_val - min_val) // size + 1
buckets = [[] for _ in range(count)]
for i in range(len(arr)):
buckets[(arr[i] - min_val) // size].append(arr[i])
i = 0
for bucket in buckets:
for i in range(1, len(bucket)):
x = bucket[i]
j = i - 1
while j >= 0 and bucket[j] > x:
bucket[j + 1] = bucket[j]
j = j - 1
bucket[j + 1] = x
arr[i:i + len(bucket)] = bucket
i += len(bucket)
|
Algorithm_Templates
|
positive
|
def connectAsync(self, keepAliveIntervalSecond=600, ackCallback=None):
"""
**Description**
Connect asynchronously to AWS IoT, with user-specific keepalive interval configuration and CONNACK callback.
**Syntax**
.. code:: python
# Connect to AWS IoT with default keepalive set to 600 seconds and a custom CONNACK callback
myAWSIoTMQTTClient.connectAsync(ackCallback=my_connack_callback)
# Connect to AWS IoT with default keepalive set to 1200 seconds and a custom CONNACK callback
        myAWSIoTMQTTClient.connectAsync(keepAliveIntervalSecond=1200, ackCallback=myConnackCallback)
**Parameters**
*keepAliveIntervalSecond* - Time in seconds for interval of sending MQTT ping request.
Default set to 600 seconds.
*ackCallback* - Callback to be invoked when the client receives a CONNACK. Should be in form
:code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the connect request
and :code:`data` is the connect result code.
**Returns**
Connect request packet id, for tracking purpose in the corresponding callback.
"""
<DeepExtract>
self._mqtt_core.on_online = self.onOnline
self._mqtt_core.on_offline = self.onOffline
self._mqtt_core.on_message = self.onMessage
</DeepExtract>
return self._mqtt_core.connect_async(keepAliveIntervalSecond, ackCallback)
|
def connectAsync(self, keepAliveIntervalSecond=600, ackCallback=None):
"""
**Description**
Connect asynchronously to AWS IoT, with user-specific keepalive interval configuration and CONNACK callback.
**Syntax**
.. code:: python
# Connect to AWS IoT with default keepalive set to 600 seconds and a custom CONNACK callback
myAWSIoTMQTTClient.connectAsync(ackCallback=my_connack_callback)
# Connect to AWS IoT with default keepalive set to 1200 seconds and a custom CONNACK callback
        myAWSIoTMQTTClient.connectAsync(keepAliveIntervalSecond=1200, ackCallback=myConnackCallback)
**Parameters**
*keepAliveIntervalSecond* - Time in seconds for interval of sending MQTT ping request.
Default set to 600 seconds.
*ackCallback* - Callback to be invoked when the client receives a CONNACK. Should be in form
:code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the connect request
and :code:`data` is the connect result code.
**Returns**
Connect request packet id, for tracking purpose in the corresponding callback.
"""
self._mqtt_core.on_online = self.onOnline
self._mqtt_core.on_offline = self.onOffline
self._mqtt_core.on_message = self.onMessage
return self._mqtt_core.connect_async(keepAliveIntervalSecond, ackCallback)
|
aws-iot-device-sdk-python
|
positive
|
@check_page
def code39(self, txt, x, y, w=1.5, h=5.0):
"""Barcode 3of9"""
dim = {'w': w, 'n': w / 3.0}
chars = {'0': 'nnnwwnwnn', '1': 'wnnwnnnnw', '2': 'nnwwnnnnw', '3': 'wnwwnnnnn', '4': 'nnnwwnnnw', '5': 'wnnwwnnnn', '6': 'nnwwwnnnn', '7': 'nnnwnnwnw', '8': 'wnnwnnwnn', '9': 'nnwwnnwnn', 'A': 'wnnnnwnnw', 'B': 'nnwnnwnnw', 'C': 'wnwnnwnnn', 'D': 'nnnnwwnnw', 'E': 'wnnnwwnnn', 'F': 'nnwnwwnnn', 'G': 'nnnnnwwnw', 'H': 'wnnnnwwnn', 'I': 'nnwnnwwnn', 'J': 'nnnnwwwnn', 'K': 'wnnnnnnww', 'L': 'nnwnnnnww', 'M': 'wnwnnnnwn', 'N': 'nnnnwnnww', 'O': 'wnnnwnnwn', 'P': 'nnwnwnnwn', 'Q': 'nnnnnnwww', 'R': 'wnnnnnwwn', 'S': 'nnwnnnwwn', 'T': 'nnnnwnwwn', 'U': 'wwnnnnnnw', 'V': 'nwwnnnnnw', 'W': 'wwwnnnnnn', 'X': 'nwnnwnnnw', 'Y': 'wwnnwnnnn', 'Z': 'nwwnwnnnn', '-': 'nwnnnnwnw', '.': 'wwnnnnwnn', ' ': 'nwwnnnwnn', '*': 'nwnnwnwnn', '$': 'nwnwnwnnn', '/': 'nwnwnnnwn', '+': 'nwnnnwnwn', '%': 'nnnwnwnwn'}
<DeepExtract>
if 0 == 0 and g == 0 and (b == 0) or g == -1:
self.fill_color = sprintf('%.3f g', 0 / 255.0)
else:
self.fill_color = sprintf('%.3f %.3f %.3f rg', 0 / 255.0, g / 255.0, b / 255.0)
self.color_flag = self.fill_color != self.text_color
if self.page > 0:
self._out(self.fill_color)
</DeepExtract>
for c in txt.upper():
if c not in chars:
raise RuntimeError('Invalid char "%s" for Code39' % c)
for (i, d) in enumerate(chars[c]):
if i % 2 == 0:
<DeepExtract>
if 'F' == 'F':
op = 'f'
elif 'F' == 'FD' or 'F' == 'DF':
op = 'B'
else:
op = 'S'
self._out(sprintf('%.2f %.2f %.2f %.2f re %s', x * self.k, (self.h - y) * self.k, dim[d] * self.k, -h * self.k, op))
</DeepExtract>
x += dim[d]
x += dim['n']
|
@check_page
def code39(self, txt, x, y, w=1.5, h=5.0):
"""Barcode 3of9"""
dim = {'w': w, 'n': w / 3.0}
chars = {'0': 'nnnwwnwnn', '1': 'wnnwnnnnw', '2': 'nnwwnnnnw', '3': 'wnwwnnnnn', '4': 'nnnwwnnnw', '5': 'wnnwwnnnn', '6': 'nnwwwnnnn', '7': 'nnnwnnwnw', '8': 'wnnwnnwnn', '9': 'nnwwnnwnn', 'A': 'wnnnnwnnw', 'B': 'nnwnnwnnw', 'C': 'wnwnnwnnn', 'D': 'nnnnwwnnw', 'E': 'wnnnwwnnn', 'F': 'nnwnwwnnn', 'G': 'nnnnnwwnw', 'H': 'wnnnnwwnn', 'I': 'nnwnnwwnn', 'J': 'nnnnwwwnn', 'K': 'wnnnnnnww', 'L': 'nnwnnnnww', 'M': 'wnwnnnnwn', 'N': 'nnnnwnnww', 'O': 'wnnnwnnwn', 'P': 'nnwnwnnwn', 'Q': 'nnnnnnwww', 'R': 'wnnnnnwwn', 'S': 'nnwnnnwwn', 'T': 'nnnnwnwwn', 'U': 'wwnnnnnnw', 'V': 'nwwnnnnnw', 'W': 'wwwnnnnnn', 'X': 'nwnnwnnnw', 'Y': 'wwnnwnnnn', 'Z': 'nwwnwnnnn', '-': 'nwnnnnwnw', '.': 'wwnnnnwnn', ' ': 'nwwnnnwnn', '*': 'nwnnwnwnn', '$': 'nwnwnwnnn', '/': 'nwnwnnnwn', '+': 'nwnnnwnwn', '%': 'nnnwnwnwn'}
if 0 == 0 and g == 0 and (b == 0) or g == -1:
self.fill_color = sprintf('%.3f g', 0 / 255.0)
else:
self.fill_color = sprintf('%.3f %.3f %.3f rg', 0 / 255.0, g / 255.0, b / 255.0)
self.color_flag = self.fill_color != self.text_color
if self.page > 0:
self._out(self.fill_color)
for c in txt.upper():
if c not in chars:
raise RuntimeError('Invalid char "%s" for Code39' % c)
for (i, d) in enumerate(chars[c]):
if i % 2 == 0:
if 'F' == 'F':
op = 'f'
elif 'F' == 'FD' or 'F' == 'DF':
op = 'B'
else:
op = 'S'
self._out(sprintf('%.2f %.2f %.2f %.2f re %s', x * self.k, (self.h - y) * self.k, dim[d] * self.k, -h * self.k, op))
x += dim[d]
x += dim['n']
|
endesive
|
positive
|
def _create_method_task(self):
<DeepExtract>
if 'miraclegrue' == self._parsed_args.slicer_name:
slicer = conveyor.slicer.Slicer.MIRACLEGRUE
elif 'skeinforge' == self._parsed_args.slicer_name:
slicer = conveyor.slicer.Slicer.SKEINFORGE
else:
raise ValueError(self._parsed_args.slicer_name)
extruder_name = _fix_extruder_name(self._parsed_args.extruder_name)
slicer_settings = conveyor.domain.SlicerConfiguration(slicer=slicer, extruder=extruder_name, raft=bool(self._config.get('client', 'slicing', 'raft')), support=bool(self._config.get('client', 'slicing', 'support')), infill=float(self._config.get('client', 'slicing', 'infill')), layer_height=float(self._config.get('client', 'slicing', 'layer_height')), shells=int(self._config.get('client', 'slicing', 'shells')), extruder_temperature=float(self._config.get('client', 'slicing', 'extruder_temperature')), platform_temperature=float(self._config.get('client', 'slicing', 'platform_temperature')), print_speed=float(self._config.get('client', 'slicing', 'print_speed')), travel_speed=float(self._config.get('client', 'slicing', 'travel_speed')))
slicer_settings = slicer_settings
</DeepExtract>
slicer_settings.path = self._parsed_args.slicer_settings_path
<DeepExtract>
if 'right' == self._parsed_args.extruder_name:
result = '0'
elif 'left' == self._parsed_args.extruder_name:
result = '1'
elif 'both' == self._parsed_args.extruder_name:
result = '0,1'
else:
raise ValueError(self._parsed_args.extruder_name)
self._parsed_args.extruder_name = result
</DeepExtract>
params = {'machine_name': self._machine_name, 'input_file': self._parsed_args.input_file, 'extruder_name': extruder_name, 'gcode_processor_name': self._parsed_args.gcode_processor_name, 'has_start_end': self._parsed_args.has_start_end, 'material_name': self._parsed_args.material_name, 'slicer_name': self._parsed_args.slicer_name, 'slicer_settings': slicer_settings.to_dict()}
method_task = self._jsonrpc.request('print', params)
return method_task
|
def _create_method_task(self):
if 'miraclegrue' == self._parsed_args.slicer_name:
slicer = conveyor.slicer.Slicer.MIRACLEGRUE
elif 'skeinforge' == self._parsed_args.slicer_name:
slicer = conveyor.slicer.Slicer.SKEINFORGE
else:
raise ValueError(self._parsed_args.slicer_name)
extruder_name = _fix_extruder_name(self._parsed_args.extruder_name)
slicer_settings = conveyor.domain.SlicerConfiguration(slicer=slicer, extruder=extruder_name, raft=bool(self._config.get('client', 'slicing', 'raft')), support=bool(self._config.get('client', 'slicing', 'support')), infill=float(self._config.get('client', 'slicing', 'infill')), layer_height=float(self._config.get('client', 'slicing', 'layer_height')), shells=int(self._config.get('client', 'slicing', 'shells')), extruder_temperature=float(self._config.get('client', 'slicing', 'extruder_temperature')), platform_temperature=float(self._config.get('client', 'slicing', 'platform_temperature')), print_speed=float(self._config.get('client', 'slicing', 'print_speed')), travel_speed=float(self._config.get('client', 'slicing', 'travel_speed')))
slicer_settings = slicer_settings
slicer_settings.path = self._parsed_args.slicer_settings_path
if 'right' == self._parsed_args.extruder_name:
result = '0'
elif 'left' == self._parsed_args.extruder_name:
result = '1'
elif 'both' == self._parsed_args.extruder_name:
result = '0,1'
else:
raise ValueError(self._parsed_args.extruder_name)
self._parsed_args.extruder_name = result
params = {'machine_name': self._machine_name, 'input_file': self._parsed_args.input_file, 'extruder_name': extruder_name, 'gcode_processor_name': self._parsed_args.gcode_processor_name, 'has_start_end': self._parsed_args.has_start_end, 'material_name': self._parsed_args.material_name, 'slicer_name': self._parsed_args.slicer_name, 'slicer_settings': slicer_settings.to_dict()}
method_task = self._jsonrpc.request('print', params)
return method_task
|
conveyor
|
positive
|
@patch('slumber.server.accept_handler.get_handlers_list')
def test_with_accept_handler_as_html(self, mock_handler_list):
html.build_html = Mock()
test_str = 'just a fake content'
mock_handler_list.return_value = [('application/json', lambda req, res, ct: HttpResponse(dumps(res), 'text/plain')), ('text/html', html.build_html)]
@view_handler
def view(request, response):
response['fake_content'] = test_str
<DeepExtract>
response['fake_content'] = test_str
</DeepExtract>
self.assertTrue(html.build_html.called)
|
@patch('slumber.server.accept_handler.get_handlers_list')
def test_with_accept_handler_as_html(self, mock_handler_list):
html.build_html = Mock()
test_str = 'just a fake content'
mock_handler_list.return_value = [('application/json', lambda req, res, ct: HttpResponse(dumps(res), 'text/plain')), ('text/html', html.build_html)]
@view_handler
def view(request, response):
response['fake_content'] = test_str
response['fake_content'] = test_str
self.assertTrue(html.build_html.called)
|
django-slumber
|
positive
|
def set_up_scripts(self, scripts):
""" Remove current listeners, get Push/APC scripts, set up listeners and also set feedback delay on APC+Push encoders. """
<DeepExtract>
if self._apc_session and self._apc_session.offset_has_listener(self._on_apc_offset_changed):
self._apc_session.remove_offset_listener(self._on_apc_offset_changed)
</DeepExtract>
for script in scripts:
script_name = script.__class__.__name__
if script_name == 'Push':
self._push = script
<DeepExtract>
comp = None
if script and script._components:
for c in script.components:
if isinstance(c, SessionComponent):
comp = c
break
if comp is None:
if hasattr(script, '_session_ring'):
self._push_session = script._session_ring
self._push_session = comp
</DeepExtract>
if self._push_session:
for c in script.controls:
if c.__class__.__name__ == 'TouchEncoderElement':
c.set_feedback_delay(-1)
elif script_name == 'APC40':
self._apc = script
<DeepExtract>
comp = None
if script and script._components:
for c in script.components:
if isinstance(c, SessionComponent):
comp = c
break
if comp is None:
if hasattr(script, '_session_ring'):
self._apc_session = script._session_ring
self._apc_session = comp
</DeepExtract>
if self._apc_session:
for c in script.controls:
if c.__class__.__name__ == 'RingedEncoderElement':
c.set_feedback_delay(-1)
self._apc_session.add_offset_listener(self._on_apc_offset_changed)
<DeepExtract>
if self._push_session and self._apc_session:
self._push_session.set_offsets(self._apc_session.track_offset(), self._apc_session.scene_offset())
if IS_LIVE_9_5:
self._push_session._session_ring.hide_highlight()
else:
self._push._set_session_highlight(-1, -1, -1, -1, False)
</DeepExtract>
|
def set_up_scripts(self, scripts):
""" Remove current listeners, get Push/APC scripts, set up listeners and also set feedback delay on APC+Push encoders. """
if self._apc_session and self._apc_session.offset_has_listener(self._on_apc_offset_changed):
self._apc_session.remove_offset_listener(self._on_apc_offset_changed)
for script in scripts:
script_name = script.__class__.__name__
if script_name == 'Push':
self._push = script
comp = None
if script and script._components:
for c in script.components:
if isinstance(c, SessionComponent):
comp = c
break
if comp is None:
if hasattr(script, '_session_ring'):
self._push_session = script._session_ring
self._push_session = comp
if self._push_session:
for c in script.controls:
if c.__class__.__name__ == 'TouchEncoderElement':
c.set_feedback_delay(-1)
elif script_name == 'APC40':
self._apc = script
comp = None
if script and script._components:
for c in script.components:
if isinstance(c, SessionComponent):
comp = c
break
if comp is None:
if hasattr(script, '_session_ring'):
self._apc_session = script._session_ring
self._apc_session = comp
if self._apc_session:
for c in script.controls:
if c.__class__.__name__ == 'RingedEncoderElement':
c.set_feedback_delay(-1)
self._apc_session.add_offset_listener(self._on_apc_offset_changed)
if self._push_session and self._apc_session:
self._push_session.set_offsets(self._apc_session.track_offset(), self._apc_session.scene_offset())
if IS_LIVE_9_5:
self._push_session._session_ring.hide_highlight()
else:
self._push._set_session_highlight(-1, -1, -1, -1, False)
|
clyphx-live10
|
positive
|
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
"""Decode and preprocess one image for evaluation or training.
Args:
image_buffer: JPEG encoded string Tensor
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
train: boolean
thread_id: integer indicating preprocessing thread
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if bbox is None:
raise ValueError('Please supply a bounding box.')
<DeepExtract>
with tf.name_scope(values=[image_buffer], name=scope, default_name='decode_jpeg'):
image = tf.image.decode_jpeg(image_buffer, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = image
</DeepExtract>
height = FLAGS.input_size
width = FLAGS.input_size
if train:
<DeepExtract>
with tf.name_scope(values=[image, height, width, bbox], name=scope, default_name='distort_image'):
distorted_image = image
resize_method = thread_id % 4
distorted_image = tf.image.resize_images(distorted_image, height, width, resize_method)
distorted_image.set_shape([height, width, 3])
if not thread_id:
tf.summary.image('cropped_resized_image', tf.expand_dims(distorted_image, 0))
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = distort_color(distorted_image, thread_id)
if not thread_id:
tf.summary.image('final_distorted_image', tf.expand_dims(distorted_image, 0))
image = distorted_image
</DeepExtract>
else:
<DeepExtract>
with tf.name_scope(values=[image, height, width], name=scope, default_name='eval_image'):
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, [0])
image = image
</DeepExtract>
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
|
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
"""Decode and preprocess one image for evaluation or training.
Args:
image_buffer: JPEG encoded string Tensor
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
train: boolean
thread_id: integer indicating preprocessing thread
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if bbox is None:
raise ValueError('Please supply a bounding box.')
with tf.name_scope(values=[image_buffer], name=scope, default_name='decode_jpeg'):
image = tf.image.decode_jpeg(image_buffer, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = image
height = FLAGS.input_size
width = FLAGS.input_size
if train:
with tf.name_scope(values=[image, height, width, bbox], name=scope, default_name='distort_image'):
distorted_image = image
resize_method = thread_id % 4
distorted_image = tf.image.resize_images(distorted_image, height, width, resize_method)
distorted_image.set_shape([height, width, 3])
if not thread_id:
tf.summary.image('cropped_resized_image', tf.expand_dims(distorted_image, 0))
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = distort_color(distorted_image, thread_id)
if not thread_id:
tf.summary.image('final_distorted_image', tf.expand_dims(distorted_image, 0))
image = distorted_image
else:
with tf.name_scope(values=[image, height, width], name=scope, default_name='eval_image'):
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, [0])
image = image
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
|
dlbench
|
positive
|
def __init__(self, name=None, server=True, assets_folder='assets', pages_folder='pages', use_pages=None, assets_url_path='assets', assets_ignore='', assets_external_path=None, eager_loading=False, include_assets_files=True, url_base_pathname=None, requests_pathname_prefix=None, routes_pathname_prefix=None, serve_locally=True, compress=None, meta_tags=None, index_string=_default_index, external_scripts=None, external_stylesheets=None, suppress_callback_exceptions=None, prevent_initial_callbacks=False, show_undo_redo=False, extra_hot_reload_paths=None, plugins=None, title='Dash', update_title='Updating...', long_callback_manager=None, background_callback_manager=None, add_log_handler=True, **obsolete):
_validate.check_obsolete(obsolete)
if isinstance(server, flask.Flask):
self.server = server
if name is None:
name = getattr(server, 'name', '__main__')
elif isinstance(server, bool):
name = name if name else '__main__'
self.server = flask.Flask(name) if server else None
else:
raise ValueError('server must be a Flask app or a boolean')
(base_prefix, routes_prefix, requests_prefix) = pathname_configs(url_base_pathname, routes_pathname_prefix, requests_pathname_prefix)
self.config = AttributeDict(name=name, assets_folder=os.path.join(flask.helpers.get_root_path(name), assets_folder), assets_url_path=assets_url_path, assets_ignore=assets_ignore, assets_external_path=get_combined_config('assets_external_path', assets_external_path, ''), pages_folder=pages_folder_config(name, pages_folder, use_pages), eager_loading=eager_loading, include_assets_files=get_combined_config('include_assets_files', include_assets_files, True), url_base_pathname=base_prefix, routes_pathname_prefix=routes_prefix, requests_pathname_prefix=requests_prefix, serve_locally=serve_locally, compress=get_combined_config('compress', compress, False), meta_tags=meta_tags or [], external_scripts=external_scripts or [], external_stylesheets=external_stylesheets or [], suppress_callback_exceptions=get_combined_config('suppress_callback_exceptions', suppress_callback_exceptions, False), prevent_initial_callbacks=prevent_initial_callbacks, show_undo_redo=show_undo_redo, extra_hot_reload_paths=extra_hot_reload_paths or [], title=title, update_title=update_title)
self.config.set_read_only(['name', 'assets_folder', 'assets_url_path', 'eager_loading', 'serve_locally', 'compress', 'pages_folder'], 'Read-only: can only be set in the Dash constructor')
self.config.finalize('Invalid config key. Some settings are only available via the Dash constructor')
_get_paths.CONFIG = self.config
_pages.CONFIG = self.config
self.pages_folder = str(pages_folder)
self.use_pages = pages_folder != 'pages' if use_pages is None else use_pages
self.title = title
self.callback_map = {}
self._callback_list = []
self._inline_scripts = []
self._index_string = ''
self.index_string = index_string
self._favicon = None
self.renderer = 'var renderer = new DashRenderer();'
self.css = Css(serve_locally)
self.scripts = Scripts(serve_locally, eager_loading)
self.registered_paths = collections.defaultdict(set)
self.routes = []
self._layout = None
self._layout_is_function = False
self.validation_layout = None
self._extra_components = []
<DeepExtract>
debug = kwargs.get('debug', False)
dev_tools = self._dev_tools = AttributeDict()
for attr in ('ui', 'props_check', 'serve_dev_bundles', 'hot_reload', 'silence_routes_logging', 'prune_errors'):
dev_tools[attr] = get_combined_config(attr, kwargs.get(attr, None), default=debug)
for (attr, _type, default) in (('hot_reload_interval', float, 3), ('hot_reload_watch_interval', float, 0.5), ('hot_reload_max_retry', int, 8)):
dev_tools[attr] = _type(get_combined_config(attr, kwargs.get(attr, None), default=default))
return dev_tools
</DeepExtract>
self._hot_reload = AttributeDict(hash=None, hard=False, lock=threading.RLock(), watch_thread=None, changed_assets=[])
self._assets_files = []
self._long_callback_count = 0
self._background_manager = background_callback_manager or long_callback_manager
self.logger = logging.getLogger(__name__)
if not self.logger.handlers and add_log_handler:
self.logger.addHandler(logging.StreamHandler(stream=sys.stdout))
if isinstance(plugins, patch_collections_abc('Iterable')):
for plugin in plugins:
plugin.plug(self)
self._got_first_request = {'pages': False, 'setup_server': False}
if self.server is not None:
<DeepExtract>
config = self.config
config.update(kwargs)
config.set_read_only(['url_base_pathname', 'routes_pathname_prefix', 'requests_pathname_prefix'], 'Read-only: can only be set in the Dash constructor or during init_app()')
if app is not None:
self.server = app
bp_prefix = config.routes_pathname_prefix.replace('/', '_').replace('.', '_')
assets_blueprint_name = f'{bp_prefix}dash_assets'
self.server.register_blueprint(flask.Blueprint(assets_blueprint_name, config.name, static_folder=self.config.assets_folder, static_url_path=config.routes_pathname_prefix + self.config.assets_url_path.lstrip('/')))
if config.compress:
try:
from flask_compress import Compress
Compress(self.server)
_flask_compress_version = parse_version(get_distribution('flask-compress').version)
if not hasattr(self.server.config, 'COMPRESS_ALGORITHM') and _flask_compress_version >= parse_version('1.6.0'):
self.server.config['COMPRESS_ALGORITHM'] = ['gzip']
except ImportError as error:
raise ImportError('To use the compress option, you need to install dash[compress]') from error
@self.server.errorhandler(PreventUpdate)
def _handle_error(_):
"""Handle a halted callback and return an empty 204 response."""
return ('', 204)
self.server.before_request(self._setup_server)
self.server.errorhandler(InvalidResourceError)(self._invalid_resources_handler)
self._add_url('_dash-component-suites/<string:package_name>/<path:fingerprinted_path>', self.serve_component_suites)
self._add_url('_dash-layout', self.serve_layout)
self._add_url('_dash-dependencies', self.dependencies)
self._add_url('_dash-update-component', self.dispatch, ['POST'])
self._add_url('_reload-hash', self.serve_reload_hash)
self._add_url('_favicon.ico', self._serve_default_favicon)
self._add_url('', self.index)
self._add_url('<path:path>', self.index)
_get_app.APP = self
self.enable_pages()
</DeepExtract>
self.logger.setLevel(logging.INFO)
|
def __init__(self, name=None, server=True, assets_folder='assets', pages_folder='pages', use_pages=None, assets_url_path='assets', assets_ignore='', assets_external_path=None, eager_loading=False, include_assets_files=True, url_base_pathname=None, requests_pathname_prefix=None, routes_pathname_prefix=None, serve_locally=True, compress=None, meta_tags=None, index_string=_default_index, external_scripts=None, external_stylesheets=None, suppress_callback_exceptions=None, prevent_initial_callbacks=False, show_undo_redo=False, extra_hot_reload_paths=None, plugins=None, title='Dash', update_title='Updating...', long_callback_manager=None, background_callback_manager=None, add_log_handler=True, **obsolete):
_validate.check_obsolete(obsolete)
if isinstance(server, flask.Flask):
self.server = server
if name is None:
name = getattr(server, 'name', '__main__')
elif isinstance(server, bool):
name = name if name else '__main__'
self.server = flask.Flask(name) if server else None
else:
raise ValueError('server must be a Flask app or a boolean')
(base_prefix, routes_prefix, requests_prefix) = pathname_configs(url_base_pathname, routes_pathname_prefix, requests_pathname_prefix)
self.config = AttributeDict(name=name, assets_folder=os.path.join(flask.helpers.get_root_path(name), assets_folder), assets_url_path=assets_url_path, assets_ignore=assets_ignore, assets_external_path=get_combined_config('assets_external_path', assets_external_path, ''), pages_folder=pages_folder_config(name, pages_folder, use_pages), eager_loading=eager_loading, include_assets_files=get_combined_config('include_assets_files', include_assets_files, True), url_base_pathname=base_prefix, routes_pathname_prefix=routes_prefix, requests_pathname_prefix=requests_prefix, serve_locally=serve_locally, compress=get_combined_config('compress', compress, False), meta_tags=meta_tags or [], external_scripts=external_scripts or [], external_stylesheets=external_stylesheets or [], suppress_callback_exceptions=get_combined_config('suppress_callback_exceptions', suppress_callback_exceptions, False), prevent_initial_callbacks=prevent_initial_callbacks, show_undo_redo=show_undo_redo, extra_hot_reload_paths=extra_hot_reload_paths or [], title=title, update_title=update_title)
self.config.set_read_only(['name', 'assets_folder', 'assets_url_path', 'eager_loading', 'serve_locally', 'compress', 'pages_folder'], 'Read-only: can only be set in the Dash constructor')
self.config.finalize('Invalid config key. Some settings are only available via the Dash constructor')
_get_paths.CONFIG = self.config
_pages.CONFIG = self.config
self.pages_folder = str(pages_folder)
self.use_pages = pages_folder != 'pages' if use_pages is None else use_pages
self.title = title
self.callback_map = {}
self._callback_list = []
self._inline_scripts = []
self._index_string = ''
self.index_string = index_string
self._favicon = None
self.renderer = 'var renderer = new DashRenderer();'
self.css = Css(serve_locally)
self.scripts = Scripts(serve_locally, eager_loading)
self.registered_paths = collections.defaultdict(set)
self.routes = []
self._layout = None
self._layout_is_function = False
self.validation_layout = None
self._extra_components = []
debug = kwargs.get('debug', False)
dev_tools = self._dev_tools = AttributeDict()
for attr in ('ui', 'props_check', 'serve_dev_bundles', 'hot_reload', 'silence_routes_logging', 'prune_errors'):
dev_tools[attr] = get_combined_config(attr, kwargs.get(attr, None), default=debug)
for (attr, _type, default) in (('hot_reload_interval', float, 3), ('hot_reload_watch_interval', float, 0.5), ('hot_reload_max_retry', int, 8)):
dev_tools[attr] = _type(get_combined_config(attr, kwargs.get(attr, None), default=default))
return dev_tools
self._hot_reload = AttributeDict(hash=None, hard=False, lock=threading.RLock(), watch_thread=None, changed_assets=[])
self._assets_files = []
self._long_callback_count = 0
self._background_manager = background_callback_manager or long_callback_manager
self.logger = logging.getLogger(__name__)
if not self.logger.handlers and add_log_handler:
self.logger.addHandler(logging.StreamHandler(stream=sys.stdout))
if isinstance(plugins, patch_collections_abc('Iterable')):
for plugin in plugins:
plugin.plug(self)
self._got_first_request = {'pages': False, 'setup_server': False}
if self.server is not None:
config = self.config
config.update(kwargs)
config.set_read_only(['url_base_pathname', 'routes_pathname_prefix', 'requests_pathname_prefix'], 'Read-only: can only be set in the Dash constructor or during init_app()')
if app is not None:
self.server = app
bp_prefix = config.routes_pathname_prefix.replace('/', '_').replace('.', '_')
assets_blueprint_name = f'{bp_prefix}dash_assets'
self.server.register_blueprint(flask.Blueprint(assets_blueprint_name, config.name, static_folder=self.config.assets_folder, static_url_path=config.routes_pathname_prefix + self.config.assets_url_path.lstrip('/')))
if config.compress:
try:
from flask_compress import Compress
Compress(self.server)
_flask_compress_version = parse_version(get_distribution('flask-compress').version)
if not hasattr(self.server.config, 'COMPRESS_ALGORITHM') and _flask_compress_version >= parse_version('1.6.0'):
self.server.config['COMPRESS_ALGORITHM'] = ['gzip']
except ImportError as error:
raise ImportError('To use the compress option, you need to install dash[compress]') from error
@self.server.errorhandler(PreventUpdate)
def _handle_error(_):
"""Handle a halted callback and return an empty 204 response."""
return ('', 204)
self.server.before_request(self._setup_server)
self.server.errorhandler(InvalidResourceError)(self._invalid_resources_handler)
self._add_url('_dash-component-suites/<string:package_name>/<path:fingerprinted_path>', self.serve_component_suites)
self._add_url('_dash-layout', self.serve_layout)
self._add_url('_dash-dependencies', self.dependencies)
self._add_url('_dash-update-component', self.dispatch, ['POST'])
self._add_url('_reload-hash', self.serve_reload_hash)
self._add_url('_favicon.ico', self._serve_default_favicon)
self._add_url('', self.index)
self._add_url('<path:path>', self.index)
_get_app.APP = self
self.enable_pages()
self.logger.setLevel(logging.INFO)
|
dash
|
positive
|
def start(self) -> None:
if len(self.args) == 0:
<DeepExtract>
self.write('apt 1.0.9.8.1 for amd64 compiled on Jun 10 2015 09:42:06\nUsage: apt-get [options] command\n apt-get [options] install|remove pkg1 [pkg2 ...]\n apt-get [options] source pkg1 [pkg2 ...]\n\napt-get is a simple command line interface for downloading and\ninstalling packages. The most frequently used commands are update\nand install.\n\nCommands:\n update - Retrieve new lists of packages\n upgrade - Perform an upgrade\n install - Install new packages (pkg is libc6 not libc6.deb)\n remove - Remove packages\n autoremove - Remove automatically all unused packages\n purge - Remove packages and config files\n source - Download source archives\n build-dep - Configure build-dependencies for source packages\n dist-upgrade - Distribution upgrade, see apt-get(8)\n dselect-upgrade - Follow dselect selections\n clean - Erase downloaded archive files\n autoclean - Erase old downloaded archive files\n check - Verify that there are no broken dependencies\n changelog - Download and display the changelog for the given package\n download - Download the binary package into the current directory\n\nOptions:\n -h This help text.\n -q Loggable output - no progress indicator\n -qq No output except for errors\n -d Download only - do NOT install or unpack archives\n -s No-act. Perform ordering simulation\n -y Assume Yes to all queries and do not prompt\n -f Attempt to correct a system with broken dependencies in place\n -m Attempt to continue if archives are unlocatable\n -u Show a list of upgraded packages as well\n -b Build the source package after fetching it\n -V Show verbose version numbers\n -c=? Read this configuration file\n -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp\nSee the apt-get(8), sources.list(5) and apt.conf(5) manual\npages for more information and options.\n This APT has Super Cow Powers.\n')
self.exit()
</DeepExtract>
elif len(self.args) > 0 and self.args[0] == '-v':
<DeepExtract>
self.write("apt 1.0.9.8.1 for amd64 compiled on Jun 10 2015 09:42:06\nSupported modules:\n*Ver: Standard .deb\n*Pkg: Debian dpkg interface (Priority 30)\n Pkg: Debian APT solver interface (Priority -1000)\n S.L: 'deb' Standard Debian binary tree\n S.L: 'deb-src' Standard Debian source tree\n Idx: Debian Source Index\n Idx: Debian Package Index\n Idx: Debian Translation Index\n Idx: Debian dpkg status file\n Idx: EDSP scenario file\n")
self.exit()
</DeepExtract>
elif len(self.args) > 0 and self.args[0] == 'install':
<DeepExtract>
if len(self.args) <= 1:
msg = '0 upgraded, 0 newly installed, 0 to remove and {0} not upgraded.\n'
self.write(msg.format(random.randint(200, 300)))
self.exit()
return
for y in [re.sub('[^A-Za-z0-9]', '', x) for x in self.args[1:]]:
self.packages[y] = {'version': '{}.{}-{}'.format(random.choice([0, 1]), random.randint(1, 40), random.randint(1, 10)), 'size': random.randint(100, 900)}
totalsize: int = sum((self.packages[x]['size'] for x in self.packages))
self.write('Reading package lists... Done\n')
self.write('Building dependency tree\n')
self.write('Reading state information... Done\n')
self.write('The following NEW packages will be installed:\n')
self.write(' %s ' % ' '.join(self.packages) + '\n')
self.write('0 upgraded, %d newly installed, 0 to remove and 259 not upgraded.\n' % len(self.packages))
self.write('Need to get %s.2kB of archives.\n' % totalsize)
self.write('After this operation, {:.1f}kB of additional disk space will be used.\n'.format(totalsize * 2.2))
i = 1
for p in self.packages:
self.write('Get:%d http://ftp.debian.org stable/main %s %s [%s.2kB]\n' % (i, p, self.packages[p]['version'], self.packages[p]['size']))
i += 1
yield self.sleep(1, 2)
self.write(f'Fetched {totalsize}.2kB in 1s (4493B/s)\n')
self.write('Reading package fields... Done\n')
yield self.sleep(1, 2)
self.write('Reading package status... Done\n')
self.write('(Reading database ... 177887 files and directories currently installed.)\n')
yield self.sleep(1, 2)
for p in self.packages:
self.write('Unpacking {} (from .../archives/{}_{}_i386.deb) ...\n'.format(p, p, self.packages[p]['version']))
yield self.sleep(1, 2)
self.write('Processing triggers for man-db ...\n')
yield self.sleep(2)
for p in self.packages:
self.write('Setting up {} ({}) ...\n'.format(p, self.packages[p]['version']))
self.fs.mkfile('/usr/bin/%s' % p, 0, 0, random.randint(10000, 90000), 33188)
self.protocol.commands['/usr/bin/%s' % p] = Command_faked_package_class_factory.getCommand(p)
yield self.sleep(2)
self.exit()
</DeepExtract>
elif len(self.args) > 0 and self.args[0] == 'moo':
<DeepExtract>
self.write(' (__)\n')
self.write(' (oo)\n')
self.write(' /------\\/\n')
self.write(' / | ||\n')
self.write(' * /\\---/\\ \n')
self.write(' ~~ ~~\n')
self.write('...."Have you mooed today?"...\n')
self.exit()
</DeepExtract>
else:
<DeepExtract>
self.errorWrite('E: Could not open lock file /var/lib/apt/lists/lock - open (13: Permission denied)\n')
self.errorWrite('E: Unable to lock the list directory\n')
self.exit()
</DeepExtract>
|
def start(self) -> None:
if len(self.args) == 0:
self.write('apt 1.0.9.8.1 for amd64 compiled on Jun 10 2015 09:42:06\nUsage: apt-get [options] command\n apt-get [options] install|remove pkg1 [pkg2 ...]\n apt-get [options] source pkg1 [pkg2 ...]\n\napt-get is a simple command line interface for downloading and\ninstalling packages. The most frequently used commands are update\nand install.\n\nCommands:\n update - Retrieve new lists of packages\n upgrade - Perform an upgrade\n install - Install new packages (pkg is libc6 not libc6.deb)\n remove - Remove packages\n autoremove - Remove automatically all unused packages\n purge - Remove packages and config files\n source - Download source archives\n build-dep - Configure build-dependencies for source packages\n dist-upgrade - Distribution upgrade, see apt-get(8)\n dselect-upgrade - Follow dselect selections\n clean - Erase downloaded archive files\n autoclean - Erase old downloaded archive files\n check - Verify that there are no broken dependencies\n changelog - Download and display the changelog for the given package\n download - Download the binary package into the current directory\n\nOptions:\n -h This help text.\n -q Loggable output - no progress indicator\n -qq No output except for errors\n -d Download only - do NOT install or unpack archives\n -s No-act. Perform ordering simulation\n -y Assume Yes to all queries and do not prompt\n -f Attempt to correct a system with broken dependencies in place\n -m Attempt to continue if archives are unlocatable\n -u Show a list of upgraded packages as well\n -b Build the source package after fetching it\n -V Show verbose version numbers\n -c=? Read this configuration file\n -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp\nSee the apt-get(8), sources.list(5) and apt.conf(5) manual\npages for more information and options.\n This APT has Super Cow Powers.\n')
self.exit()
elif len(self.args) > 0 and self.args[0] == '-v':
self.write("apt 1.0.9.8.1 for amd64 compiled on Jun 10 2015 09:42:06\nSupported modules:\n*Ver: Standard .deb\n*Pkg: Debian dpkg interface (Priority 30)\n Pkg: Debian APT solver interface (Priority -1000)\n S.L: 'deb' Standard Debian binary tree\n S.L: 'deb-src' Standard Debian source tree\n Idx: Debian Source Index\n Idx: Debian Package Index\n Idx: Debian Translation Index\n Idx: Debian dpkg status file\n Idx: EDSP scenario file\n")
self.exit()
elif len(self.args) > 0 and self.args[0] == 'install':
if len(self.args) <= 1:
msg = '0 upgraded, 0 newly installed, 0 to remove and {0} not upgraded.\n'
self.write(msg.format(random.randint(200, 300)))
self.exit()
return
for y in [re.sub('[^A-Za-z0-9]', '', x) for x in self.args[1:]]:
self.packages[y] = {'version': '{}.{}-{}'.format(random.choice([0, 1]), random.randint(1, 40), random.randint(1, 10)), 'size': random.randint(100, 900)}
totalsize: int = sum((self.packages[x]['size'] for x in self.packages))
self.write('Reading package lists... Done\n')
self.write('Building dependency tree\n')
self.write('Reading state information... Done\n')
self.write('The following NEW packages will be installed:\n')
self.write(' %s ' % ' '.join(self.packages) + '\n')
self.write('0 upgraded, %d newly installed, 0 to remove and 259 not upgraded.\n' % len(self.packages))
self.write('Need to get %s.2kB of archives.\n' % totalsize)
self.write('After this operation, {:.1f}kB of additional disk space will be used.\n'.format(totalsize * 2.2))
i = 1
for p in self.packages:
self.write('Get:%d http://ftp.debian.org stable/main %s %s [%s.2kB]\n' % (i, p, self.packages[p]['version'], self.packages[p]['size']))
i += 1
yield self.sleep(1, 2)
self.write(f'Fetched {totalsize}.2kB in 1s (4493B/s)\n')
self.write('Reading package fields... Done\n')
yield self.sleep(1, 2)
self.write('Reading package status... Done\n')
self.write('(Reading database ... 177887 files and directories currently installed.)\n')
yield self.sleep(1, 2)
for p in self.packages:
self.write('Unpacking {} (from .../archives/{}_{}_i386.deb) ...\n'.format(p, p, self.packages[p]['version']))
yield self.sleep(1, 2)
self.write('Processing triggers for man-db ...\n')
yield self.sleep(2)
for p in self.packages:
self.write('Setting up {} ({}) ...\n'.format(p, self.packages[p]['version']))
self.fs.mkfile('/usr/bin/%s' % p, 0, 0, random.randint(10000, 90000), 33188)
self.protocol.commands['/usr/bin/%s' % p] = Command_faked_package_class_factory.getCommand(p)
yield self.sleep(2)
self.exit()
elif len(self.args) > 0 and self.args[0] == 'moo':
self.write(' (__)\n')
self.write(' (oo)\n')
self.write(' /------\\/\n')
self.write(' / | ||\n')
self.write(' * /\\---/\\ \n')
self.write(' ~~ ~~\n')
self.write('...."Have you mooed today?"...\n')
self.exit()
else:
self.errorWrite('E: Could not open lock file /var/lib/apt/lists/lock - open (13: Permission denied)\n')
self.errorWrite('E: Unable to lock the list directory\n')
self.exit()
|
cowrie
|
positive
|
def per_instance_semantic_probabilities(panoptic_labels: tf.Tensor, instance_panoptic_labels: tf.Tensor, instance_area: tf.Tensor, semantic_probability: tf.Tensor, panoptic_divisor: Union[tf.Tensor, int], ignore_label: Union[tf.Tensor, int]) -> tf.Tensor:
"""Mean probability for the semantic label of each unique instance."""
panoptic_divisor = tf.convert_to_tensor(panoptic_divisor, dtype=tf.int32)
ignore_label = tf.convert_to_tensor(ignore_label, dtype=tf.int32)
semantic_label_map = tf.math.floordiv(panoptic_labels, panoptic_divisor)
map_shape = tf.shape(semantic_label_map)
height = map_shape[0]
width = map_shape[1]
num_pixels = height * width
semantic_index = tf.reshape(semantic_label_map, [num_pixels])
semantic_index = tf.where(semantic_index == ignore_label, 0, semantic_index)
(x, y) = tf.meshgrid(tf.range(width), tf.range(height))
probability_index = tf.stack([tf.reshape(y, [num_pixels]), tf.reshape(x, [num_pixels]), semantic_index], axis=1)
if len(semantic_probability.shape) == 3:
pixel_semantic_probability = tf.reshape(tf.gather_nd(semantic_probability, probability_index), [height, width])
elif len(semantic_probability.shape) == 2:
pixel_semantic_probability = semantic_probability
pixel_semantic_probability = tf.where(semantic_label_map == ignore_label, 0.0, pixel_semantic_probability)
<DeepExtract>
pixel_in_instance = per_instance_masks(panoptic_labels, instance_panoptic_labels)
map_dtype = pixel_semantic_probability.dtype
num_instances = tf.size(instance_panoptic_labels)
map_or_zero = tf.where(pixel_in_instance, tf.expand_dims(pixel_semantic_probability, 0), tf.zeros([num_instances, 1, 1], dtype=map_dtype))
instance_total_prob = tf.math.reduce_sum(map_or_zero, axis=[1, 2])
instance_avg_prob = tf.divide(instance_total_prob, tf.cast(instance_area, map_dtype))
instance_avg_prob = instance_avg_prob
</DeepExtract>
return instance_avg_prob
|
def per_instance_semantic_probabilities(panoptic_labels: tf.Tensor, instance_panoptic_labels: tf.Tensor, instance_area: tf.Tensor, semantic_probability: tf.Tensor, panoptic_divisor: Union[tf.Tensor, int], ignore_label: Union[tf.Tensor, int]) -> tf.Tensor:
"""Mean probability for the semantic label of each unique instance."""
panoptic_divisor = tf.convert_to_tensor(panoptic_divisor, dtype=tf.int32)
ignore_label = tf.convert_to_tensor(ignore_label, dtype=tf.int32)
semantic_label_map = tf.math.floordiv(panoptic_labels, panoptic_divisor)
map_shape = tf.shape(semantic_label_map)
height = map_shape[0]
width = map_shape[1]
num_pixels = height * width
semantic_index = tf.reshape(semantic_label_map, [num_pixels])
semantic_index = tf.where(semantic_index == ignore_label, 0, semantic_index)
(x, y) = tf.meshgrid(tf.range(width), tf.range(height))
probability_index = tf.stack([tf.reshape(y, [num_pixels]), tf.reshape(x, [num_pixels]), semantic_index], axis=1)
if len(semantic_probability.shape) == 3:
pixel_semantic_probability = tf.reshape(tf.gather_nd(semantic_probability, probability_index), [height, width])
elif len(semantic_probability.shape) == 2:
pixel_semantic_probability = semantic_probability
pixel_semantic_probability = tf.where(semantic_label_map == ignore_label, 0.0, pixel_semantic_probability)
pixel_in_instance = per_instance_masks(panoptic_labels, instance_panoptic_labels)
map_dtype = pixel_semantic_probability.dtype
num_instances = tf.size(instance_panoptic_labels)
map_or_zero = tf.where(pixel_in_instance, tf.expand_dims(pixel_semantic_probability, 0), tf.zeros([num_instances, 1, 1], dtype=map_dtype))
instance_total_prob = tf.math.reduce_sum(map_or_zero, axis=[1, 2])
instance_avg_prob = tf.divide(instance_total_prob, tf.cast(instance_area, map_dtype))
instance_avg_prob = instance_avg_prob
return instance_avg_prob
|
deeplab2
|
positive
|
def write_xml(self, parent):
elm = etree.SubElement(parent, 'circle')
<DeepExtract>
if self.transform is not None:
elm.set(tfname, self.tftostring())
if len(self.appearance) > 0:
elm.set('style', self.appearanceToString())
if self.id is not None:
elm.set('id', self.id)
</DeepExtract>
elm.set('cx', str(self.circle[0]))
elm.set('cy', str(self.circle[1]))
elm.set('r', str(self.circle[2]))
for child in self.children:
child.write_xml(elm)
|
def write_xml(self, parent):
elm = etree.SubElement(parent, 'circle')
if self.transform is not None:
elm.set(tfname, self.tftostring())
if len(self.appearance) > 0:
elm.set('style', self.appearanceToString())
if self.id is not None:
elm.set('id', self.id)
elm.set('cx', str(self.circle[0]))
elm.set('cy', str(self.circle[1]))
elm.set('r', str(self.circle[2]))
for child in self.children:
child.write_xml(elm)
|
diffvg
|
positive
|
def serve_connection(sockobj, client_address):
print('peer {0} connected'.format(client_address))
while True:
try:
buf = sockobj.recv(1024)
if not buf:
break
print('boba')
<DeepExtract>
cachekey = b'primecache:' + buf
cached = rclient.get(cachekey)
if cached is None:
computed = b'prime' if is_prime(int(buf)) else b'composite'
rclient.set(cachekey, computed)
sockobj.send(computed + b'\n')
else:
sockobj.send(cached + b'\n')
</DeepExtract>
except IOError as e:
break
except Exception as e:
print('unknown exception', e)
raise
print('connection from {0} closed'.format(client_address))
sys.stdout.flush()
sockobj.close()
|
def serve_connection(sockobj, client_address):
print('peer {0} connected'.format(client_address))
while True:
try:
buf = sockobj.recv(1024)
if not buf:
break
print('boba')
cachekey = b'primecache:' + buf
cached = rclient.get(cachekey)
if cached is None:
computed = b'prime' if is_prime(int(buf)) else b'composite'
rclient.set(cachekey, computed)
sockobj.send(computed + b'\n')
else:
sockobj.send(cached + b'\n')
except IOError as e:
break
except Exception as e:
print('unknown exception', e)
raise
print('connection from {0} closed'.format(client_address))
sys.stdout.flush()
sockobj.close()
|
code-for-blog
|
positive
|
def add(self, pub_obs, range_idx, legal_actions_list, a_probs, iteration):
if self.size < self._max_size:
<DeepExtract>
if self._nn_type == 'feedforward':
pub_obs = torch.from_numpy(pub_obs)
self._pub_obs_buffer[self.size] = pub_obs
self._range_idx_buffer[self.size] = range_idx
self._legal_action_mask_buffer[self.size] = self._get_mask(legal_actions_list)
self._a_probs_buffer[self.size] = a_probs
self._iteration_buffer[self.size] = float(iteration) ** self._iter_weighting_exponent
self._last_iterationation_seen = iteration
</DeepExtract>
self.size += 1
elif self._should_add():
<DeepExtract>
if self._nn_type == 'feedforward':
pub_obs = torch.from_numpy(pub_obs)
self._pub_obs_buffer[self._random_idx()] = pub_obs
self._range_idx_buffer[self._random_idx()] = range_idx
self._legal_action_mask_buffer[self._random_idx()] = self._get_mask(legal_actions_list)
self._a_probs_buffer[self._random_idx()] = a_probs
self._iteration_buffer[self._random_idx()] = float(iteration) ** self._iter_weighting_exponent
self._last_iterationation_seen = iteration
</DeepExtract>
self.n_entries_seen += 1
|
def add(self, pub_obs, range_idx, legal_actions_list, a_probs, iteration):
if self.size < self._max_size:
if self._nn_type == 'feedforward':
pub_obs = torch.from_numpy(pub_obs)
self._pub_obs_buffer[self.size] = pub_obs
self._range_idx_buffer[self.size] = range_idx
self._legal_action_mask_buffer[self.size] = self._get_mask(legal_actions_list)
self._a_probs_buffer[self.size] = a_probs
self._iteration_buffer[self.size] = float(iteration) ** self._iter_weighting_exponent
self._last_iterationation_seen = iteration
self.size += 1
elif self._should_add():
if self._nn_type == 'feedforward':
pub_obs = torch.from_numpy(pub_obs)
self._pub_obs_buffer[self._random_idx()] = pub_obs
self._range_idx_buffer[self._random_idx()] = range_idx
self._legal_action_mask_buffer[self._random_idx()] = self._get_mask(legal_actions_list)
self._a_probs_buffer[self._random_idx()] = a_probs
self._iteration_buffer[self._random_idx()] = float(iteration) ** self._iter_weighting_exponent
self._last_iterationation_seen = iteration
self.n_entries_seen += 1
|
DREAM
|
positive
|
@init_func_arg_record_decorator()
@typechecked
def __init__(self, env_spec, config_or_config_dict: (DictConfig, dict), value_func: MLPQValueFunction, schedule_param_list=None, name: str='dqn', replay_buffer=None):
ModelFreeAlgo.__init__(self, env_spec=env_spec, name=name)
self.config = construct_dict_config(config_or_config_dict, self)
if replay_buffer:
assert issubclass(replay_buffer, BaseReplayBuffer)
self.replay_buffer = replay_buffer
else:
self.replay_buffer = UniformRandomReplayBuffer(limit=self.config('REPLAY_BUFFER_SIZE'), action_shape=self.env_spec.action_shape, observation_shape=self.env_spec.obs_shape)
self.q_value_func = value_func
self.state_input = self.q_value_func.state_input
self.action_input = self.q_value_func.action_input
self.update_target_q_every_train = self.config('UPDATE_TARGET_Q_FREQUENCY') if 'UPDATE_TARGET_Q_FREQUENCY' in self.config.config_dict else 1
self.parameters = ParametersWithTensorflowVariable(tf_var_list=[], rest_parameters=dict(), to_scheduler_param_tuple=schedule_param_list, name='{}_param'.format(name), source_config=self.config, require_snapshot=False)
with tf.variable_scope(name):
self.reward_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)
self.next_state_input = tf.placeholder(shape=[None, self.env_spec.flat_obs_dim], dtype=tf.float32)
self.done_input = tf.placeholder(shape=[None, 1], dtype=tf.bool)
self.target_q_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)
done = tf.cast(self.done_input, dtype=tf.float32)
self.target_q_value_func = self.q_value_func.make_copy(name_scope='{}_targe_q_value_net'.format(name), name='{}_targe_q_value_net'.format(name), reuse=False)
self.predict_q_value = (1.0 - done) * self.config('GAMMA') * self.target_q_input + self.reward_input
self.td_error = self.predict_q_value - self.q_value_func.q_tensor
with tf.variable_scope('train'):
<DeepExtract>
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.q_value_func.name_scope)
loss = tf.reduce_sum((self.predict_q_value - self.q_value_func.q_tensor) ** 2)
if len(reg_loss) > 0:
loss += tf.reduce_sum(reg_loss)
optimizer = tf.train.AdamOptimizer(learning_rate=self.parameters('LEARNING_RATE'))
optimize_op = optimizer.minimize(loss=loss, var_list=self.q_value_func.parameters('tf_var_list'))
(self.q_value_func_loss, self.optimizer, self.update_q_value_func_op) = (loss, optimizer, optimize_op)
</DeepExtract>
<DeepExtract>
op = []
for (var, target_var) in zip(self.q_value_func.parameters('tf_var_list'), self.target_q_value_func.parameters('tf_var_list')):
ref_val = self.parameters('DECAY') * target_var + (1.0 - self.parameters('DECAY')) * var
op.append(tf.assign(target_var, ref_val))
self.update_target_q_value_func_op = op
</DeepExtract>
var_list = get_tf_collection_var_list(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='{}/train'.format(name)) + self.optimizer.variables()
self.parameters.set_tf_var_list(tf_var_list=sorted(list(set(var_list)), key=lambda x: x.name))
MultiPlaceholderInput.__init__(self, sub_placeholder_input_list=[dict(obj=self.q_value_func, attr_name='q_value_func'), dict(obj=self.target_q_value_func, attr_name='target_q_value_func')], parameters=self.parameters)
|
@init_func_arg_record_decorator()
@typechecked
def __init__(self, env_spec, config_or_config_dict: (DictConfig, dict), value_func: MLPQValueFunction, schedule_param_list=None, name: str='dqn', replay_buffer=None):
ModelFreeAlgo.__init__(self, env_spec=env_spec, name=name)
self.config = construct_dict_config(config_or_config_dict, self)
if replay_buffer:
assert issubclass(replay_buffer, BaseReplayBuffer)
self.replay_buffer = replay_buffer
else:
self.replay_buffer = UniformRandomReplayBuffer(limit=self.config('REPLAY_BUFFER_SIZE'), action_shape=self.env_spec.action_shape, observation_shape=self.env_spec.obs_shape)
self.q_value_func = value_func
self.state_input = self.q_value_func.state_input
self.action_input = self.q_value_func.action_input
self.update_target_q_every_train = self.config('UPDATE_TARGET_Q_FREQUENCY') if 'UPDATE_TARGET_Q_FREQUENCY' in self.config.config_dict else 1
self.parameters = ParametersWithTensorflowVariable(tf_var_list=[], rest_parameters=dict(), to_scheduler_param_tuple=schedule_param_list, name='{}_param'.format(name), source_config=self.config, require_snapshot=False)
with tf.variable_scope(name):
self.reward_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)
self.next_state_input = tf.placeholder(shape=[None, self.env_spec.flat_obs_dim], dtype=tf.float32)
self.done_input = tf.placeholder(shape=[None, 1], dtype=tf.bool)
self.target_q_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)
done = tf.cast(self.done_input, dtype=tf.float32)
self.target_q_value_func = self.q_value_func.make_copy(name_scope='{}_targe_q_value_net'.format(name), name='{}_targe_q_value_net'.format(name), reuse=False)
self.predict_q_value = (1.0 - done) * self.config('GAMMA') * self.target_q_input + self.reward_input
self.td_error = self.predict_q_value - self.q_value_func.q_tensor
with tf.variable_scope('train'):
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.q_value_func.name_scope)
loss = tf.reduce_sum((self.predict_q_value - self.q_value_func.q_tensor) ** 2)
if len(reg_loss) > 0:
loss += tf.reduce_sum(reg_loss)
optimizer = tf.train.AdamOptimizer(learning_rate=self.parameters('LEARNING_RATE'))
optimize_op = optimizer.minimize(loss=loss, var_list=self.q_value_func.parameters('tf_var_list'))
(self.q_value_func_loss, self.optimizer, self.update_q_value_func_op) = (loss, optimizer, optimize_op)
op = []
for (var, target_var) in zip(self.q_value_func.parameters('tf_var_list'), self.target_q_value_func.parameters('tf_var_list')):
ref_val = self.parameters('DECAY') * target_var + (1.0 - self.parameters('DECAY')) * var
op.append(tf.assign(target_var, ref_val))
self.update_target_q_value_func_op = op
var_list = get_tf_collection_var_list(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='{}/train'.format(name)) + self.optimizer.variables()
self.parameters.set_tf_var_list(tf_var_list=sorted(list(set(var_list)), key=lambda x: x.name))
MultiPlaceholderInput.__init__(self, sub_placeholder_input_list=[dict(obj=self.q_value_func, attr_name='q_value_func'), dict(obj=self.target_q_value_func, attr_name='target_q_value_func')], parameters=self.parameters)
|
baconian-project
|
positive
|
def test_p_tag_is_never_empty_element(self):
"""A <p> tag is never designated as an empty-element tag.
Even if the markup shows it as an empty-element tag, it
shouldn't be presented that way.
"""
<DeepExtract>
builder = kwargs.pop('builder', self.default_builder)
soup = BeautifulSoup('<p/>', builder=builder, **kwargs)
</DeepExtract>
self.assertFalse(soup.p.is_empty_element)
self.assertEqual(str(soup.p), '<p></p>')
|
def test_p_tag_is_never_empty_element(self):
"""A <p> tag is never designated as an empty-element tag.
Even if the markup shows it as an empty-element tag, it
shouldn't be presented that way.
"""
builder = kwargs.pop('builder', self.default_builder)
soup = BeautifulSoup('<p/>', builder=builder, **kwargs)
self.assertFalse(soup.p.is_empty_element)
self.assertEqual(str(soup.p), '<p></p>')
|
CnkiSpider
|
positive
|
def run_pipeline(metric, datapoint, processors=None):
if processors is None:
processors = state.pipeline_processors
elif not processors:
return
processor = processors[0]
try:
for (out_metric, out_datapoint) in processor.process(metric, datapoint):
try:
<DeepExtract>
if processors[1:] is None:
processors[1:] = state.pipeline_processors
elif not processors[1:]:
return
processor = processors[1:][0]
try:
for (out_metric, out_datapoint) in processor.process(out_metric, out_datapoint):
try:
run_pipeline(out_metric, out_datapoint, processors[1:][1:])
except Exception:
log.err()
except Exception:
log.err()
</DeepExtract>
except Exception:
log.err()
except Exception:
log.err()
|
def run_pipeline(metric, datapoint, processors=None):
if processors is None:
processors = state.pipeline_processors
elif not processors:
return
processor = processors[0]
try:
for (out_metric, out_datapoint) in processor.process(metric, datapoint):
try:
if processors[1:] is None:
processors[1:] = state.pipeline_processors
elif not processors[1:]:
return
processor = processors[1:][0]
try:
for (out_metric, out_datapoint) in processor.process(out_metric, out_datapoint):
try:
run_pipeline(out_metric, out_datapoint, processors[1:][1:])
except Exception:
log.err()
except Exception:
log.err()
except Exception:
log.err()
except Exception:
log.err()
|
carbon
|
positive
|
def stitch_predicted_to_fms(array_fms_to_save, idx_next_tile_in_fm_vols, fms_per_layer_and_path_for_batch, batch_size, slice_coords, unpred_margin, stride, outp_pred_dims, cnn_pathways, idxs_fms_to_save):
idx_curr = 0
layer_idx = 0
for pathway in cnn_pathways:
for layer_i in range(len(pathway.get_blocks())):
if idxs_fms_to_save[pathway.pType()] == [] or idxs_fms_to_save[pathway.pType()][layer_i] == []:
continue
fms_to_extract_idxs = idxs_fms_to_save[pathway.pType()][layer_i]
fms_layer = fms_per_layer_and_path_for_batch[layer_idx]
fms_to_fill_high_idx = idx_curr + fms_to_extract_idxs[1] - fms_to_extract_idxs[0]
fm_to_reconstruct = array_fms_to_save[idx_curr:fms_to_fill_high_idx]
<DeepExtract>
num_voxels_sub = np.zeros(3)
for i in range(3):
num_voxels_sub[i] = outp_pred_dims[i] - 1 if pathway.pType() != pt.SUBS else int(math.ceil(outp_pred_dims[i] * 1.0 / pathway.subs_factor()[i]) - 1)
(num_voxels_sub_r, num_voxels_sub_c, num_voxels_sub_z) = [int(a) for a in num_voxels_sub]
</DeepExtract>
r_patch_dim = fms_layer.shape[2] - num_voxels_sub_r
c_patch_dim = fms_layer.shape[3] - num_voxels_sub_c
z_patch_dim = fms_layer.shape[4] - num_voxels_sub_z
r_top_left_central_voxel = int((r_patch_dim - 1) // 2)
c_top_left_central_voxel = int((c_patch_dim - 1) // 2)
z_top_left_central_voxel = int((z_patch_dim - 1) // 2)
<DeepExtract>
num_voxels_dir = np.zeros(3)
for i in range(3):
num_voxels_dir[i] = int(math.ceil(outp_pred_dims[i] * 1.0 / pathway.subs_factor()[i])) if pathway.pType() == pt.SUBS else int(outp_pred_dims[i])
(num_central_voxels_r, num_central_voxels_c, num_central_voxels_z) = [int(a) for a in num_voxels_dir]
</DeepExtract>
central_voxels_all_fms = fms_layer[:, :, r_top_left_central_voxel:r_top_left_central_voxel + num_central_voxels_r, c_top_left_central_voxel:c_top_left_central_voxel + num_central_voxels_c, z_top_left_central_voxel:z_top_left_central_voxel + num_central_voxels_z]
if pathway.pType() == pt.SUBS:
expanded_output_r = np.repeat(central_voxels_all_fms, pathway.subs_factor()[0], axis=2)
expanded_output_rc = np.repeat(expanded_output_r, pathway.subs_factor()[1], axis=3)
expanded_output_rcz = np.repeat(expanded_output_rc, pathway.subs_factor()[2], axis=4)
central_voxels_all_fms_batch = expanded_output_rcz[:, :, 0:outp_pred_dims[0], 0:outp_pred_dims[1], 0:outp_pred_dims[2]]
else:
central_voxels_all_fms_batch = central_voxels_all_fms
for tile_batch_idx in range(batch_size):
slice_coords_tile = slice_coords[idx_next_tile_in_fm_vols + tile_batch_idx]
coords_top_left_voxel = [slice_coords_tile[0][0], slice_coords_tile[1][0], slice_coords_tile[2][0]]
fm_to_reconstruct[:, coords_top_left_voxel[0] + unpred_margin[0][0]:coords_top_left_voxel[0] + unpred_margin[0][0] + stride[0], coords_top_left_voxel[1] + unpred_margin[1][0]:coords_top_left_voxel[1] + unpred_margin[1][0] + stride[1], coords_top_left_voxel[2] + unpred_margin[2][0]:coords_top_left_voxel[2] + unpred_margin[2][0] + stride[2]] = central_voxels_all_fms_batch[tile_batch_idx]
idx_curr = fms_to_fill_high_idx
layer_idx += 1
idx_next_tile_in_fm_vols += batch_size
return (idx_next_tile_in_fm_vols, array_fms_to_save)
|
def stitch_predicted_to_fms(array_fms_to_save, idx_next_tile_in_fm_vols, fms_per_layer_and_path_for_batch, batch_size, slice_coords, unpred_margin, stride, outp_pred_dims, cnn_pathways, idxs_fms_to_save):
idx_curr = 0
layer_idx = 0
for pathway in cnn_pathways:
for layer_i in range(len(pathway.get_blocks())):
if idxs_fms_to_save[pathway.pType()] == [] or idxs_fms_to_save[pathway.pType()][layer_i] == []:
continue
fms_to_extract_idxs = idxs_fms_to_save[pathway.pType()][layer_i]
fms_layer = fms_per_layer_and_path_for_batch[layer_idx]
fms_to_fill_high_idx = idx_curr + fms_to_extract_idxs[1] - fms_to_extract_idxs[0]
fm_to_reconstruct = array_fms_to_save[idx_curr:fms_to_fill_high_idx]
num_voxels_sub = np.zeros(3)
for i in range(3):
num_voxels_sub[i] = outp_pred_dims[i] - 1 if pathway.pType() != pt.SUBS else int(math.ceil(outp_pred_dims[i] * 1.0 / pathway.subs_factor()[i]) - 1)
(num_voxels_sub_r, num_voxels_sub_c, num_voxels_sub_z) = [int(a) for a in num_voxels_sub]
r_patch_dim = fms_layer.shape[2] - num_voxels_sub_r
c_patch_dim = fms_layer.shape[3] - num_voxels_sub_c
z_patch_dim = fms_layer.shape[4] - num_voxels_sub_z
r_top_left_central_voxel = int((r_patch_dim - 1) // 2)
c_top_left_central_voxel = int((c_patch_dim - 1) // 2)
z_top_left_central_voxel = int((z_patch_dim - 1) // 2)
num_voxels_dir = np.zeros(3)
for i in range(3):
num_voxels_dir[i] = int(math.ceil(outp_pred_dims[i] * 1.0 / pathway.subs_factor()[i])) if pathway.pType() == pt.SUBS else int(outp_pred_dims[i])
(num_central_voxels_r, num_central_voxels_c, num_central_voxels_z) = [int(a) for a in num_voxels_dir]
central_voxels_all_fms = fms_layer[:, :, r_top_left_central_voxel:r_top_left_central_voxel + num_central_voxels_r, c_top_left_central_voxel:c_top_left_central_voxel + num_central_voxels_c, z_top_left_central_voxel:z_top_left_central_voxel + num_central_voxels_z]
if pathway.pType() == pt.SUBS:
expanded_output_r = np.repeat(central_voxels_all_fms, pathway.subs_factor()[0], axis=2)
expanded_output_rc = np.repeat(expanded_output_r, pathway.subs_factor()[1], axis=3)
expanded_output_rcz = np.repeat(expanded_output_rc, pathway.subs_factor()[2], axis=4)
central_voxels_all_fms_batch = expanded_output_rcz[:, :, 0:outp_pred_dims[0], 0:outp_pred_dims[1], 0:outp_pred_dims[2]]
else:
central_voxels_all_fms_batch = central_voxels_all_fms
for tile_batch_idx in range(batch_size):
slice_coords_tile = slice_coords[idx_next_tile_in_fm_vols + tile_batch_idx]
coords_top_left_voxel = [slice_coords_tile[0][0], slice_coords_tile[1][0], slice_coords_tile[2][0]]
fm_to_reconstruct[:, coords_top_left_voxel[0] + unpred_margin[0][0]:coords_top_left_voxel[0] + unpred_margin[0][0] + stride[0], coords_top_left_voxel[1] + unpred_margin[1][0]:coords_top_left_voxel[1] + unpred_margin[1][0] + stride[1], coords_top_left_voxel[2] + unpred_margin[2][0]:coords_top_left_voxel[2] + unpred_margin[2][0] + stride[2]] = central_voxels_all_fms_batch[tile_batch_idx]
idx_curr = fms_to_fill_high_idx
layer_idx += 1
idx_next_tile_in_fm_vols += batch_size
return (idx_next_tile_in_fm_vols, array_fms_to_save)
|
deepmedic
|
positive
|
def execute_explain(soql: str, parameters: Iterable[Any], query_all: bool=False) -> None:
<DeepExtract>
self.description = None
self.rowcount = -1
self.rownumber = None
del self.messages[:]
self.lastrowid = None
self._next_records_url = None
self._chunk = []
self._chunk_offset = None
self.handle = None
self.qquery = None
self._raw_iterator = None
self._iter = not_executed_yet()
self._check()
</DeepExtract>
assert soql.startswith('EXPLAIN SELECT')
soql = soql.split(' ', 1)[1]
processed_sql = str(soql) % tuple((arg_to_soql(x) for x in parameters))
service = 'query' if not query_all else 'queryAll'
self.qquery = QQuery(soql)
self.description = [('detail', None, None, None, 'detail')]
url_part = '/?'.join((service, urlencode(dict(explain=processed_sql))))
<DeepExtract>
assert 'GET' in ('HEAD', 'GET', 'POST', 'PATCH', 'DELETE')
cursor_context = kwargs.pop('cursor_context', None)
errorhandler = cursor_context.errorhandler if cursor_context else self.errorhandler
if not errorhandler:
ret = self.handle_api_exceptions_inter('GET', *url_parts, **kwargs)
try:
ret = self.handle_api_exceptions_inter('GET', *url_parts, **kwargs)
except (SalesforceError, requests.exceptions.RequestException):
(exc_class, exc_value, _) = sys.exc_info()
errorhandler(self, cursor_context, exc_class, exc_value)
raise
</DeepExtract>
self._chunk = [{'explain': x} for x in pprint.pformat(ret.json(), indent=1, width=100).split('\n')]
self._chunk_offset = 0
self.rownumber = 0
self._iter = iter(self._gen())
|
def execute_explain(soql: str, parameters: Iterable[Any], query_all: bool=False) -> None:
self.description = None
self.rowcount = -1
self.rownumber = None
del self.messages[:]
self.lastrowid = None
self._next_records_url = None
self._chunk = []
self._chunk_offset = None
self.handle = None
self.qquery = None
self._raw_iterator = None
self._iter = not_executed_yet()
self._check()
assert soql.startswith('EXPLAIN SELECT')
soql = soql.split(' ', 1)[1]
processed_sql = str(soql) % tuple((arg_to_soql(x) for x in parameters))
service = 'query' if not query_all else 'queryAll'
self.qquery = QQuery(soql)
self.description = [('detail', None, None, None, 'detail')]
url_part = '/?'.join((service, urlencode(dict(explain=processed_sql))))
assert 'GET' in ('HEAD', 'GET', 'POST', 'PATCH', 'DELETE')
cursor_context = kwargs.pop('cursor_context', None)
errorhandler = cursor_context.errorhandler if cursor_context else self.errorhandler
if not errorhandler:
ret = self.handle_api_exceptions_inter('GET', *url_parts, **kwargs)
try:
ret = self.handle_api_exceptions_inter('GET', *url_parts, **kwargs)
except (SalesforceError, requests.exceptions.RequestException):
(exc_class, exc_value, _) = sys.exc_info()
errorhandler(self, cursor_context, exc_class, exc_value)
raise
self._chunk = [{'explain': x} for x in pprint.pformat(ret.json(), indent=1, width=100).split('\n')]
self._chunk_offset = 0
self.rownumber = 0
self._iter = iter(self._gen())
|
django-salesforce
|
positive
|
def getNetId(p_apiKey, p_orgId, p_shard, p_netName):
<DeepExtract>
global LAST_MERAKI_REQUEST
if (datetime.datetime.now() - LAST_MERAKI_REQUEST).total_seconds() < API_EXEC_DELAY:
time.sleep(API_EXEC_DELAY)
LAST_MERAKI_REQUEST = datetime.datetime.now()
return
</DeepExtract>
requestUrl = 'https://%s/api/v0/organizations/%s/networks' % (p_shard, p_orgId)
try:
r = requests.get(requestUrl, headers={'X-Cisco-Meraki-API-Key': p_apiKey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
except:
return None
if r.status_code != requests.codes.ok:
return None
rjson = r.json()
for net in rjson:
if net['name'] == p_netName:
return net['id']
return None
|
def getNetId(p_apiKey, p_orgId, p_shard, p_netName):
global LAST_MERAKI_REQUEST
if (datetime.datetime.now() - LAST_MERAKI_REQUEST).total_seconds() < API_EXEC_DELAY:
time.sleep(API_EXEC_DELAY)
LAST_MERAKI_REQUEST = datetime.datetime.now()
return
requestUrl = 'https://%s/api/v0/organizations/%s/networks' % (p_shard, p_orgId)
try:
r = requests.get(requestUrl, headers={'X-Cisco-Meraki-API-Key': p_apiKey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
except:
return None
if r.status_code != requests.codes.ok:
return None
rjson = r.json()
for net in rjson:
if net['name'] == p_netName:
return net['id']
return None
|
automation-scripts
|
positive
|
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
<DeepExtract>
updated_value = copy.deepcopy(max_positions)
for key in arg:
if key not in updated_value:
updated_value[key] = arg[key]
else:
updated_value[key] = min(max_positions[key], arg[key])
max_positions = updated_value
</DeepExtract>
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
|
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
updated_value = copy.deepcopy(max_positions)
for key in arg:
if key not in updated_value:
updated_value[key] = arg[key]
else:
updated_value[key] = min(max_positions[key], arg[key])
max_positions = updated_value
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
|
DisCo
|
positive
|
def description(self):
""" Retorna uma breve descricao do portal
"""
<DeepExtract>
ps = self.context.restrictedTraverse('@@plone_portal_state')
portal = ps.portal()
portal = portal
</DeepExtract>
return getattr(portal, 'description', '')
|
def description(self):
""" Retorna uma breve descricao do portal
"""
ps = self.context.restrictedTraverse('@@plone_portal_state')
portal = ps.portal()
portal = portal
return getattr(portal, 'description', '')
|
brasil.gov.portal
|
positive
|
def execute_outgoing_transaction(self, transaction: Transaction, *args, **kwargs):
def error():
transaction.status = Transaction.STATUS.error
transaction.status_message = f'Unable to find user info for transaction {transaction.id}'
transaction.save()
logger.info('fetching user data for transaction')
user_transaction = PolarisUserTransaction.objects.filter(transaction_id=transaction.id).first()
if not user_transaction:
<DeepExtract>
transaction.status = Transaction.STATUS.error
transaction.status_message = f'Unable to find user info for transaction {transaction.id}'
transaction.save()
</DeepExtract>
return
if user_transaction.user:
user = user_transaction.user
else:
user = getattr(user_transaction.account, 'user', None)
if not user:
<DeepExtract>
transaction.status = Transaction.STATUS.error
transaction.status_message = f'Unable to find user info for transaction {transaction.id}'
transaction.save()
</DeepExtract>
return
if transaction.kind in [Transaction.KIND.withdrawal, getattr(Transaction.KIND, 'withdrawal-exchange')]:
operation = settings.OPERATION_WITHDRAWAL
else:
operation = Transaction.KIND.send
if not transaction.amount_fee or transaction.amount_expected != transaction.amount_in:
transaction.amount_fee = calculate_fee({'amount': transaction.amount_in, 'operation': operation, 'asset_code': transaction.asset.code})
if transaction.quote:
(scheme, identifier) = transaction.quote.buy_asset.split(':')
buy_asset = OffChainAsset.objects.get(scheme=scheme, identifier=identifier)
if transaction.quote.type == Quote.TYPE.indicative:
transaction.quote.price = round(get_mock_firm_exchange_price(), transaction.asset.significant_decimals)
transaction.quote.sell_amount = transaction.amount_in
transaction.quote.buy_amount = round(transaction.amount_in / transaction.quote.price, buy_asset.significant_decimals)
elif transaction.amount_in != transaction.quote.sell_amount:
transaction.status = Transaction.STATUS.error
transaction.status_message = 'the amount sent to the anchor does not match the quoted sell amount'
transaction.save()
return
transaction.amount_out = transaction.quote.buy_amount - round(transaction.amount_fee / transaction.quote.price, buy_asset.significant_decimals)
elif not transaction.amount_out:
transaction.amount_out = round(transaction.amount_in - transaction.amount_fee, transaction.asset.significant_decimals)
client = rails.BankAPIClient('fake anchor bank account number')
response = client.send_funds(to_account=user.bank_account_number, amount=transaction.amount_in - transaction.amount_fee)
if response['success']:
logger.info(f'successfully sent mock outgoing transaction {transaction.id}')
transaction.status = Transaction.STATUS.pending_external
else:
error_fields = response.error.fields
info_fields = MySEP31ReceiverIntegration().info(Mock(), transaction.asset)
required_info_update = defaultdict(dict)
for field in error_fields:
if 'name' in field:
required_info_update['receiver'][field] = info_fields['receiver'][field]
elif 'account' in field:
required_info_update['transaction'][field] = info_fields['receiver'][field]
transaction.required_info_update = json.dumps(required_info_update)
transaction.required_info_message = response.error.message
transaction.status = Transaction.STATUS.pending_transaction_info_update
if transaction.quote:
transaction.quote.save()
transaction.save()
|
def execute_outgoing_transaction(self, transaction: Transaction, *args, **kwargs):
def error():
transaction.status = Transaction.STATUS.error
transaction.status_message = f'Unable to find user info for transaction {transaction.id}'
transaction.save()
logger.info('fetching user data for transaction')
user_transaction = PolarisUserTransaction.objects.filter(transaction_id=transaction.id).first()
if not user_transaction:
transaction.status = Transaction.STATUS.error
transaction.status_message = f'Unable to find user info for transaction {transaction.id}'
transaction.save()
return
if user_transaction.user:
user = user_transaction.user
else:
user = getattr(user_transaction.account, 'user', None)
if not user:
transaction.status = Transaction.STATUS.error
transaction.status_message = f'Unable to find user info for transaction {transaction.id}'
transaction.save()
return
if transaction.kind in [Transaction.KIND.withdrawal, getattr(Transaction.KIND, 'withdrawal-exchange')]:
operation = settings.OPERATION_WITHDRAWAL
else:
operation = Transaction.KIND.send
if not transaction.amount_fee or transaction.amount_expected != transaction.amount_in:
transaction.amount_fee = calculate_fee({'amount': transaction.amount_in, 'operation': operation, 'asset_code': transaction.asset.code})
if transaction.quote:
(scheme, identifier) = transaction.quote.buy_asset.split(':')
buy_asset = OffChainAsset.objects.get(scheme=scheme, identifier=identifier)
if transaction.quote.type == Quote.TYPE.indicative:
transaction.quote.price = round(get_mock_firm_exchange_price(), transaction.asset.significant_decimals)
transaction.quote.sell_amount = transaction.amount_in
transaction.quote.buy_amount = round(transaction.amount_in / transaction.quote.price, buy_asset.significant_decimals)
elif transaction.amount_in != transaction.quote.sell_amount:
transaction.status = Transaction.STATUS.error
transaction.status_message = 'the amount sent to the anchor does not match the quoted sell amount'
transaction.save()
return
transaction.amount_out = transaction.quote.buy_amount - round(transaction.amount_fee / transaction.quote.price, buy_asset.significant_decimals)
elif not transaction.amount_out:
transaction.amount_out = round(transaction.amount_in - transaction.amount_fee, transaction.asset.significant_decimals)
client = rails.BankAPIClient('fake anchor bank account number')
response = client.send_funds(to_account=user.bank_account_number, amount=transaction.amount_in - transaction.amount_fee)
if response['success']:
logger.info(f'successfully sent mock outgoing transaction {transaction.id}')
transaction.status = Transaction.STATUS.pending_external
else:
error_fields = response.error.fields
info_fields = MySEP31ReceiverIntegration().info(Mock(), transaction.asset)
required_info_update = defaultdict(dict)
for field in error_fields:
if 'name' in field:
required_info_update['receiver'][field] = info_fields['receiver'][field]
elif 'account' in field:
required_info_update['transaction'][field] = info_fields['receiver'][field]
transaction.required_info_update = json.dumps(required_info_update)
transaction.required_info_message = response.error.message
transaction.status = Transaction.STATUS.pending_transaction_info_update
if transaction.quote:
transaction.quote.save()
transaction.save()
|
django-polaris
|
positive
|
def test_repeat1(self):
add_one = Lambda(lambda x: x + 1)
<DeepExtract>
np.random.seed(0)
net_func = self.create_function(Scalar() >> add_one)
result = net_func(np.array([1]))
np.testing.assert_almost_equal(result, np.array([2]), decimal=5)
</DeepExtract>
<DeepExtract>
np.random.seed(0)
net_func = self.create_function(Scalar() >> Repeat(add_one, 2))
result = net_func(np.array([1]))
np.testing.assert_almost_equal(result, np.array([3]), decimal=5)
</DeepExtract>
<DeepExtract>
np.random.seed(0)
net_func = self.create_function(Scalar() >> Repeat(add_one, 10))
result = net_func(np.array([1]))
np.testing.assert_almost_equal(result, np.array([11]), decimal=5)
</DeepExtract>
|
def test_repeat1(self):
add_one = Lambda(lambda x: x + 1)
np.random.seed(0)
net_func = self.create_function(Scalar() >> add_one)
result = net_func(np.array([1]))
np.testing.assert_almost_equal(result, np.array([2]), decimal=5)
np.random.seed(0)
net_func = self.create_function(Scalar() >> Repeat(add_one, 2))
result = net_func(np.array([1]))
np.testing.assert_almost_equal(result, np.array([3]), decimal=5)
np.random.seed(0)
net_func = self.create_function(Scalar() >> Repeat(add_one, 10))
result = net_func(np.array([1]))
np.testing.assert_almost_equal(result, np.array([11]), decimal=5)
|
deepx
|
positive
|
def add_role_boundaries(iam_role: Role, policy: str) -> None:
"""
Function to set permission boundary onto an IAM role
:param troposphere.iam.Role iam_role: the IAM Role to add the boundary to
:param str policy: the name or ARN of the policy
"""
if not isinstance(iam_role, Role):
raise TypeError(f'{iam_role} is of type', type(iam_role), 'expected', Role)
if isinstance(policy, str):
<DeepExtract>
policy_def = policy
policy_re = re.compile('((^([a-zA-Z0-9-_./]+)$)|(^(arn:aws:iam::(aws|\\d{12}):policy/)[a-zA-Z0-9-_./]+$))')
if not policy_re.match(policy):
raise ValueError(f'policy name {policy} does not match expected regexp', policy_re.pattern)
if isinstance(policy, str) and (not policy.startswith('arn:aws:iam::')):
policy_def = Sub(f'arn:${{AWS::Partition}}:iam::${{AWS::AccountId}}:policy/{policy}')
elif isinstance(policy, (Sub, Ref, Join)):
LOG.debug(f'policy {policy}')
policy = policy_def
</DeepExtract>
if hasattr(iam_role, 'PermissionsBoundary'):
LOG.warning(f'IAM Role {iam_role.title} already has PermissionsBoundary set. Overriding')
setattr(iam_role, 'PermissionsBoundary', policy)
|
def add_role_boundaries(iam_role: Role, policy: str) -> None:
"""
Function to set permission boundary onto an IAM role
:param troposphere.iam.Role iam_role: the IAM Role to add the boundary to
:param str policy: the name or ARN of the policy
"""
if not isinstance(iam_role, Role):
raise TypeError(f'{iam_role} is of type', type(iam_role), 'expected', Role)
if isinstance(policy, str):
policy_def = policy
policy_re = re.compile('((^([a-zA-Z0-9-_./]+)$)|(^(arn:aws:iam::(aws|\\d{12}):policy/)[a-zA-Z0-9-_./]+$))')
if not policy_re.match(policy):
raise ValueError(f'policy name {policy} does not match expected regexp', policy_re.pattern)
if isinstance(policy, str) and (not policy.startswith('arn:aws:iam::')):
policy_def = Sub(f'arn:${{AWS::Partition}}:iam::${{AWS::AccountId}}:policy/{policy}')
elif isinstance(policy, (Sub, Ref, Join)):
LOG.debug(f'policy {policy}')
policy = policy_def
if hasattr(iam_role, 'PermissionsBoundary'):
LOG.warning(f'IAM Role {iam_role.title} already has PermissionsBoundary set. Overriding')
setattr(iam_role, 'PermissionsBoundary', policy)
|
ecs_composex
|
positive
|
def test_triangle_to_triangle(self):
g = [(0, 1), (1, 2), (2, 0)]
emb = {0: (1,), 1: (2,), 2: (0,)}
a = dwave.embedding.EmbeddedStructure(g, emb)
inter_edges = {(0, 1): [(1, 2)], (1, 0): [(2, 1)], (0, 2): [(1, 0)], (2, 0): [(0, 1)], (1, 2): [(2, 0)], (2, 1): [(0, 2)]}
chain_edges = {i: [] for i in range(3)}
<DeepExtract>
for (u, v) in itertools.product(a, a):
if u == v:
check = chain_edges[u]
got = list(a.chain_edges(u))
self.assertEqual(check, got)
else:
check = inter_edges[u, v]
got = list(a.interaction_edges(u, v))
self.assertEqual(check, got)
</DeepExtract>
<DeepExtract>
for (u, v) in itertools.product(a.copy(), a.copy()):
if u == v:
check = chain_edges[u]
got = list(a.copy().chain_edges(u))
self.assertEqual(check, got)
else:
check = inter_edges[u, v]
got = list(a.copy().interaction_edges(u, v))
self.assertEqual(check, got)
</DeepExtract>
|
def test_triangle_to_triangle(self):
g = [(0, 1), (1, 2), (2, 0)]
emb = {0: (1,), 1: (2,), 2: (0,)}
a = dwave.embedding.EmbeddedStructure(g, emb)
inter_edges = {(0, 1): [(1, 2)], (1, 0): [(2, 1)], (0, 2): [(1, 0)], (2, 0): [(0, 1)], (1, 2): [(2, 0)], (2, 1): [(0, 2)]}
chain_edges = {i: [] for i in range(3)}
for (u, v) in itertools.product(a, a):
if u == v:
check = chain_edges[u]
got = list(a.chain_edges(u))
self.assertEqual(check, got)
else:
check = inter_edges[u, v]
got = list(a.interaction_edges(u, v))
self.assertEqual(check, got)
for (u, v) in itertools.product(a.copy(), a.copy()):
if u == v:
check = chain_edges[u]
got = list(a.copy().chain_edges(u))
self.assertEqual(check, got)
else:
check = inter_edges[u, v]
got = list(a.copy().interaction_edges(u, v))
self.assertEqual(check, got)
|
dwave-system
|
positive
|
def upsert(self, conflict_target: ConflictTarget, fields: dict, index_predicate: Optional[Union[Expression, Q, str]]=None, using: Optional[str]=None, update_condition: Optional[Union[Expression, Q, str]]=None) -> int:
"""Creates a new record or updates the existing one with the specified
data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
using:
The name of the database connection to
use for this query.
update_condition:
Only update if this SQL expression evaluates to true.
Returns:
The primary key of the row that was created/updated.
"""
<DeepExtract>
self.conflict_target = conflict_target
self.conflict_action = ConflictAction.UPDATE
self.conflict_update_condition = update_condition
self.index_predicate = index_predicate
return self
</DeepExtract>
return self.insert(**fields, using=using)
|
def upsert(self, conflict_target: ConflictTarget, fields: dict, index_predicate: Optional[Union[Expression, Q, str]]=None, using: Optional[str]=None, update_condition: Optional[Union[Expression, Q, str]]=None) -> int:
"""Creates a new record or updates the existing one with the specified
data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
using:
The name of the database connection to
use for this query.
update_condition:
Only update if this SQL expression evaluates to true.
Returns:
The primary key of the row that was created/updated.
"""
self.conflict_target = conflict_target
self.conflict_action = ConflictAction.UPDATE
self.conflict_update_condition = update_condition
self.index_predicate = index_predicate
return self
return self.insert(**fields, using=using)
|
django-postgres-extra
|
positive
|
def _checkDigits(self, data):
try:
for (field, cd) in data:
<DeepExtract>
cpt = 0
res = 0
for x in field:
tmp = self._weight[str(x)] * self._weighting[cpt % 3]
res += tmp
cpt += 1
res = str(res % 10)
</DeepExtract>
if str(res) != str(cd):
return False
return True
except KeyError:
return False
|
def _checkDigits(self, data):
try:
for (field, cd) in data:
cpt = 0
res = 0
for x in field:
tmp = self._weight[str(x)] * self._weighting[cpt % 3]
res += tmp
cpt += 1
res = str(res % 10)
if str(res) != str(cd):
return False
return True
except KeyError:
return False
|
epassportviewer
|
positive
|
def random_sent(index):
"""
Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
from one doc. With 50% the second sentence will be a random one from another doc.
:param index: int, index of sample.
:return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
"""
<DeepExtract>
t1 = ''
t2 = ''
assert index < self.corpus_lines
if self.on_memory:
sample = self.sample_to_doc[index]
t1 = self.all_docs[sample['doc_id']][sample['line']]
t2 = self.all_docs[sample['doc_id']][sample['line'] + 1]
self.current_doc = sample['doc_id']
(t1, t2) = (t1, t2)
else:
if self.line_buffer is None:
while t1 == '':
t1 = next(self.file).strip()
t2 = next(self.file).strip()
else:
t1 = self.line_buffer
t2 = next(self.file).strip()
while t2 == '' or t1 == '':
t1 = next(self.file).strip()
t2 = next(self.file).strip()
self.current_doc = self.current_doc + 1
self.line_buffer = t2
assert t1 != ''
assert t2 != ''
(t1, t2) = (t1, t2)
</DeepExtract>
if random.random() > 0.5:
label = 0
else:
<DeepExtract>
for _ in range(10):
if self.on_memory:
rand_doc_idx = random.randint(0, len(self.all_docs) - 1)
rand_doc = self.all_docs[rand_doc_idx]
line = rand_doc[random.randrange(len(rand_doc))]
else:
rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
for _ in range(rand_index):
line = self.get_next_line()
if self.current_random_doc != self.current_doc:
break
t2 = line
</DeepExtract>
label = 1
assert len(t1) > 0
assert len(t2) > 0
return (t1, t2, label)
|
def random_sent(index):
"""
Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
from one doc. With 50% the second sentence will be a random one from another doc.
:param index: int, index of sample.
:return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
"""
t1 = ''
t2 = ''
assert index < self.corpus_lines
if self.on_memory:
sample = self.sample_to_doc[index]
t1 = self.all_docs[sample['doc_id']][sample['line']]
t2 = self.all_docs[sample['doc_id']][sample['line'] + 1]
self.current_doc = sample['doc_id']
(t1, t2) = (t1, t2)
else:
if self.line_buffer is None:
while t1 == '':
t1 = next(self.file).strip()
t2 = next(self.file).strip()
else:
t1 = self.line_buffer
t2 = next(self.file).strip()
while t2 == '' or t1 == '':
t1 = next(self.file).strip()
t2 = next(self.file).strip()
self.current_doc = self.current_doc + 1
self.line_buffer = t2
assert t1 != ''
assert t2 != ''
(t1, t2) = (t1, t2)
if random.random() > 0.5:
label = 0
else:
for _ in range(10):
if self.on_memory:
rand_doc_idx = random.randint(0, len(self.all_docs) - 1)
rand_doc = self.all_docs[rand_doc_idx]
line = rand_doc[random.randrange(len(rand_doc))]
else:
rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
for _ in range(rand_index):
line = self.get_next_line()
if self.current_random_doc != self.current_doc:
break
t2 = line
label = 1
assert len(t1) > 0
assert len(t2) > 0
return (t1, t2, label)
|
bert_on_stilts
|
positive
|
def _create_attribute_node(node, context, step):
<DeepExtract>
if not step.node_test.prefix:
nsmap = {}
ns_uri = None
node_name = step.node_test.name
node_xpath = '@%s' % node_name
else:
if 'namespaces' in context and step.node_test.prefix in context['namespaces']:
ns_uri = context['namespaces'][step.node_test.prefix]
else:
ns_uri = None
node_xpath = '@%s:%s' % (step.node_test.prefix, step.node_test.name)
node_name = '{%s}%s' % (ns_uri, step.node_test.name)
nsmap = {step.node_test.prefix: ns_uri}
(node_name, node_xpath, nsmap) = (node_name, node_xpath, nsmap)
</DeepExtract>
node.set(node_name, '')
result = node.xpath(node_xpath, namespaces=nsmap)
return result[0]
|
def _create_attribute_node(node, context, step):
if not step.node_test.prefix:
nsmap = {}
ns_uri = None
node_name = step.node_test.name
node_xpath = '@%s' % node_name
else:
if 'namespaces' in context and step.node_test.prefix in context['namespaces']:
ns_uri = context['namespaces'][step.node_test.prefix]
else:
ns_uri = None
node_xpath = '@%s:%s' % (step.node_test.prefix, step.node_test.name)
node_name = '{%s}%s' % (ns_uri, step.node_test.name)
nsmap = {step.node_test.prefix: ns_uri}
(node_name, node_xpath, nsmap) = (node_name, node_xpath, nsmap)
node.set(node_name, '')
result = node.xpath(node_xpath, namespaces=nsmap)
return result[0]
|
eulxml
|
positive
|
@pytest.mark.parametrize('seed', (10, 17))
@pytest.mark.parametrize('n_categories', (2, 5))
@pytest.mark.parametrize('weight_distribution', ['uniform', 'gaussian'])
@pytest.mark.parametrize('intercept_distribution', ['uniform', 'gaussian'])
def test_mixed_type_independence(self, seed, n_categories, weight_distribution, intercept_distribution):
"""
Test whether the relation is accurate, implicitly tests sequence of
nodes.
"""
np.random.seed(seed)
sm = StructureModel()
nodes = list((str(x) for x in range(6)))
np.random.shuffle(nodes)
sm.add_nodes_from(nodes)
sm.add_weighted_edges_from([('0', '1', 10)])
sm.add_weighted_edges_from([('2', '4', None)])
sm.add_weighted_edges_from([('2', '6', 100)])
schema = {'0': 'binary', '1': f'categorical:{n_categories}', '2': 'binary', '4': 'continuous', '5': f'categorical:{n_categories}', '6': 'count'}
df = sem_generator(graph=sm, schema=schema, default_type='continuous', distributions={'weight': weight_distribution, 'intercept': intercept_distribution, 'count': 0.05}, noise_std=2, n_samples=100000, intercept=True, seed=seed)
atol = 0.02
(c, _) = max(((i, np.abs(df[f'1_{i}'].mean() - 1 / n_categories)) for i in range(n_categories)), key=operator.itemgetter(1))
<DeepExtract>
if isinstance(df, pd.DataFrame):
marginal_0 = df['0'].mean()
marginal_1 = df[f'1_{c}'].mean()
joint_proba = (df['0'] * df[f'1_{c}']).mean()
else:
marginal_0 = df[:, '0'].mean()
marginal_1 = df[:, f'1_{c}'].mean()
joint_proba = (df[:, '0'] * df[:, f'1_{c}']).mean()
factored_proba = marginal_0 * marginal_1
(joint_proba, factored_proba) = (joint_proba, factored_proba)
</DeepExtract>
assert not np.isclose(joint_proba, factored_proba, rtol=0, atol=atol)
assert not np.isclose(df['4'].mean(), df['4'][df['2'] == 1].mean(), rtol=0, atol=atol)
assert not np.isclose(df.loc[df['2'] == 0, '6'].mean(), df.loc[df['2'] == 1, '6'].mean(), rtol=0, atol=atol)
tol = 0.2
(c, _) = max(((i, np.abs(df[f'1_{i}'].mean() - 1 / n_categories)) for i in range(n_categories)), key=operator.itemgetter(1))
<DeepExtract>
if isinstance(df, pd.DataFrame):
marginal_0 = df['0'].mean()
marginal_1 = df[f'5_{c}'].mean()
joint_proba = (df['0'] * df[f'5_{c}']).mean()
else:
marginal_0 = df[:, '0'].mean()
marginal_1 = df[:, f'5_{c}'].mean()
joint_proba = (df[:, '0'] * df[:, f'5_{c}']).mean()
factored_proba = marginal_0 * marginal_1
(joint_proba, factored_proba) = (joint_proba, factored_proba)
</DeepExtract>
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
<DeepExtract>
if isinstance(df, pd.DataFrame):
marginal_0 = df['0'].mean()
marginal_1 = df['2'].mean()
joint_proba = (df['0'] * df['2']).mean()
else:
marginal_0 = df[:, '0'].mean()
marginal_1 = df[:, '2'].mean()
joint_proba = (df[:, '0'] * df[:, '2']).mean()
factored_proba = marginal_0 * marginal_1
(joint_proba, factored_proba) = (joint_proba, factored_proba)
</DeepExtract>
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
(c, _) = max(((i, np.abs(df[f'1_{i}'].mean() - 1 / n_categories)) for i in range(n_categories)), key=operator.itemgetter(1))
(d, _) = max(((d, np.abs(df[f'5_{d}'].mean() - 1 / n_categories)) for d in range(n_categories)), key=operator.itemgetter(1))
<DeepExtract>
if isinstance(df, pd.DataFrame):
marginal_0 = df[f'1_{d}'].mean()
marginal_1 = df[f'5_{c}'].mean()
joint_proba = (df[f'1_{d}'] * df[f'5_{c}']).mean()
else:
marginal_0 = df[:, f'1_{d}'].mean()
marginal_1 = df[:, f'5_{c}'].mean()
joint_proba = (df[:, f'1_{d}'] * df[:, f'5_{c}']).mean()
factored_proba = marginal_0 * marginal_1
(joint_proba, factored_proba) = (joint_proba, factored_proba)
</DeepExtract>
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
assert np.isclose(df[['3', '4']].corr().values[0, 1], 0, atol=tol)
|
@pytest.mark.parametrize('seed', (10, 17))
@pytest.mark.parametrize('n_categories', (2, 5))
@pytest.mark.parametrize('weight_distribution', ['uniform', 'gaussian'])
@pytest.mark.parametrize('intercept_distribution', ['uniform', 'gaussian'])
def test_mixed_type_independence(self, seed, n_categories, weight_distribution, intercept_distribution):
"""
Test whether the relation is accurate, implicitly tests sequence of
nodes.
"""
np.random.seed(seed)
sm = StructureModel()
nodes = list((str(x) for x in range(6)))
np.random.shuffle(nodes)
sm.add_nodes_from(nodes)
sm.add_weighted_edges_from([('0', '1', 10)])
sm.add_weighted_edges_from([('2', '4', None)])
sm.add_weighted_edges_from([('2', '6', 100)])
schema = {'0': 'binary', '1': f'categorical:{n_categories}', '2': 'binary', '4': 'continuous', '5': f'categorical:{n_categories}', '6': 'count'}
df = sem_generator(graph=sm, schema=schema, default_type='continuous', distributions={'weight': weight_distribution, 'intercept': intercept_distribution, 'count': 0.05}, noise_std=2, n_samples=100000, intercept=True, seed=seed)
atol = 0.02
(c, _) = max(((i, np.abs(df[f'1_{i}'].mean() - 1 / n_categories)) for i in range(n_categories)), key=operator.itemgetter(1))
if isinstance(df, pd.DataFrame):
marginal_0 = df['0'].mean()
marginal_1 = df[f'1_{c}'].mean()
joint_proba = (df['0'] * df[f'1_{c}']).mean()
else:
marginal_0 = df[:, '0'].mean()
marginal_1 = df[:, f'1_{c}'].mean()
joint_proba = (df[:, '0'] * df[:, f'1_{c}']).mean()
factored_proba = marginal_0 * marginal_1
(joint_proba, factored_proba) = (joint_proba, factored_proba)
assert not np.isclose(joint_proba, factored_proba, rtol=0, atol=atol)
assert not np.isclose(df['4'].mean(), df['4'][df['2'] == 1].mean(), rtol=0, atol=atol)
assert not np.isclose(df.loc[df['2'] == 0, '6'].mean(), df.loc[df['2'] == 1, '6'].mean(), rtol=0, atol=atol)
tol = 0.2
(c, _) = max(((i, np.abs(df[f'1_{i}'].mean() - 1 / n_categories)) for i in range(n_categories)), key=operator.itemgetter(1))
if isinstance(df, pd.DataFrame):
marginal_0 = df['0'].mean()
marginal_1 = df[f'5_{c}'].mean()
joint_proba = (df['0'] * df[f'5_{c}']).mean()
else:
marginal_0 = df[:, '0'].mean()
marginal_1 = df[:, f'5_{c}'].mean()
joint_proba = (df[:, '0'] * df[:, f'5_{c}']).mean()
factored_proba = marginal_0 * marginal_1
(joint_proba, factored_proba) = (joint_proba, factored_proba)
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
if isinstance(df, pd.DataFrame):
marginal_0 = df['0'].mean()
marginal_1 = df['2'].mean()
joint_proba = (df['0'] * df['2']).mean()
else:
marginal_0 = df[:, '0'].mean()
marginal_1 = df[:, '2'].mean()
joint_proba = (df[:, '0'] * df[:, '2']).mean()
factored_proba = marginal_0 * marginal_1
(joint_proba, factored_proba) = (joint_proba, factored_proba)
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
(c, _) = max(((i, np.abs(df[f'1_{i}'].mean() - 1 / n_categories)) for i in range(n_categories)), key=operator.itemgetter(1))
(d, _) = max(((d, np.abs(df[f'5_{d}'].mean() - 1 / n_categories)) for d in range(n_categories)), key=operator.itemgetter(1))
if isinstance(df, pd.DataFrame):
marginal_0 = df[f'1_{d}'].mean()
marginal_1 = df[f'5_{c}'].mean()
joint_proba = (df[f'1_{d}'] * df[f'5_{c}']).mean()
else:
marginal_0 = df[:, f'1_{d}'].mean()
marginal_1 = df[:, f'5_{c}'].mean()
joint_proba = (df[:, f'1_{d}'] * df[:, f'5_{c}']).mean()
factored_proba = marginal_0 * marginal_1
(joint_proba, factored_proba) = (joint_proba, factored_proba)
assert np.isclose(joint_proba, factored_proba, rtol=tol, atol=0)
assert np.isclose(df[['3', '4']].corr().values[0, 1], 0, atol=tol)
|
causalnex
|
positive
|
def test_only_amiv_token_auth(self):
"""Make sure functions is called with auth as arg for AmivTokenAuth."""
dec = only_amiv_token_auth
auth = self.app.config['DOMAIN']['fake']['authentication']
for resource in ['fake_nothing', 'fake_no_amiv']:
<DeepExtract>
self.was_called = False
self.call_args = []
if not [resource]:
[resource] = []
decorated = dec(self.test_func)
with self._init_context(**{}):
decorated(*[resource])
self.assertEqual(False, self.was_called)
if call_args:
self.assertItemsEqual(call_args, self.call_args)
</DeepExtract>
<DeepExtract>
self.was_called = False
self.call_args = []
if not ['fake']:
['fake'] = []
decorated = dec(self.test_func)
with self._init_context(**{}):
decorated(*['fake'])
self.assertEqual(True, self.was_called)
if [auth, 'fake']:
self.assertItemsEqual([auth, 'fake'], self.call_args)
</DeepExtract>
|
def test_only_amiv_token_auth(self):
"""Make sure functions is called with auth as arg for AmivTokenAuth."""
dec = only_amiv_token_auth
auth = self.app.config['DOMAIN']['fake']['authentication']
for resource in ['fake_nothing', 'fake_no_amiv']:
self.was_called = False
self.call_args = []
if not [resource]:
[resource] = []
decorated = dec(self.test_func)
with self._init_context(**{}):
decorated(*[resource])
self.assertEqual(False, self.was_called)
if call_args:
self.assertItemsEqual(call_args, self.call_args)
self.was_called = False
self.call_args = []
if not ['fake']:
['fake'] = []
decorated = dec(self.test_func)
with self._init_context(**{}):
decorated(*['fake'])
self.assertEqual(True, self.was_called)
if [auth, 'fake']:
self.assertItemsEqual([auth, 'fake'], self.call_args)
|
amivapi
|
positive
|
def _check_container_sparse_pre_checkout(self, overall, tree):
self.assertEqual(overall, 0)
<DeepExtract>
name = './{0}/simp_tag'.format(directory)
self._check_generic_empty_default_required(tree, name)
</DeepExtract>
<DeepExtract>
name = './{0}/simp_sparse'.format(directory)
self._check_generic_empty_default_required(tree, name)
</DeepExtract>
|
def _check_container_sparse_pre_checkout(self, overall, tree):
self.assertEqual(overall, 0)
name = './{0}/simp_tag'.format(directory)
self._check_generic_empty_default_required(tree, name)
name = './{0}/simp_sparse'.format(directory)
self._check_generic_empty_default_required(tree, name)
|
CESM
|
positive
|
def get_scalar_props(self, armature):
bones = armature.data.edit_bones if armature.mode == 'EDIT' else armature.data.bones
self.is_editing = True
self.bones.clear()
self.constraints.clear()
self.drivers.clear()
if bones.active:
self.target.end = bones.active.name
<DeepExtract>
(parent, parents) = (self.get(self.target.end), [])
while len(parents) < self.target.length:
parents.append(parent)
parent = parent.parent if parent else None
parents = parents
</DeepExtract>
for parent in reversed(parents):
bone = self.bones.add()
if parent:
bone.source = parent.name
copy_rot = self.constraints.add()
copy_rot.flavour = 'COPY_ROTATION'
copy_sca = self.constraints.add()
copy_sca.flavour = 'COPY_SCALE'
limit_sca = self.constraints.add()
limit_sca.flavour = 'LIMIT_SCALE'
limit_rot = self.constraints.add()
limit_rot.flavour = 'LIMIT_ROTATION'
limit_rot = self.constraints.add()
limit_rot.flavour = 'LIMIT_ROTATION'
for _ in [self.bones[-1].gizmo, self.bones[-1].stretch]:
self.constraints.add()
self.constraints.add()
for _ in [self.bones[0].gizmo, self.bones[0].stretch]:
self.constraints.add()
self.constraints.add()
ik_settings = ['ik_stretch', 'lock_ik_x', 'lock_ik_y', 'lock_ik_z', 'ik_stiffness_x', 'ik_stiffness_y', 'ik_stiffness_z', 'use_ik_limit_x', 'ik_min_x', 'ik_max_x', 'use_ik_limit_y', 'ik_min_y', 'ik_max_y', 'use_ik_limit_z', 'ik_min_z', 'ik_max_z']
for bone in self.bones:
for _ in [bone.gizmo, bone.stretch]:
for _ in ik_settings:
self.drivers.add()
for _ in [b.gizmo for b in self.bones]:
self.drivers.add()
self.is_editing = False
|
def get_scalar_props(self, armature):
bones = armature.data.edit_bones if armature.mode == 'EDIT' else armature.data.bones
self.is_editing = True
self.bones.clear()
self.constraints.clear()
self.drivers.clear()
if bones.active:
self.target.end = bones.active.name
(parent, parents) = (self.get(self.target.end), [])
while len(parents) < self.target.length:
parents.append(parent)
parent = parent.parent if parent else None
parents = parents
for parent in reversed(parents):
bone = self.bones.add()
if parent:
bone.source = parent.name
copy_rot = self.constraints.add()
copy_rot.flavour = 'COPY_ROTATION'
copy_sca = self.constraints.add()
copy_sca.flavour = 'COPY_SCALE'
limit_sca = self.constraints.add()
limit_sca.flavour = 'LIMIT_SCALE'
limit_rot = self.constraints.add()
limit_rot.flavour = 'LIMIT_ROTATION'
limit_rot = self.constraints.add()
limit_rot.flavour = 'LIMIT_ROTATION'
for _ in [self.bones[-1].gizmo, self.bones[-1].stretch]:
self.constraints.add()
self.constraints.add()
for _ in [self.bones[0].gizmo, self.bones[0].stretch]:
self.constraints.add()
self.constraints.add()
ik_settings = ['ik_stretch', 'lock_ik_x', 'lock_ik_y', 'lock_ik_z', 'ik_stiffness_x', 'ik_stiffness_y', 'ik_stiffness_z', 'use_ik_limit_x', 'ik_min_x', 'ik_max_x', 'use_ik_limit_y', 'ik_min_y', 'ik_max_y', 'use_ik_limit_z', 'ik_min_z', 'ik_max_z']
for bone in self.bones:
for _ in [bone.gizmo, bone.stretch]:
for _ in ik_settings:
self.drivers.add()
for _ in [b.gizmo for b in self.bones]:
self.drivers.add()
self.is_editing = False
|
B.L.E.N.D
|
positive
|
def geteditstats(forest1, forest2):
"""Recursively get edit distance."""
try:
return geteditstats.mem[forest1, forest2]
except KeyError:
pass
flatforest1 = forest1 if forest1 == () else forest1[:-1] + tuple(forest1[-1][:])
flatforest2 = forest2 if forest2 == () else forest2[:-1] + tuple(forest2[-1][:])
if forest2 == ():
if forest1 == ():
result = EditStats(0, 0, ())
else:
<DeepExtract>
try:
tmp = geteditstats.mem[flatforest1, ()]
except KeyError:
pass
flatforest1 = flatforest1 if flatforest1 == () else flatforest1[:-1] + tuple(flatforest1[-1][:])
flatforest2 = () if () == () else ()[:-1] + tuple(()[-1][:])
if () == ():
if flatforest1 == ():
result = EditStats(0, 0, ())
else:
tmp = geteditstats(flatforest1, ())
result = EditStats(tmp.distance + 1, tmp.matched, (('D', flatforest1[-1], None),) + tmp.editscript)
elif flatforest1 == ():
tmp = geteditstats((), flatforest2)
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, ()[-1]),) + tmp.editscript)
else:
node1 = flatforest1[-1]
node2 = ()[-1]
tmp = geteditstats(flatforest1, ())
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
tmp = geteditstats(flatforest1, flatforest2)
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(flatforest1[:-1], ()[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[flatforest1, ()] = result
tmp = result
</DeepExtract>
result = EditStats(tmp.distance + 1, tmp.matched, (('D', forest1[-1], None),) + tmp.editscript)
elif forest1 == ():
<DeepExtract>
try:
tmp = geteditstats.mem[(), flatforest2]
except KeyError:
pass
flatforest1 = () if () == () else ()[:-1] + tuple(()[-1][:])
flatforest2 = flatforest2 if flatforest2 == () else flatforest2[:-1] + tuple(flatforest2[-1][:])
if flatforest2 == ():
if () == ():
result = EditStats(0, 0, ())
else:
tmp = geteditstats(flatforest1, ())
result = EditStats(tmp.distance + 1, tmp.matched, (('D', ()[-1], None),) + tmp.editscript)
elif () == ():
tmp = geteditstats((), flatforest2)
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, flatforest2[-1]),) + tmp.editscript)
else:
node1 = ()[-1]
node2 = flatforest2[-1]
tmp = geteditstats(flatforest1, flatforest2)
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
tmp = geteditstats((), flatforest2)
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(()[:-1], flatforest2[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[(), flatforest2] = result
tmp = result
</DeepExtract>
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, forest2[-1]),) + tmp.editscript)
else:
node1 = forest1[-1]
node2 = forest2[-1]
<DeepExtract>
try:
tmp = geteditstats.mem[flatforest1, forest2]
except KeyError:
pass
flatforest1 = flatforest1 if flatforest1 == () else flatforest1[:-1] + tuple(flatforest1[-1][:])
flatforest2 = forest2 if forest2 == () else forest2[:-1] + tuple(forest2[-1][:])
if forest2 == ():
if flatforest1 == ():
result = EditStats(0, 0, ())
else:
tmp = geteditstats(flatforest1, ())
result = EditStats(tmp.distance + 1, tmp.matched, (('D', flatforest1[-1], None),) + tmp.editscript)
elif flatforest1 == ():
tmp = geteditstats((), flatforest2)
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, forest2[-1]),) + tmp.editscript)
else:
node1 = flatforest1[-1]
node2 = forest2[-1]
tmp = geteditstats(flatforest1, forest2)
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
tmp = geteditstats(flatforest1, flatforest2)
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(flatforest1[:-1], forest2[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[flatforest1, forest2] = result
tmp = result
</DeepExtract>
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
<DeepExtract>
try:
tmp = geteditstats.mem[forest1, flatforest2]
except KeyError:
pass
flatforest1 = forest1 if forest1 == () else forest1[:-1] + tuple(forest1[-1][:])
flatforest2 = flatforest2 if flatforest2 == () else flatforest2[:-1] + tuple(flatforest2[-1][:])
if flatforest2 == ():
if forest1 == ():
result = EditStats(0, 0, ())
else:
tmp = geteditstats(flatforest1, ())
result = EditStats(tmp.distance + 1, tmp.matched, (('D', forest1[-1], None),) + tmp.editscript)
elif forest1 == ():
tmp = geteditstats((), flatforest2)
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, flatforest2[-1]),) + tmp.editscript)
else:
node1 = forest1[-1]
node2 = flatforest2[-1]
tmp = geteditstats(flatforest1, flatforest2)
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
tmp = geteditstats(forest1, flatforest2)
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(forest1[:-1], flatforest2[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[forest1, flatforest2] = result
tmp = result
</DeepExtract>
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(forest1[:-1], forest2[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[forest1, forest2] = result
return result
|
def geteditstats(forest1, forest2):
"""Recursively get edit distance."""
try:
return geteditstats.mem[forest1, forest2]
except KeyError:
pass
flatforest1 = forest1 if forest1 == () else forest1[:-1] + tuple(forest1[-1][:])
flatforest2 = forest2 if forest2 == () else forest2[:-1] + tuple(forest2[-1][:])
if forest2 == ():
if forest1 == ():
result = EditStats(0, 0, ())
else:
try:
tmp = geteditstats.mem[flatforest1, ()]
except KeyError:
pass
flatforest1 = flatforest1 if flatforest1 == () else flatforest1[:-1] + tuple(flatforest1[-1][:])
flatforest2 = () if () == () else ()[:-1] + tuple(()[-1][:])
if () == ():
if flatforest1 == ():
result = EditStats(0, 0, ())
else:
tmp = geteditstats(flatforest1, ())
result = EditStats(tmp.distance + 1, tmp.matched, (('D', flatforest1[-1], None),) + tmp.editscript)
elif flatforest1 == ():
tmp = geteditstats((), flatforest2)
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, ()[-1]),) + tmp.editscript)
else:
node1 = flatforest1[-1]
node2 = ()[-1]
tmp = geteditstats(flatforest1, ())
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
tmp = geteditstats(flatforest1, flatforest2)
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(flatforest1[:-1], ()[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[flatforest1, ()] = result
tmp = result
result = EditStats(tmp.distance + 1, tmp.matched, (('D', forest1[-1], None),) + tmp.editscript)
elif forest1 == ():
try:
tmp = geteditstats.mem[(), flatforest2]
except KeyError:
pass
flatforest1 = () if () == () else ()[:-1] + tuple(()[-1][:])
flatforest2 = flatforest2 if flatforest2 == () else flatforest2[:-1] + tuple(flatforest2[-1][:])
if flatforest2 == ():
if () == ():
result = EditStats(0, 0, ())
else:
tmp = geteditstats(flatforest1, ())
result = EditStats(tmp.distance + 1, tmp.matched, (('D', ()[-1], None),) + tmp.editscript)
elif () == ():
tmp = geteditstats((), flatforest2)
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, flatforest2[-1]),) + tmp.editscript)
else:
node1 = ()[-1]
node2 = flatforest2[-1]
tmp = geteditstats(flatforest1, flatforest2)
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
tmp = geteditstats((), flatforest2)
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(()[:-1], flatforest2[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[(), flatforest2] = result
tmp = result
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, forest2[-1]),) + tmp.editscript)
else:
node1 = forest1[-1]
node2 = forest2[-1]
try:
tmp = geteditstats.mem[flatforest1, forest2]
except KeyError:
pass
flatforest1 = flatforest1 if flatforest1 == () else flatforest1[:-1] + tuple(flatforest1[-1][:])
flatforest2 = forest2 if forest2 == () else forest2[:-1] + tuple(forest2[-1][:])
if forest2 == ():
if flatforest1 == ():
result = EditStats(0, 0, ())
else:
tmp = geteditstats(flatforest1, ())
result = EditStats(tmp.distance + 1, tmp.matched, (('D', flatforest1[-1], None),) + tmp.editscript)
elif flatforest1 == ():
tmp = geteditstats((), flatforest2)
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, forest2[-1]),) + tmp.editscript)
else:
node1 = flatforest1[-1]
node2 = forest2[-1]
tmp = geteditstats(flatforest1, forest2)
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
tmp = geteditstats(flatforest1, flatforest2)
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(flatforest1[:-1], forest2[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[flatforest1, forest2] = result
tmp = result
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
try:
tmp = geteditstats.mem[forest1, flatforest2]
except KeyError:
pass
flatforest1 = forest1 if forest1 == () else forest1[:-1] + tuple(forest1[-1][:])
flatforest2 = flatforest2 if flatforest2 == () else flatforest2[:-1] + tuple(flatforest2[-1][:])
if flatforest2 == ():
if forest1 == ():
result = EditStats(0, 0, ())
else:
tmp = geteditstats(flatforest1, ())
result = EditStats(tmp.distance + 1, tmp.matched, (('D', forest1[-1], None),) + tmp.editscript)
elif forest1 == ():
tmp = geteditstats((), flatforest2)
result = EditStats(tmp.distance + 1, tmp.matched, (('I', None, flatforest2[-1]),) + tmp.editscript)
else:
node1 = forest1[-1]
node2 = flatforest2[-1]
tmp = geteditstats(flatforest1, flatforest2)
deletestats = EditStats(tmp.distance + 1, tmp.matched, (('D', node1, None),) + tmp.editscript)
tmp = geteditstats(forest1, flatforest2)
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(forest1[:-1], flatforest2[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[forest1, flatforest2] = result
tmp = result
insertstats = EditStats(tmp.distance + 1, tmp.matched, (('I', None, node2),) + tmp.editscript)
matchorswapstats = geteditstats(tuple(node1[:]), tuple(node2[:])) + geteditstats(forest1[:-1], forest2[:-1])
if node1.label == node2.label:
matchorswapstats = EditStats(matchorswapstats.distance, matchorswapstats.matched + 1, matchorswapstats.editscript)
else:
matchorswapstats = EditStats(matchorswapstats.distance + 1, matchorswapstats.matched, (('S', node1, node2),) + matchorswapstats.editscript)
result = min(deletestats, insertstats, matchorswapstats)
geteditstats.mem[forest1, forest2] = result
return result
|
disco-dop
|
positive
|
def inspect_gnomad_low_ac(args):
stats = Counter()
<DeepExtract>
gnomads = {}
contig_prefix = 'chr' if '38' in args.reference_fasta else ''
prefix = defines.gnomad_prefix_hg38 if '38' in args.reference_fasta else defines.gnomad_prefix
postfix = '.liftover.b38.vcf.gz' if '38' in args.reference_fasta else '.vcf.gz'
for i in range(1, 23):
gnomads[contig_prefix + str(i)] = vcf.Reader(open(prefix + str(i) + postfix, 'r'))
gnomads[contig_prefix + 'X'] = vcf.Reader(open(prefix + 'X' + postfix, 'r'))
gnomads = gnomads
</DeepExtract>
for variant in gnomads['1']:
for (i, a) in enumerate(variant.ALT):
if int(variant.INFO['AC'][i]) < 2:
stats['low_ac'] += 1
stats['total'] += 1
if stats['total'] > 300000:
break
for (k, v) in sorted(stats.items()):
print(k, 'has:', stats[k])
print('ratio: %0.2f' % (stats['low_ac'] / stats['total']))
|
def inspect_gnomad_low_ac(args):
stats = Counter()
gnomads = {}
contig_prefix = 'chr' if '38' in args.reference_fasta else ''
prefix = defines.gnomad_prefix_hg38 if '38' in args.reference_fasta else defines.gnomad_prefix
postfix = '.liftover.b38.vcf.gz' if '38' in args.reference_fasta else '.vcf.gz'
for i in range(1, 23):
gnomads[contig_prefix + str(i)] = vcf.Reader(open(prefix + str(i) + postfix, 'r'))
gnomads[contig_prefix + 'X'] = vcf.Reader(open(prefix + 'X' + postfix, 'r'))
gnomads = gnomads
for variant in gnomads['1']:
for (i, a) in enumerate(variant.ALT):
if int(variant.INFO['AC'][i]) < 2:
stats['low_ac'] += 1
stats['total'] += 1
if stats['total'] > 300000:
break
for (k, v) in sorted(stats.items()):
print(k, 'has:', stats[k])
print('ratio: %0.2f' % (stats['low_ac'] / stats['total']))
|
dsde-deep-learning
|
positive
|
def _parse_help(node):
if node.help is not None:
<DeepExtract>
if not self.warn:
return
node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used' = 'warning: ' + node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used'
if filename is not None:
node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used' = '{}:{}: {}'.format(filename, linenr, node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used')
self.warnings.append(node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used')
if self.warn_to_stderr:
sys.stderr.write(node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used' + '\n')
</DeepExtract>
readline = self._readline
while 1:
line = readline()
self.linenr += 1
if not line:
<DeepExtract>
self._warn(node.item.name_and_loc + " has 'help' but empty help text")
node.help = ''
if line:
self._line_after_help(line)
</DeepExtract>
return
if not line.isspace():
break
len_ = len
expline = line.expandtabs()
indent = len_(expline) - len_(expline.lstrip())
if not indent:
<DeepExtract>
self._warn(node.item.name_and_loc + " has 'help' but empty help text")
node.help = ''
if line:
self._line_after_help(line)
</DeepExtract>
return
lines = [expline[indent:]]
add_line = lines.append
while 1:
line = readline()
if line.isspace():
add_line('\n')
elif not line:
break
else:
expline = line.expandtabs()
if len_(expline) - len_(expline.lstrip()) < indent:
break
add_line(expline[indent:])
self.linenr += len_(lines)
node.help = ''.join(lines).rstrip()
if line:
<DeepExtract>
while line.endswith('\\\n'):
line = line[:-2] + self._readline()
self.linenr += 1
self._tokens = self._tokenize(line)
self._reuse_tokens = True
</DeepExtract>
|
def _parse_help(node):
if node.help is not None:
if not self.warn:
return
node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used' = 'warning: ' + node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used'
if filename is not None:
node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used' = '{}:{}: {}'.format(filename, linenr, node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used')
self.warnings.append(node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used')
if self.warn_to_stderr:
sys.stderr.write(node.item.name_and_loc + ' defined with more than one help text -- only the last one will be used' + '\n')
readline = self._readline
while 1:
line = readline()
self.linenr += 1
if not line:
self._warn(node.item.name_and_loc + " has 'help' but empty help text")
node.help = ''
if line:
self._line_after_help(line)
return
if not line.isspace():
break
len_ = len
expline = line.expandtabs()
indent = len_(expline) - len_(expline.lstrip())
if not indent:
self._warn(node.item.name_and_loc + " has 'help' but empty help text")
node.help = ''
if line:
self._line_after_help(line)
return
lines = [expline[indent:]]
add_line = lines.append
while 1:
line = readline()
if line.isspace():
add_line('\n')
elif not line:
break
else:
expline = line.expandtabs()
if len_(expline) - len_(expline.lstrip()) < indent:
break
add_line(expline[indent:])
self.linenr += len_(lines)
node.help = ''.join(lines).rstrip()
if line:
while line.endswith('\\\n'):
line = line[:-2] + self._readline()
self.linenr += 1
self._tokens = self._tokenize(line)
self._reuse_tokens = True
|
cello
|
positive
|
def main(args):
<DeepExtract>
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
cfg = cfg
</DeepExtract>
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
|
def main(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
cfg = cfg
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
|
DenseCL
|
positive
|
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
label_ids = features['label_ids']
is_real_example = None
if 'is_real_example' in features:
is_real_example = tf.cast(features['is_real_example'], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = mode == tf.estimator.ModeKeys.TRAIN
<DeepExtract>
model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable('output_weights', [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable('output_bias', [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope('loss'):
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
(total_loss, per_example_loss, logits, probabilities) = (loss, per_example_loss, logits, probabilities)
</DeepExtract>
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info('**** Trainable Variables ****')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions={'probabilities': probabilities}, scaffold_fn=scaffold_fn)
return output_spec
return model_fn
|
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
label_ids = features['label_ids']
is_real_example = None
if 'is_real_example' in features:
is_real_example = tf.cast(features['is_real_example'], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = mode == tf.estimator.ModeKeys.TRAIN
model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable('output_weights', [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable('output_bias', [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope('loss'):
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
(total_loss, per_example_loss, logits, probabilities) = (loss, per_example_loss, logits, probabilities)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info('**** Trainable Variables ****')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions={'probabilities': probabilities}, scaffold_fn=scaffold_fn)
return output_spec
return model_fn
|
BERT-for-Sequence-Labeling-and-Text-Classification
|
positive
|
def _apply_metric_at_given_lead(verif, verif_dates, lead, initialized=None, hist=None, inits=None, reference=None, metric=None, comparison=None, dim=None, **metric_kwargs):
"""Apply a metric between two time series at a given lead.
Args:
verif (xr.Dataset): Verification data.
verif_dates (dict): Lead-dependent verification dates for alignment.
lead (int): Given lead to score.
initialized (xr.Dataset): Initialized hindcast. Not required in a persistence
forecast.
hist (xr.Dataset): Uninitialized/historical simulation. Required when
``reference='uninitialized'``.
inits (dict): Lead-dependent initialization dates for alignment.
reference (str): If not ``None``, return score for this reference forecast.
* 'persistence'
* 'uninitialized'
metric (Metric): Metric class for scoring.
comparison (Comparison): Comparison class.
dim (str): Dimension to apply metric over.
Returns:
result (xr.Dataset): Metric results for the given lead for the initialized
forecast or reference forecast.
"""
if reference is None:
lforecast = initialized.sel(lead=lead).where(initialized['time'].isin(inits[lead]), drop=True)
lverif = verif.sel(time=verif_dates[lead])
elif reference == 'persistence':
(lforecast, lverif) = persistence(verif, inits, verif_dates, lead)
elif reference == 'uninitialized':
(lforecast, lverif) = uninitialized(hist, verif, verif_dates, lead)
elif reference == 'climatology':
(lforecast, lverif) = climatology(verif, inits, verif_dates, lead)
if reference is not None:
(lforecast, dim) = _adapt_member_for_reference_forecast(lforecast, lverif, metric, comparison, dim)
assert lforecast.time.size == lverif.time.size, print(lforecast.time.to_index(), lverif.time.to_index(), reference)
lforecast['time'] = lverif['time']
<DeepExtract>
if 'init' in dim and 'time' in initialized.dims and ('time' in verif.dims):
dim = dim.copy()
dim.remove('init')
dim = dim + ['time']
elif 'time' in dim and 'init' in initialized.dims and ('init' in verif.dims):
dim = dim.copy()
dim.remove('time')
dim = dim + ['init']
elif 'init' in dim and 'time' in initialized.dims and ('time' in verif.dims):
dim = dim.copy()
dim.remove('init')
dim = dim + ['time']
dim = dim
</DeepExtract>
if metric.normalize or metric.allows_logical:
metric_kwargs['comparison'] = comparison
result = metric.function(lforecast, lverif, dim=dim, **metric_kwargs)
log_hindcast_verify_inits_and_verifs(dim, lead, inits, verif_dates, reference)
if 'time' in result.dims:
(n, freq) = get_lead_cftime_shift_args(initialized.lead.attrs['units'], lead)
result = result.assign_coords(time=shift_cftime_singular(result.time, -n, freq))
if 'valid_time' in result.coords:
if is_dask_collection(result.coords['valid_time']):
result.coords['valid_time'] = result.coords['valid_time'].compute()
return result
|
def _apply_metric_at_given_lead(verif, verif_dates, lead, initialized=None, hist=None, inits=None, reference=None, metric=None, comparison=None, dim=None, **metric_kwargs):
"""Apply a metric between two time series at a given lead.
Args:
verif (xr.Dataset): Verification data.
verif_dates (dict): Lead-dependent verification dates for alignment.
lead (int): Given lead to score.
initialized (xr.Dataset): Initialized hindcast. Not required in a persistence
forecast.
hist (xr.Dataset): Uninitialized/historical simulation. Required when
``reference='uninitialized'``.
inits (dict): Lead-dependent initialization dates for alignment.
reference (str): If not ``None``, return score for this reference forecast.
* 'persistence'
* 'uninitialized'
metric (Metric): Metric class for scoring.
comparison (Comparison): Comparison class.
dim (str): Dimension to apply metric over.
Returns:
result (xr.Dataset): Metric results for the given lead for the initialized
forecast or reference forecast.
"""
if reference is None:
lforecast = initialized.sel(lead=lead).where(initialized['time'].isin(inits[lead]), drop=True)
lverif = verif.sel(time=verif_dates[lead])
elif reference == 'persistence':
(lforecast, lverif) = persistence(verif, inits, verif_dates, lead)
elif reference == 'uninitialized':
(lforecast, lverif) = uninitialized(hist, verif, verif_dates, lead)
elif reference == 'climatology':
(lforecast, lverif) = climatology(verif, inits, verif_dates, lead)
if reference is not None:
(lforecast, dim) = _adapt_member_for_reference_forecast(lforecast, lverif, metric, comparison, dim)
assert lforecast.time.size == lverif.time.size, print(lforecast.time.to_index(), lverif.time.to_index(), reference)
lforecast['time'] = lverif['time']
if 'init' in dim and 'time' in initialized.dims and ('time' in verif.dims):
dim = dim.copy()
dim.remove('init')
dim = dim + ['time']
elif 'time' in dim and 'init' in initialized.dims and ('init' in verif.dims):
dim = dim.copy()
dim.remove('time')
dim = dim + ['init']
elif 'init' in dim and 'time' in initialized.dims and ('time' in verif.dims):
dim = dim.copy()
dim.remove('init')
dim = dim + ['time']
dim = dim
if metric.normalize or metric.allows_logical:
metric_kwargs['comparison'] = comparison
result = metric.function(lforecast, lverif, dim=dim, **metric_kwargs)
log_hindcast_verify_inits_and_verifs(dim, lead, inits, verif_dates, reference)
if 'time' in result.dims:
(n, freq) = get_lead_cftime_shift_args(initialized.lead.attrs['units'], lead)
result = result.assign_coords(time=shift_cftime_singular(result.time, -n, freq))
if 'valid_time' in result.coords:
if is_dask_collection(result.coords['valid_time']):
result.coords['valid_time'] = result.coords['valid_time'].compute()
return result
|
climpred
|
positive
|
def b2share_pid_minter(rec_pid, data):
"""Mint EPIC PID for published record."""
epic_pids = [p for p in data['_pid'] if p.get('type') == 'ePIC_PID']
assert len(epic_pids) == 0
<DeepExtract>
endpoint = 'b2share_records_rest.b2rec_item'
url = url_for(endpoint, pid_value=rec_pid.pid_value, _external=True)
url = url.replace('/api/records/', '/records/')
url = url
</DeepExtract>
throw_on_failure = current_app.config.get('CFG_FAIL_ON_MISSING_PID', True)
try:
pid = current_handle.create_handle(url)
if pid is None:
raise EpicPIDError('EPIC PID allocation failed')
data['_pid'].append({'value': pid, 'type': 'ePIC_PID'})
except EpicPIDError as e:
if throw_on_failure:
raise e
else:
current_app.logger.warning(e)
|
def b2share_pid_minter(rec_pid, data):
"""Mint EPIC PID for published record."""
epic_pids = [p for p in data['_pid'] if p.get('type') == 'ePIC_PID']
assert len(epic_pids) == 0
endpoint = 'b2share_records_rest.b2rec_item'
url = url_for(endpoint, pid_value=rec_pid.pid_value, _external=True)
url = url.replace('/api/records/', '/records/')
url = url
throw_on_failure = current_app.config.get('CFG_FAIL_ON_MISSING_PID', True)
try:
pid = current_handle.create_handle(url)
if pid is None:
raise EpicPIDError('EPIC PID allocation failed')
data['_pid'].append({'value': pid, 'type': 'ePIC_PID'})
except EpicPIDError as e:
if throw_on_failure:
raise e
else:
current_app.logger.warning(e)
|
b2share
|
positive
|
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
    2. every encoding from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
<DeepExtract>
content_type = r.headers.get('content-type')
if not content_type:
encoding = None
(content_type, params) = cgi.parse_header(content_type)
if 'charset' in params:
encoding = params['charset'].strip('\'"')
if 'text' in content_type:
encoding = 'ISO-8859-1'
</DeepExtract>
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
|
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
    2. every encoding from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
content_type = r.headers.get('content-type')
if not content_type:
encoding = None
(content_type, params) = cgi.parse_header(content_type)
if 'charset' in params:
encoding = params['charset'].strip('\'"')
if 'text' in content_type:
encoding = 'ISO-8859-1'
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
|
CMS-Exploit-Framework
|
positive
|
def forward(self, input):
<DeepExtract>
code = self.act(self.E_conv5(self.act(self.E_conv4(self.act(self.E_conv3(self.act(self.E_conv2(self.act(self.E_conv1(self.act(self.E_Conv(input))))))))))))
</DeepExtract>
return self._decoder(code)
|
def forward(self, input):
code = self.act(self.E_conv5(self.act(self.E_conv4(self.act(self.E_conv3(self.act(self.E_conv2(self.act(self.E_conv1(self.act(self.E_Conv(input))))))))))))
return self._decoder(code)
|
Component-Divide-and-Conquer-for-Real-World-Image-Super-Resolution
|
positive
|
def testFunctionLengthCheckDefinitionAboveSeverity0(self):
old_verbosity = cpplint._SetVerboseLevel(0)
<DeepExtract>
self.TestFunctionLengthCheckDefinition(self.TriggerLines(0) + 1, 0)
</DeepExtract>
cpplint._SetVerboseLevel(old_verbosity)
|
def testFunctionLengthCheckDefinitionAboveSeverity0(self):
old_verbosity = cpplint._SetVerboseLevel(0)
self.TestFunctionLengthCheckDefinition(self.TriggerLines(0) + 1, 0)
cpplint._SetVerboseLevel(old_verbosity)
|
cpplint
|
positive
|
def _validate_mapping(value, key_validator=None, value_validator=None, required=True):
if value is None:
if not required:
return
raise TypeError('required value is None')
if not isinstance(value, dict):
raise TypeError("expected 'dict', but value is of type {cls!r}".format(cls=value.__class__.__name__))
for (item_key, item_value) in value.items():
if key_validator is not None:
try:
key_validator(item_key)
except (TypeError, ValueError, KeyError):
<DeepExtract>
(exc_type, exc_value, exc_traceback) = sys.exc_info()
supported_exceptions = (TypeError, ValueError, KeyError, IndexError, AssertionError)
if exc_type not in supported_exceptions:
return
if len(exc_value.args) != 1:
return
if not isinstance(exc_value.args[0], str):
return
message = '{context}: {message}'.format(context='invalid key {key!r}'.format(key=item_key), message=exc_value.args[0])
six.raise_from(exc_type(message), exc_value)
</DeepExtract>
raise
if value_validator is not None:
try:
value_validator(item_value)
except (TypeError, ValueError, KeyError):
<DeepExtract>
(exc_type, exc_value, exc_traceback) = sys.exc_info()
supported_exceptions = (TypeError, ValueError, KeyError, IndexError, AssertionError)
if exc_type not in supported_exceptions:
return
if len(exc_value.args) != 1:
return
if not isinstance(exc_value.args[0], str):
return
message = '{context}: {message}'.format(context='invalid value for key {key!r}'.format(key=item_key), message=exc_value.args[0])
six.raise_from(exc_type(message), exc_value)
</DeepExtract>
raise
|
def _validate_mapping(value, key_validator=None, value_validator=None, required=True):
if value is None:
if not required:
return
raise TypeError('required value is None')
if not isinstance(value, dict):
raise TypeError("expected 'dict', but value is of type {cls!r}".format(cls=value.__class__.__name__))
for (item_key, item_value) in value.items():
if key_validator is not None:
try:
key_validator(item_key)
except (TypeError, ValueError, KeyError):
(exc_type, exc_value, exc_traceback) = sys.exc_info()
supported_exceptions = (TypeError, ValueError, KeyError, IndexError, AssertionError)
if exc_type not in supported_exceptions:
return
if len(exc_value.args) != 1:
return
if not isinstance(exc_value.args[0], str):
return
message = '{context}: {message}'.format(context='invalid key {key!r}'.format(key=item_key), message=exc_value.args[0])
six.raise_from(exc_type(message), exc_value)
raise
if value_validator is not None:
try:
value_validator(item_value)
except (TypeError, ValueError, KeyError):
(exc_type, exc_value, exc_traceback) = sys.exc_info()
supported_exceptions = (TypeError, ValueError, KeyError, IndexError, AssertionError)
if exc_type not in supported_exceptions:
return
if len(exc_value.args) != 1:
return
if not isinstance(exc_value.args[0], str):
return
message = '{context}: {message}'.format(context='invalid value for key {key!r}'.format(key=item_key), message=exc_value.args[0])
six.raise_from(exc_type(message), exc_value)
raise
|
dirty-leds
|
positive
|
def convert_tptp_exists(expression):
variable = convert_tptp(expression.variable).upper()
<DeepExtract>
if isinstance(expression.term, ApplicationExpression):
tptp_str = convert_tptp_application(expression.term)
elif isinstance(expression.term, EqualityExpression):
tptp_str = convert_tptp_equality(expression.term)
elif isinstance(expression.term, AndExpression):
tptp_str = convert_tptp_and(expression.term)
elif isinstance(expression.term, OrExpression):
tptp_str = convert_tptp_or(expression.term)
elif isinstance(expression.term, ImpExpression):
tptp_str = convert_tptp_imp(expression.term)
elif isinstance(expression.term, IffExpression):
tptp_str = convert_tptp_iff(expression.term)
elif isinstance(expression.term, NegatedExpression):
tptp_str = convert_tptp_not(expression.term)
elif isinstance(expression.term, ExistsExpression):
tptp_str = convert_tptp_exists(expression.term)
elif isinstance(expression.term, AllExpression):
tptp_str = convert_tptp_all(expression.term)
elif isinstance(expression.term, LambdaExpression):
tptp_str = convert_tptp_lambda(expression.term)
elif isinstance(expression.term, IndividualVariableExpression):
tptp_str = str(expression.term.variable).upper()
elif isinstance(expression.term, EventVariableExpression):
tptp_str = str(expression.term.variable).upper()
elif isinstance(expression.term, FunctionVariableExpression):
tptp_str = str(expression.term.variable).upper()
elif isinstance(expression.term, ConstantExpression):
tptp_str = str(expression.term.variable).lower()
if tptp_str[0] == '_':
tptp_str = tptp_str[1:]
else:
tptp_str = str(expression.term)
term = tptp_str
</DeepExtract>
if variable[0] == 'D':
tptp_str = '?[' + variable + ':$int]: ' + Tokens.OPEN + term + Tokens.CLOSE
else:
tptp_str = '?[' + variable + ']: ' + Tokens.OPEN + term + Tokens.CLOSE
return tptp_str
|
def convert_tptp_exists(expression):
variable = convert_tptp(expression.variable).upper()
if isinstance(expression.term, ApplicationExpression):
tptp_str = convert_tptp_application(expression.term)
elif isinstance(expression.term, EqualityExpression):
tptp_str = convert_tptp_equality(expression.term)
elif isinstance(expression.term, AndExpression):
tptp_str = convert_tptp_and(expression.term)
elif isinstance(expression.term, OrExpression):
tptp_str = convert_tptp_or(expression.term)
elif isinstance(expression.term, ImpExpression):
tptp_str = convert_tptp_imp(expression.term)
elif isinstance(expression.term, IffExpression):
tptp_str = convert_tptp_iff(expression.term)
elif isinstance(expression.term, NegatedExpression):
tptp_str = convert_tptp_not(expression.term)
elif isinstance(expression.term, ExistsExpression):
tptp_str = convert_tptp_exists(expression.term)
elif isinstance(expression.term, AllExpression):
tptp_str = convert_tptp_all(expression.term)
elif isinstance(expression.term, LambdaExpression):
tptp_str = convert_tptp_lambda(expression.term)
elif isinstance(expression.term, IndividualVariableExpression):
tptp_str = str(expression.term.variable).upper()
elif isinstance(expression.term, EventVariableExpression):
tptp_str = str(expression.term.variable).upper()
elif isinstance(expression.term, FunctionVariableExpression):
tptp_str = str(expression.term.variable).upper()
elif isinstance(expression.term, ConstantExpression):
tptp_str = str(expression.term.variable).lower()
if tptp_str[0] == '_':
tptp_str = tptp_str[1:]
else:
tptp_str = str(expression.term)
term = tptp_str
if variable[0] == 'D':
tptp_str = '?[' + variable + ':$int]: ' + Tokens.OPEN + term + Tokens.CLOSE
else:
tptp_str = '?[' + variable + ']: ' + Tokens.OPEN + term + Tokens.CLOSE
return tptp_str
|
ccg2lambda
|
positive
|
def update_edge(self, _id, data, keys=None):
"""
Updates the edge with the _id and returns the Response.
:param _id: Edge ID.
:type _id: dict
:param data: Property data.
:type data: dict
:rtype: Neo4jResponse
"""
if keys or self.config.autoindex is True:
index_name = self.config.edge_index
return self.update_indexed_edge(_id, data, index_name, keys=keys)
path = build_path(edge_path, _id, 'properties')
<DeepExtract>
data = data or {}
clean_data = [(k, data[k]) for k in data if data[k] is not None]
params = dict(clean_data)
</DeepExtract>
return self.request.put(path, params)
|
def update_edge(self, _id, data, keys=None):
"""
Updates the edge with the _id and returns the Response.
:param _id: Edge ID.
:type _id: dict
:param data: Property data.
:type data: dict
:rtype: Neo4jResponse
"""
if keys or self.config.autoindex is True:
index_name = self.config.edge_index
return self.update_indexed_edge(_id, data, index_name, keys=keys)
path = build_path(edge_path, _id, 'properties')
data = data or {}
clean_data = [(k, data[k]) for k in data if data[k] is not None]
params = dict(clean_data)
return self.request.put(path, params)
|
bulbs
|
positive
|
def test_peer_reviews_page(self):
<DeepExtract>
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
</DeepExtract>
response = client.post('/course/1/peer_reviews')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course', response.content)
self.assertIn(b'Peer Review', response.content)
self.assertIn(b'view_submission(1);', response.content)
|
def test_peer_reviews_page(self):
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
response = client.post('/course/1/peer_reviews')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course', response.content)
self.assertIn(b'Peer Review', response.content)
self.assertIn(b'view_submission(1);', response.content)
|
academicstoday-django
|
positive
|
def _combine_unused_states(net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
<DeepExtract>
assert data_format != INVALID
assert len(net[-1].shape) == 4
if data_format == 'NHWC':
final_num_filters = int(net[-1].shape[3])
elif data_format == 'NCHW':
final_num_filters = int(net[-1].shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
</DeepExtract>
assert len(used_hiddenstates) == len(net)
for (idx, used_h) in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
<DeepExtract>
assert data_format != INVALID
assert len(net[idx].shape) == 4
if data_format == 'NHWC':
curr_num_filters = int(net[idx].shape[3])
elif data_format == 'NCHW':
curr_num_filters = int(net[idx].shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
</DeepExtract>
should_reduce = final_num_filters != curr_num_filters
should_reduce = final_height != curr_height or should_reduce
should_reduce = should_reduce and (not used_h)
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
<DeepExtract>
assert final_num_filters % 2 == 0, 'Need even number of filters when using this factorized reduction.'
assert data_format != INVALID
if stride == 1:
net[idx] = slim.conv2d(net[idx], final_num_filters, 1, scope='path_conv')
net[idx] = slim.batch_norm(net[idx], scope='path_bn')
net[idx][idx] = net[idx]
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
path1 = tf.nn.avg_pool(net[idx], [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path1 = slim.conv2d(path1, int(final_num_filters / 2), 1, scope='path1_conv')
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(net[idx], pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(net[idx], pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path2 = slim.conv2d(path2, int(final_num_filters / 2), 1, scope='path2_conv')
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
net[idx][idx] = final_path
</DeepExtract>
states_to_combine = [h for (h, is_used) in zip(net, used_hiddenstates) if not is_used]
<DeepExtract>
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
concat_axis = axis
</DeepExtract>
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
|
def _combine_unused_states(net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
assert data_format != INVALID
assert len(net[-1].shape) == 4
if data_format == 'NHWC':
final_num_filters = int(net[-1].shape[3])
elif data_format == 'NCHW':
final_num_filters = int(net[-1].shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
assert len(used_hiddenstates) == len(net)
for (idx, used_h) in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
assert data_format != INVALID
assert len(net[idx].shape) == 4
if data_format == 'NHWC':
curr_num_filters = int(net[idx].shape[3])
elif data_format == 'NCHW':
curr_num_filters = int(net[idx].shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
should_reduce = final_num_filters != curr_num_filters
should_reduce = final_height != curr_height or should_reduce
should_reduce = should_reduce and (not used_h)
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
assert final_num_filters % 2 == 0, 'Need even number of filters when using this factorized reduction.'
assert data_format != INVALID
if stride == 1:
net[idx] = slim.conv2d(net[idx], final_num_filters, 1, scope='path_conv')
net[idx] = slim.batch_norm(net[idx], scope='path_bn')
net[idx][idx] = net[idx]
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
path1 = tf.nn.avg_pool(net[idx], [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path1 = slim.conv2d(path1, int(final_num_filters / 2), 1, scope='path1_conv')
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(net[idx], pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(net[idx], pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path2 = slim.conv2d(path2, int(final_num_filters / 2), 1, scope='path2_conv')
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
net[idx][idx] = final_path
states_to_combine = [h for (h, is_used) in zip(net, used_hiddenstates) if not is_used]
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
concat_axis = axis
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
|
CBAM-tensorflow-slim
|
positive
|
def __init__(self, x, rootDir=None, data=None):
"""Create a Song oject."""
self.tags = {}
self.fingerprint = None
self._decode_properties = None
self.cuesheet = []
if type(x) == sqlite3.Row or hasattr(x, 'keys'):
self.id = x['id']
self._root = x['root']
self._path = x['path']
self._mtime = float(x['mtime'])
self._coverWidth = x['coverwidth']
self._coverHeight = x['coverheight']
self._coverMD5 = x['covermd5']
self.isValid = True
return
self.isValid = False
self._root = rootDir or ''
if data:
self._path = None
self._description = '<song in memory>'
<DeepExtract>
fileinfo = data if isinstance(data, str) else 'file-like object'
if hasattr(data, 'seek'):
data.seek(0)
try:
self.metadata = mutagen.File(data)
except mutagen.mp3.HeaderNotFoundError as e:
print(TerminalColors.Error + 'Error' + TerminalColors.ENDC + ' reading %s:' % data, e)
raise
if not self.metadata:
print('No metadata found for %s : This will probably cause problems' % data)
formattext = {mutagen.mp3.EasyMP3: 'mp3', mutagen.mp3.MP3: 'mp3', mutagen.easymp4.EasyMP4: 'mp4', mutagen.mp4.MP4: 'mp4', mutagen.asf.ASF: 'asf', mutagen.flac.FLAC: 'flac', mutagen.oggvorbis.OggVorbis: 'ogg', mutagen.oggopus.OggOpus: 'opus', mutagen.wavpack.WavPack: 'wv', mutagen.monkeysaudio.MonkeysAudio: 'ape', mutagen.musepack.Musepack: 'mpc', mutagen.wave.WAVE: 'wav', mutagen.dsf.DSF: 'dsf'}
self._format = formattext[type(self.metadata)]
(audiodata, properties) = decodeAudio(data)
self._audioSha256sum = calculateSHA256_data(audiodata)
self._decode_properties = properties
if self.metadata:
if not getattr(self.metadata.info, 'bits_per_sample', None):
self.metadata.info.bits_per_sample = properties.decoded_bytes_per_sample * 8
if not getattr(self.metadata.info, 'bitrate', None):
self.metadata.info.bitrate = properties.stream_bitrate or properties.container_bitrate
if getattr(self.metadata.info, 'length', 0) == 0:
warning_level = DecodeMessageRecord.level_value('Warning')
record = DecodeMessageRecord(0, warning_level, 'Length cannot be read with mutagen. Using decoded data to obtain length')
self._decode_properties.messages.append(record)
self.metadata.info.length = properties.decoded_duration
if properties.messages:
print('\n'.join([str(x) for x in properties.messages]))
if config.config['enable_internal_checks']:
ffprobe_metadata = FFProbeMetadata(self.path())
try:
tmp_bits = int(ffprobe_metadata['streams.stream.0.bits_per_raw_sample'])
except ValueError:
print("ffprobe doesn't provide bits per sample")
else:
if self.metadata.info.bits_per_sample != tmp_bits:
msg = 'bits_per_sample different! ffprobe: %d != bard_audiofile: %d' % (tmp_bits, self.metadata.info.bits_per_sample)
raise Exception(msg)
tmp_bitrate = int(ffprobe_metadata['format.bit_rate'])
if properties.stream_bitrate != tmp_bitrate and properties.container_bitrate != tmp_bitrate:
msg = 'bit_rate different! ffprobe: %d != bard_audiofile: %d' % (tmp_bitrate or 0, properties.bitrate or 0)
raise Exception(msg)
print('ffprobe check ' + TerminalColors.Ok + 'OK' + TerminalColors.ENDC)
try:
audio_segment = audioSegmentFromDataProperties(audiodata, properties)
except ValueError as exc:
print(f'Error processing {fileinfo}: {exc}')
raise
thr = Song.silence_threshold
minlen = Song.min_silence_length
silences = detect_silence_at_beginning_and_end(audio_segment, min_silence_len=minlen, silence_thresh=thr)
if silences:
(silence1, silence2) = silences
self._silenceAtStart = (silence1[1] - silence1[0]) / 1000
self._silenceAtEnd = (silence2[1] - silence2[0]) / 1000
if self.metadata:
try:
image = extractFrontCover(self.metadata)
except OSError:
print('Error extracting image from %s' % fileinfo)
raise
if image:
(image, imagedata) = image
self._coverWidth = image.width
self._coverHeight = image.height
self._coverMD5 = md5FromData(imagedata)
try:
self._mtime = os.path.getmtime(data)
except TypeError:
self._mtime = None
self._fileSha256sum = calculateFileSHA256(data)
self.fingerprint = self.getAcoustidFingerprint_data(audiodata, properties)
if self.metadata and getattr(self.metadata, 'cuesheet', None):
for track in self.metadata.cuesheet.tracks:
if track.track_number == 255 or track.start_offset == self.metadata.info.total_samples:
continue
timepos = self.metadata.info.length * track.start_offset / self.metadata.info.total_samples
try:
title = self.metadata['SUBTRACKTITLES'][track.track_number - 1]
except (KeyError, IndexError):
title = None
ct = CueTrack(track.track_number, track.start_offset, timepos, title)
self.cuesheet.append(ct)
self.isValid = True
</DeepExtract>
else:
self._path = os.path.normpath(x)
self._description = self._path
<DeepExtract>
fileinfo = x if isinstance(x, str) else 'file-like object'
if hasattr(x, 'seek'):
x.seek(0)
try:
self.metadata = mutagen.File(x)
except mutagen.mp3.HeaderNotFoundError as e:
print(TerminalColors.Error + 'Error' + TerminalColors.ENDC + ' reading %s:' % x, e)
raise
if not self.metadata:
print('No metadata found for %s : This will probably cause problems' % x)
formattext = {mutagen.mp3.EasyMP3: 'mp3', mutagen.mp3.MP3: 'mp3', mutagen.easymp4.EasyMP4: 'mp4', mutagen.mp4.MP4: 'mp4', mutagen.asf.ASF: 'asf', mutagen.flac.FLAC: 'flac', mutagen.oggvorbis.OggVorbis: 'ogg', mutagen.oggopus.OggOpus: 'opus', mutagen.wavpack.WavPack: 'wv', mutagen.monkeysaudio.MonkeysAudio: 'ape', mutagen.musepack.Musepack: 'mpc', mutagen.wave.WAVE: 'wav', mutagen.dsf.DSF: 'dsf'}
self._format = formattext[type(self.metadata)]
(audiodata, properties) = decodeAudio(x)
self._audioSha256sum = calculateSHA256_data(audiodata)
self._decode_properties = properties
if self.metadata:
if not getattr(self.metadata.info, 'bits_per_sample', None):
self.metadata.info.bits_per_sample = properties.decoded_bytes_per_sample * 8
if not getattr(self.metadata.info, 'bitrate', None):
self.metadata.info.bitrate = properties.stream_bitrate or properties.container_bitrate
if getattr(self.metadata.info, 'length', 0) == 0:
warning_level = DecodeMessageRecord.level_value('Warning')
record = DecodeMessageRecord(0, warning_level, 'Length cannot be read with mutagen. Using decoded data to obtain length')
self._decode_properties.messages.append(record)
self.metadata.info.length = properties.decoded_duration
if properties.messages:
print('\n'.join([str(x) for x in properties.messages]))
if config.config['enable_internal_checks']:
ffprobe_metadata = FFProbeMetadata(self.path())
try:
tmp_bits = int(ffprobe_metadata['streams.stream.0.bits_per_raw_sample'])
except ValueError:
print("ffprobe doesn't provide bits per sample")
else:
if self.metadata.info.bits_per_sample != tmp_bits:
msg = 'bits_per_sample different! ffprobe: %d != bard_audiofile: %d' % (tmp_bits, self.metadata.info.bits_per_sample)
raise Exception(msg)
tmp_bitrate = int(ffprobe_metadata['format.bit_rate'])
if properties.stream_bitrate != tmp_bitrate and properties.container_bitrate != tmp_bitrate:
msg = 'bit_rate different! ffprobe: %d != bard_audiofile: %d' % (tmp_bitrate or 0, properties.bitrate or 0)
raise Exception(msg)
print('ffprobe check ' + TerminalColors.Ok + 'OK' + TerminalColors.ENDC)
try:
audio_segment = audioSegmentFromDataProperties(audiodata, properties)
except ValueError as exc:
print(f'Error processing {fileinfo}: {exc}')
raise
thr = Song.silence_threshold
minlen = Song.min_silence_length
silences = detect_silence_at_beginning_and_end(audio_segment, min_silence_len=minlen, silence_thresh=thr)
if silences:
(silence1, silence2) = silences
self._silenceAtStart = (silence1[1] - silence1[0]) / 1000
self._silenceAtEnd = (silence2[1] - silence2[0]) / 1000
if self.metadata:
try:
image = extractFrontCover(self.metadata)
except OSError:
print('Error extracting image from %s' % fileinfo)
raise
if image:
(image, imagedata) = image
self._coverWidth = image.width
self._coverHeight = image.height
self._coverMD5 = md5FromData(imagedata)
try:
self._mtime = os.path.getmtime(x)
except TypeError:
self._mtime = None
self._fileSha256sum = calculateFileSHA256(x)
self.fingerprint = self.getAcoustidFingerprint_data(audiodata, properties)
if self.metadata and getattr(self.metadata, 'cuesheet', None):
for track in self.metadata.cuesheet.tracks:
if track.track_number == 255 or track.start_offset == self.metadata.info.total_samples:
continue
timepos = self.metadata.info.length * track.start_offset / self.metadata.info.total_samples
try:
title = self.metadata['SUBTRACKTITLES'][track.track_number - 1]
except (KeyError, IndexError):
title = None
ct = CueTrack(track.track_number, track.start_offset, timepos, title)
self.cuesheet.append(ct)
self.isValid = True
</DeepExtract>
|
def __init__(self, x, rootDir=None, data=None):
"""Create a Song oject."""
self.tags = {}
self.fingerprint = None
self._decode_properties = None
self.cuesheet = []
if type(x) == sqlite3.Row or hasattr(x, 'keys'):
self.id = x['id']
self._root = x['root']
self._path = x['path']
self._mtime = float(x['mtime'])
self._coverWidth = x['coverwidth']
self._coverHeight = x['coverheight']
self._coverMD5 = x['covermd5']
self.isValid = True
return
self.isValid = False
self._root = rootDir or ''
if data:
self._path = None
self._description = '<song in memory>'
fileinfo = data if isinstance(data, str) else 'file-like object'
if hasattr(data, 'seek'):
data.seek(0)
try:
self.metadata = mutagen.File(data)
except mutagen.mp3.HeaderNotFoundError as e:
print(TerminalColors.Error + 'Error' + TerminalColors.ENDC + ' reading %s:' % data, e)
raise
if not self.metadata:
print('No metadata found for %s : This will probably cause problems' % data)
formattext = {mutagen.mp3.EasyMP3: 'mp3', mutagen.mp3.MP3: 'mp3', mutagen.easymp4.EasyMP4: 'mp4', mutagen.mp4.MP4: 'mp4', mutagen.asf.ASF: 'asf', mutagen.flac.FLAC: 'flac', mutagen.oggvorbis.OggVorbis: 'ogg', mutagen.oggopus.OggOpus: 'opus', mutagen.wavpack.WavPack: 'wv', mutagen.monkeysaudio.MonkeysAudio: 'ape', mutagen.musepack.Musepack: 'mpc', mutagen.wave.WAVE: 'wav', mutagen.dsf.DSF: 'dsf'}
self._format = formattext[type(self.metadata)]
(audiodata, properties) = decodeAudio(data)
self._audioSha256sum = calculateSHA256_data(audiodata)
self._decode_properties = properties
if self.metadata:
if not getattr(self.metadata.info, 'bits_per_sample', None):
self.metadata.info.bits_per_sample = properties.decoded_bytes_per_sample * 8
if not getattr(self.metadata.info, 'bitrate', None):
self.metadata.info.bitrate = properties.stream_bitrate or properties.container_bitrate
if getattr(self.metadata.info, 'length', 0) == 0:
warning_level = DecodeMessageRecord.level_value('Warning')
record = DecodeMessageRecord(0, warning_level, 'Length cannot be read with mutagen. Using decoded data to obtain length')
self._decode_properties.messages.append(record)
self.metadata.info.length = properties.decoded_duration
if properties.messages:
print('\n'.join([str(x) for x in properties.messages]))
if config.config['enable_internal_checks']:
ffprobe_metadata = FFProbeMetadata(self.path())
try:
tmp_bits = int(ffprobe_metadata['streams.stream.0.bits_per_raw_sample'])
except ValueError:
print("ffprobe doesn't provide bits per sample")
else:
if self.metadata.info.bits_per_sample != tmp_bits:
msg = 'bits_per_sample different! ffprobe: %d != bard_audiofile: %d' % (tmp_bits, self.metadata.info.bits_per_sample)
raise Exception(msg)
tmp_bitrate = int(ffprobe_metadata['format.bit_rate'])
if properties.stream_bitrate != tmp_bitrate and properties.container_bitrate != tmp_bitrate:
msg = 'bit_rate different! ffprobe: %d != bard_audiofile: %d' % (tmp_bitrate or 0, properties.bitrate or 0)
raise Exception(msg)
print('ffprobe check ' + TerminalColors.Ok + 'OK' + TerminalColors.ENDC)
try:
audio_segment = audioSegmentFromDataProperties(audiodata, properties)
except ValueError as exc:
print(f'Error processing {fileinfo}: {exc}')
raise
thr = Song.silence_threshold
minlen = Song.min_silence_length
silences = detect_silence_at_beginning_and_end(audio_segment, min_silence_len=minlen, silence_thresh=thr)
if silences:
(silence1, silence2) = silences
self._silenceAtStart = (silence1[1] - silence1[0]) / 1000
self._silenceAtEnd = (silence2[1] - silence2[0]) / 1000
if self.metadata:
try:
image = extractFrontCover(self.metadata)
except OSError:
print('Error extracting image from %s' % fileinfo)
raise
if image:
(image, imagedata) = image
self._coverWidth = image.width
self._coverHeight = image.height
self._coverMD5 = md5FromData(imagedata)
try:
self._mtime = os.path.getmtime(data)
except TypeError:
self._mtime = None
self._fileSha256sum = calculateFileSHA256(data)
self.fingerprint = self.getAcoustidFingerprint_data(audiodata, properties)
if self.metadata and getattr(self.metadata, 'cuesheet', None):
for track in self.metadata.cuesheet.tracks:
if track.track_number == 255 or track.start_offset == self.metadata.info.total_samples:
continue
timepos = self.metadata.info.length * track.start_offset / self.metadata.info.total_samples
try:
title = self.metadata['SUBTRACKTITLES'][track.track_number - 1]
except (KeyError, IndexError):
title = None
ct = CueTrack(track.track_number, track.start_offset, timepos, title)
self.cuesheet.append(ct)
self.isValid = True
else:
self._path = os.path.normpath(x)
self._description = self._path
fileinfo = x if isinstance(x, str) else 'file-like object'
if hasattr(x, 'seek'):
x.seek(0)
try:
self.metadata = mutagen.File(x)
except mutagen.mp3.HeaderNotFoundError as e:
print(TerminalColors.Error + 'Error' + TerminalColors.ENDC + ' reading %s:' % x, e)
raise
if not self.metadata:
print('No metadata found for %s : This will probably cause problems' % x)
formattext = {mutagen.mp3.EasyMP3: 'mp3', mutagen.mp3.MP3: 'mp3', mutagen.easymp4.EasyMP4: 'mp4', mutagen.mp4.MP4: 'mp4', mutagen.asf.ASF: 'asf', mutagen.flac.FLAC: 'flac', mutagen.oggvorbis.OggVorbis: 'ogg', mutagen.oggopus.OggOpus: 'opus', mutagen.wavpack.WavPack: 'wv', mutagen.monkeysaudio.MonkeysAudio: 'ape', mutagen.musepack.Musepack: 'mpc', mutagen.wave.WAVE: 'wav', mutagen.dsf.DSF: 'dsf'}
self._format = formattext[type(self.metadata)]
(audiodata, properties) = decodeAudio(x)
self._audioSha256sum = calculateSHA256_data(audiodata)
self._decode_properties = properties
if self.metadata:
if not getattr(self.metadata.info, 'bits_per_sample', None):
self.metadata.info.bits_per_sample = properties.decoded_bytes_per_sample * 8
if not getattr(self.metadata.info, 'bitrate', None):
self.metadata.info.bitrate = properties.stream_bitrate or properties.container_bitrate
if getattr(self.metadata.info, 'length', 0) == 0:
warning_level = DecodeMessageRecord.level_value('Warning')
record = DecodeMessageRecord(0, warning_level, 'Length cannot be read with mutagen. Using decoded data to obtain length')
self._decode_properties.messages.append(record)
self.metadata.info.length = properties.decoded_duration
if properties.messages:
print('\n'.join([str(x) for x in properties.messages]))
if config.config['enable_internal_checks']:
ffprobe_metadata = FFProbeMetadata(self.path())
try:
tmp_bits = int(ffprobe_metadata['streams.stream.0.bits_per_raw_sample'])
except ValueError:
print("ffprobe doesn't provide bits per sample")
else:
if self.metadata.info.bits_per_sample != tmp_bits:
msg = 'bits_per_sample different! ffprobe: %d != bard_audiofile: %d' % (tmp_bits, self.metadata.info.bits_per_sample)
raise Exception(msg)
tmp_bitrate = int(ffprobe_metadata['format.bit_rate'])
if properties.stream_bitrate != tmp_bitrate and properties.container_bitrate != tmp_bitrate:
msg = 'bit_rate different! ffprobe: %d != bard_audiofile: %d' % (tmp_bitrate or 0, properties.bitrate or 0)
raise Exception(msg)
print('ffprobe check ' + TerminalColors.Ok + 'OK' + TerminalColors.ENDC)
try:
audio_segment = audioSegmentFromDataProperties(audiodata, properties)
except ValueError as exc:
print(f'Error processing {fileinfo}: {exc}')
raise
thr = Song.silence_threshold
minlen = Song.min_silence_length
silences = detect_silence_at_beginning_and_end(audio_segment, min_silence_len=minlen, silence_thresh=thr)
if silences:
(silence1, silence2) = silences
self._silenceAtStart = (silence1[1] - silence1[0]) / 1000
self._silenceAtEnd = (silence2[1] - silence2[0]) / 1000
if self.metadata:
try:
image = extractFrontCover(self.metadata)
except OSError:
print('Error extracting image from %s' % fileinfo)
raise
if image:
(image, imagedata) = image
self._coverWidth = image.width
self._coverHeight = image.height
self._coverMD5 = md5FromData(imagedata)
try:
self._mtime = os.path.getmtime(x)
except TypeError:
self._mtime = None
self._fileSha256sum = calculateFileSHA256(x)
self.fingerprint = self.getAcoustidFingerprint_data(audiodata, properties)
if self.metadata and getattr(self.metadata, 'cuesheet', None):
for track in self.metadata.cuesheet.tracks:
if track.track_number == 255 or track.start_offset == self.metadata.info.total_samples:
continue
timepos = self.metadata.info.length * track.start_offset / self.metadata.info.total_samples
try:
title = self.metadata['SUBTRACKTITLES'][track.track_number - 1]
except (KeyError, IndexError):
title = None
ct = CueTrack(track.track_number, track.start_offset, timepos, title)
self.cuesheet.append(ct)
self.isValid = True
|
bard
|
positive
|
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
(rank, world_size) = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for (i, data) in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
<DeepExtract>
(rank, world_size) = get_dist_info()
if tmpdir is None:
MAX_LEN = 512
dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
mmcv.dump(results, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
if rank != 0:
results = None
else:
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:len(dataset)]
shutil.rmtree(tmpdir)
results = ordered_results
</DeepExtract>
return results
|
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
(rank, world_size) = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for (i, data) in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
(rank, world_size) = get_dist_info()
if tmpdir is None:
MAX_LEN = 512
dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
mmcv.dump(results, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
if rank != 0:
results = None
else:
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:len(dataset)]
shutil.rmtree(tmpdir)
results = ordered_results
return results
|
EfficientDet-bifpn
|
positive
|
def test_multiple_disulfides_target(self):
<DeepExtract>
pdb_path = os.path.join(absltest.get_default_test_srcdir(), 'alphafold/relax/testdata/multiple_disulfides_target.pdb')
with open(pdb_path, 'r') as f:
prot = protein.from_pdb_string(f.read())
</DeepExtract>
ret = amber_minimize.run_pipeline(prot, max_iterations=10, max_attempts=1, stiffness=10.0, use_gpu=_USE_GPU)
self.assertIn('opt_time', ret)
self.assertIn('min_attempts', ret)
|
def test_multiple_disulfides_target(self):
pdb_path = os.path.join(absltest.get_default_test_srcdir(), 'alphafold/relax/testdata/multiple_disulfides_target.pdb')
with open(pdb_path, 'r') as f:
prot = protein.from_pdb_string(f.read())
ret = amber_minimize.run_pipeline(prot, max_iterations=10, max_attempts=1, stiffness=10.0, use_gpu=_USE_GPU)
self.assertIn('opt_time', ret)
self.assertIn('min_attempts', ret)
|
alphafold
|
positive
|
@classmethod
def get_all_fields(cls, class_obj=None, fields=None):
"""
TODO: This needs to be properly used
"""
def return_fields(obj):
internal_fields = fields
if internal_fields is None:
internal_fields = {}
for attribute in dir(obj):
try:
attr_val = getattr(obj, attribute)
attr_cls = attr_val.__class__
if issubclass(attr_cls, ModelField):
internal_fields[attribute] = attr_val
except:
pass
return internal_fields
if class_obj is None:
class_obj = cls
<DeepExtract>
internal_fields = fields
if internal_fields is None:
internal_fields = {}
for attribute in dir(class_obj):
try:
attr_val = getattr(class_obj, attribute)
attr_cls = attr_val.__class__
if issubclass(attr_cls, ModelField):
internal_fields[attribute] = attr_val
except:
pass
fields = internal_fields
</DeepExtract>
for parent_class in cls.__bases__:
parent_fields = cls.get_all_fields(parent_class, fields)
for (field_name, field_value) in list(parent_fields.items()):
if not field_name in fields:
fields[field_name] = field_value
return fields
elif not isinstance(class_obj, CollectionModel):
return fields
|
@classmethod
def get_all_fields(cls, class_obj=None, fields=None):
"""
TODO: This needs to be properly used
"""
def return_fields(obj):
internal_fields = fields
if internal_fields is None:
internal_fields = {}
for attribute in dir(obj):
try:
attr_val = getattr(obj, attribute)
attr_cls = attr_val.__class__
if issubclass(attr_cls, ModelField):
internal_fields[attribute] = attr_val
except:
pass
return internal_fields
if class_obj is None:
class_obj = cls
internal_fields = fields
if internal_fields is None:
internal_fields = {}
for attribute in dir(class_obj):
try:
attr_val = getattr(class_obj, attribute)
attr_cls = attr_val.__class__
if issubclass(attr_cls, ModelField):
internal_fields[attribute] = attr_val
except:
pass
fields = internal_fields
for parent_class in cls.__bases__:
parent_fields = cls.get_all_fields(parent_class, fields)
for (field_name, field_value) in list(parent_fields.items()):
if not field_name in fields:
fields[field_name] = field_value
return fields
elif not isinstance(class_obj, CollectionModel):
return fields
|
ArangoPy
|
positive
|