| before (string, 0–955k) | after (string, 0–877k) | repo (string, 1–74) | type (1 class) |
|---|---|---|---|
def _prune_catalog(self) -> cat.Catalog:
"""Prune the controls in the current catalog."""
if self._import is None:
return self._catalog
<DeepExtract>
if self._import.include_controls is not None:
include_ids = self._controls_selected(self._import.include_controls)
else:
if self._import.include_all is None:
logger.warning('Profile does not specify include-controls, so including all.')
include_ids = self._catalog_interface.get_control_ids()
exclude_ids = self._controls_selected(self._import.exclude_controls)
if not set(include_ids).issuperset(set(exclude_ids)):
logger.debug(f'include_ids is not a superset of exclude_ids in import {self._import.href}')
needed_ids = sorted([id_ for id_ in include_ids if id_ not in exclude_ids])
</DeepExtract>
<DeepExtract>
loaded_ids = []
final_ids: List[str] = []
for control_id in needed_ids:
if control_id not in loaded_ids:
control = self._catalog_interface.get_control(control_id)
if control is None:
msg = f'Profile titled "{self._profile.metadata.title}" references control {control_id} but it is not in catalog titled "{self._catalog.metadata.title}"'
raise TrestleError(msg)
control = self._prune_control(needed_ids, control, loaded_ids)
self._catalog_interface.replace_control(control)
loaded_ids.append(control_id)
final_ids.append(control_id)
final_control_ids = final_ids
</DeepExtract>
<DeepExtract>
for control in self._catalog_interface.get_all_controls_from_dict():
_ = self._re_insert_child_controls(control)
</DeepExtract>
cat_controls = []
group_dict: Dict[str, cat.Group] = {}
for control_id in final_control_ids:
control = self._catalog_interface.get_control(control_id)
(group_id, group_title, group_class) = self._catalog_interface.get_group_info_by_control(control_id)
if not group_id:
cat_controls.append(control)
continue
group = group_dict.get(group_id)
if group is None:
group = cat.Group(id=group_id, title=group_title, class_=group_class, controls=[control])
group_dict[group_id] = group
else:
group_dict[group_id].controls.append(control)
new_groups: Optional[List[cat.Group]] = list(group_dict.values())
new_groups = none_if_empty(new_groups)
cat_controls = none_if_empty(cat_controls)
new_params = self._catalog.params
new_cat = cat.Catalog(uuid=str(uuid4()), metadata=self._catalog.metadata, back_matter=common.BackMatter(), controls=cat_controls, groups=new_groups, params=new_params)
needed_uuid_refs = ModelUtils.find_uuid_refs(new_cat)
new_resources: Optional[List[common.Resource]] = []
if self._catalog.back_matter and self._catalog.back_matter.resources:
new_resources = [res for res in self._catalog.back_matter.resources if res.uuid in needed_uuid_refs]
new_resources = none_if_empty(new_resources)
new_cat.back_matter.resources = new_resources
return new_cat
|
def _prune_catalog(self) -> cat.Catalog:
"""Prune the controls in the current catalog."""
if self._import is None:
return self._catalog
if self._import.include_controls is not None:
include_ids = self._controls_selected(self._import.include_controls)
else:
if self._import.include_all is None:
logger.warning('Profile does not specify include-controls, so including all.')
include_ids = self._catalog_interface.get_control_ids()
exclude_ids = self._controls_selected(self._import.exclude_controls)
if not set(include_ids).issuperset(set(exclude_ids)):
logger.debug(f'include_ids is not a superset of exclude_ids in import {self._import.href}')
needed_ids = sorted([id_ for id_ in include_ids if id_ not in exclude_ids])
loaded_ids = []
final_ids: List[str] = []
for control_id in needed_ids:
if control_id not in loaded_ids:
control = self._catalog_interface.get_control(control_id)
if control is None:
msg = f'Profile titled "{self._profile.metadata.title}" references control {control_id} but it is not in catalog titled "{self._catalog.metadata.title}"'
raise TrestleError(msg)
control = self._prune_control(needed_ids, control, loaded_ids)
self._catalog_interface.replace_control(control)
loaded_ids.append(control_id)
final_ids.append(control_id)
final_control_ids = final_ids
for control in self._catalog_interface.get_all_controls_from_dict():
_ = self._re_insert_child_controls(control)
cat_controls = []
group_dict: Dict[str, cat.Group] = {}
for control_id in final_control_ids:
control = self._catalog_interface.get_control(control_id)
(group_id, group_title, group_class) = self._catalog_interface.get_group_info_by_control(control_id)
if not group_id:
cat_controls.append(control)
continue
group = group_dict.get(group_id)
if group is None:
group = cat.Group(id=group_id, title=group_title, class_=group_class, controls=[control])
group_dict[group_id] = group
else:
group_dict[group_id].controls.append(control)
new_groups: Optional[List[cat.Group]] = list(group_dict.values())
new_groups = none_if_empty(new_groups)
cat_controls = none_if_empty(cat_controls)
new_params = self._catalog.params
new_cat = cat.Catalog(uuid=str(uuid4()), metadata=self._catalog.metadata, back_matter=common.BackMatter(), controls=cat_controls, groups=new_groups, params=new_params)
needed_uuid_refs = ModelUtils.find_uuid_refs(new_cat)
new_resources: Optional[List[common.Resource]] = []
if self._catalog.back_matter and self._catalog.back_matter.resources:
new_resources = [res for res in self._catalog.back_matter.resources if res.uuid in needed_uuid_refs]
new_resources = none_if_empty(new_resources)
new_cat.back_matter.resources = new_resources
return new_cat
|
compliance-trestle
|
positive
|
def get_journal_log(self, conf):
""" /var/log/zzz.service.log or /var/log/default.unit.log """
filename = os.path.basename(strE(conf.filename()))
unitname = (conf.name() or 'default') + '.unit'
name = filename or unitname
<DeepExtract>
HOME = get_HOME(conf.root_mode())
RUN = get_RUN(conf.root_mode())
LOG = get_LOG_DIR(conf.root_mode())
XDG_DATA_HOME = get_DATA_HOME(conf.root_mode())
XDG_CONFIG_HOME = get_CONFIG_HOME(conf.root_mode())
XDG_RUNTIME_DIR = get_RUNTIME_DIR(conf.root_mode())
log_folder = os.path.expanduser(self._journal_log_folder.replace('${', '{').format(**locals()))
</DeepExtract>
log_file = name.replace(os.path.sep, '.') + '.log'
if log_file.startswith('.'):
log_file = 'dot.' + log_file
return os.path.join(log_folder, log_file)
|
def get_journal_log(self, conf):
""" /var/log/zzz.service.log or /var/log/default.unit.log """
filename = os.path.basename(strE(conf.filename()))
unitname = (conf.name() or 'default') + '.unit'
name = filename or unitname
HOME = get_HOME(conf.root_mode())
RUN = get_RUN(conf.root_mode())
LOG = get_LOG_DIR(conf.root_mode())
XDG_DATA_HOME = get_DATA_HOME(conf.root_mode())
XDG_CONFIG_HOME = get_CONFIG_HOME(conf.root_mode())
XDG_RUNTIME_DIR = get_RUNTIME_DIR(conf.root_mode())
log_folder = os.path.expanduser(self._journal_log_folder.replace('${', '{').format(**locals()))
log_file = name.replace(os.path.sep, '.') + '.log'
if log_file.startswith('.'):
log_file = 'dot.' + log_file
return os.path.join(log_folder, log_file)
|
docker-systemctl-images
|
positive
|
def process_files(args):
(assemblies_1, features_1) = gff.get_gff3_features(args.annotation_1)
(assemblies_2, features_2) = gff.get_gff3_features(args.annotation_2)
a_exons = []
p_exons = []
a_gene = []
p_gene = []
a_mrna = []
p_mrna = []
exon_pred_all = set()
gene_true = set()
mrna_true = set()
chr = []
a_cds = []
p_cds = []
a_cd = []
p_cd = []
chr = []
true_pred_file = args.output_dir + '/true_predicted_genes.txt'
true_file = open(true_pred_file, 'w')
true_file.write('Known\tPredicted\n')
for asm_id in assemblies_1:
assembly_1 = assemblies_1[asm_id]
assembly_2 = assemblies_2.get(asm_id, -1)
genes_1 = assembly_1.genes()
anno_exons = set()
for gene_1 in sorted(genes_1):
gene_1_loc = gene_1.location_on(assembly_1)
<DeepExtract>
cord_a = asm_id + ':' + str(gene_1_loc.fmin) + ':' + str(gene_1_loc.fmax) + ':' + str(gene_1_loc.strand)
</DeepExtract>
if cord_a not in a_gene:
a_gene.append(cord_a)
ex_start = []
ex_stop = []
for mrna_1 in sorted(gene_1.mRNAs()):
mrna_1_loc = mrna_1.location_on(assembly_1)
<DeepExtract>
cord = asm_id + ':' + str(mrna_1_loc.fmin) + ':' + str(mrna_1_loc.fmax) + ':' + str(mrna_1_loc.strand)
</DeepExtract>
if cord not in a_mrna:
a_mrna.append(cord)
if args.feature == 'Exon':
feat_1 = mrna_1.exons()
if args.feature == 'CDS':
feat_1 = mrna_1.CDSs()
for exon_1 in sorted(feat_1):
exon_1_loc = exon_1.location_on(assembly_1)
<DeepExtract>
cord = asm_id + ':' + str(exon_1_loc.fmin) + ':' + str(exon_1_loc.fmax) + ':' + str(exon_1_loc.strand)
</DeepExtract>
if cord not in a_exons:
a_exons.append(cord)
anno_exons.add(cord)
ex_start.append(exon_1_loc.fmin)
ex_stop.append(exon_1_loc.fmax)
ex_start.sort()
ex_stop.sort()
if len(ex_start) >= 1:
cds1 = asm_id + ':' + gene_1.id + ':' + str(ex_start[0]) + ':' + str(ex_stop[-1]) + ':' + str(gene_1_loc.strand)
else:
cds1 = asm_id + ':' + gene_1.id + ':' + str(gene_1_loc.fmin) + ':' + str(gene_1_loc.fmax) + ':' + str(gene_1_loc.strand)
if cord_a not in a_cd:
a_cds.append(cds1)
a_cd.append(cord_a)
if type(assembly_2) is int:
continue
genes_2 = assembly_2.genes()
chr.append(asm_id)
pred_exons = set()
for gene_2 in sorted(genes_2):
gene_2_loc = gene_2.location_on(assembly_2)
<DeepExtract>
cord_p = asm_id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
</DeepExtract>
if cord_p not in p_gene:
p_gene.append(cord_p)
ex_start = []
ex_stop = []
for mrna_2 in sorted(gene_2.mRNAs()):
mrna_2_loc = mrna_2.location_on(assembly_2)
<DeepExtract>
cord = asm_id + ':' + str(mrna_2_loc.fmin) + ':' + str(mrna_2_loc.fmax) + ':' + str(mrna_2_loc.strand)
</DeepExtract>
if cord not in p_mrna:
p_mrna.append(cord)
if args.feature == 'Exon':
feat_2 = mrna_2.exons()
if args.feature == 'CDS':
feat_2 = mrna_2.CDSs()
for exon_2 in sorted(feat_2):
exon_2_loc = exon_2.location_on(assembly_2)
<DeepExtract>
cord = asm_id + ':' + str(exon_2_loc.fmin) + ':' + str(exon_2_loc.fmax) + ':' + str(exon_2_loc.strand)
</DeepExtract>
pred_exons.add(cord)
if cord not in p_exons:
p_exons.append(cord)
ex_start.append(exon_2_loc.fmin)
ex_stop.append(exon_2_loc.fmax)
ex_start.sort()
ex_stop.sort()
if len(ex_start) >= 1:
cds2 = asm_id + ':' + gene_2.id + ':' + str(ex_start[0]) + ':' + str(ex_stop[-1]) + ':' + str(gene_2_loc.strand)
else:
cds2 = asm_id + ':' + gene_2.id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
if cord_p not in p_cd:
p_cds.append(cds2)
p_cd.append(cord_p)
exon_pred_all.update(pred_exons.intersection(anno_exons))
for gene_2 in sorted(genes_2):
gene_2_loc = gene_2.location_on(assembly_2)
<DeepExtract>
cord_g = asm_id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
</DeepExtract>
if cord_g in gene_true:
continue
ex_mrna1 = set()
ex_mrna2 = set()
for gene_1 in sorted(genes_1):
gene_1_loc = gene_1.location_on(assembly_1)
if gene_1_loc.strand != gene_2_loc.strand:
continue
if gene_2.overlaps_with(gene_1):
for mrna_2 in sorted(gene_2.mRNAs()):
if args.feature == 'Exon':
feat_2 = mrna_2.exons()
if args.feature == 'CDS':
feat_2 = mrna_2.CDSs()
for exon_2 in sorted(feat_2):
exon_2_loc = exon_2.location_on(assembly_2)
<DeepExtract>
cord2 = asm_id + ':' + str(exon_2_loc.fmin) + ':' + str(exon_2_loc.fmax) + ':' + str(exon_2_loc.strand)
</DeepExtract>
ex_mrna2.add(cord2)
for mrna_1 in sorted(gene_1.mRNAs()):
if args.feature == 'Exon':
feat_1 = mrna_1.exons()
if args.feature == 'CDS':
feat_1 = mrna_1.CDSs()
for exon_1 in sorted(feat_1):
exon_1_loc = exon_1.location_on(assembly_1)
<DeepExtract>
cord1 = asm_id + ':' + str(exon_1_loc.fmin) + ':' + str(exon_1_loc.fmax) + ':' + str(exon_1_loc.strand)
</DeepExtract>
ex_mrna1.add(cord1)
ex_union = ex_mrna1.union(ex_mrna2)
if len(ex_union) == len(ex_mrna1) and len(ex_union) == len(ex_mrna2):
gene_true.add(cord_g)
true_file.write(gene_1.id + '\t' + gene_2.id + '\n')
break
for asm_id in assemblies_2:
if asm_id not in chr:
assembly_2 = assemblies_2.get(asm_id, -1)
genes_2 = assembly_2.genes()
for gene_2 in sorted(genes_2):
gene_2_loc = gene_2.location_on(assembly_2)
<DeepExtract>
cord_p = asm_id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
</DeepExtract>
if cord_p not in p_gene:
p_gene.append(cord_p)
ex_start = []
ex_stop = []
for mrna_2 in sorted(gene_2.mRNAs()):
mrna_2_loc = mrna_2.location_on(assembly_2)
<DeepExtract>
cord = asm_id + ':' + str(mrna_2_loc.fmin) + ':' + str(mrna_2_loc.fmax) + ':' + str(mrna_2_loc.strand)
</DeepExtract>
if cord not in p_mrna:
p_mrna.append(cord)
if args.feature == 'Exon':
feat_2 = mrna_2.exons()
if args.feature == 'CDS':
feat_2 = mrna_2.CDSs()
for exon_2 in sorted(feat_2):
exon_2_loc = exon_2.location_on(assembly_2)
<DeepExtract>
cord = asm_id + ':' + str(exon_2_loc.fmin) + ':' + str(exon_2_loc.fmax) + ':' + str(exon_2_loc.strand)
</DeepExtract>
if cord not in p_exons:
p_exons.append(cord)
ex_start.append(exon_2_loc.fmin)
ex_stop.append(exon_2_loc.fmax)
ex_start.sort()
ex_stop.sort()
if len(ex_start) >= 1:
cds2 = asm_id + ':' + gene_2.id + ':' + str(ex_start[0]) + ':' + str(ex_stop[-1]) + ':' + str(gene_2_loc.strand)
else:
cds2 = asm_id + ':' + gene_2.id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
if cord_p not in p_cd:
p_cds.append(cds2)
p_cd.append(cord_p)
<DeepExtract>
a_base = 0
p_base = 0
true_base_value = 0
exon2_bed = args.output_dir + '/exon_2.bed'
e_bed = open(exon2_bed, 'w')
for exon in p_exons:
chrom = exon.split(':')[0]
start = int(exon.split(':')[1])
stop = int(exon.split(':')[2])
strand = exon.split(':')[3]
if strand == str(1):
strand = '+'
else:
strand = '-'
e_bed.write(chrom + '\t' + str(start) + '\t' + str(stop) + '\texon\t' + str(0) + '\t' + strand + '\n')
e_bed.close()
out2 = args.output_dir + '/exon_2_merged.bed'
cmd = 'bedtools merge -nms -scores sum -i ' + exon2_bed + ' -s >' + out2
os.system(cmd)
exon1_bed = args.output_dir + '/exon_1.bed'
e_bed = open(exon1_bed, 'w')
for exon in a_exons:
chrom = exon.split(':')[0]
start = int(exon.split(':')[1])
stop = int(exon.split(':')[2])
strand = exon.split(':')[3]
if strand == str(1):
strand = '+'
else:
strand = '-'
e_bed.write(chrom + '\t' + str(start) + '\t' + str(stop) + '\texon\t' + str(0) + '\t' + strand + '\n')
e_bed.close()
out1 = args.output_dir + '/exon_1_merged.bed'
cmd = 'bedtools merge -nms -scores sum -i ' + exon1_bed + ' -s >' + out1
os.system(cmd)
out_intersect = args.output_dir + '/exon_1_2_intersect.bed'
cmd = 'bedtools intersect -s -wo -a ' + out1 + ' -b ' + out2 + ' >' + out_intersect
os.system(cmd)
a_base_file = open(out1, 'r')
for line in a_base_file:
arr = line.split('\t')
a_base = a_base + (int(arr[2]) - int(arr[1]))
a_base_file.close()
p_base_file = open(out2, 'r')
for line in p_base_file:
arr = line.split('\t')
p_base = p_base + (int(arr[2]) - int(arr[1]))
p_base_file.close()
true_base_file = open(out_intersect, 'r')
for line in true_base_file:
arr = line.split('\t')
true_base_value = true_base_value + int(arr[12])
true_base_file.close()
(a_base_val, p_base_val, true_base) = (a_base, p_base, true_base_value)
</DeepExtract>
base_sn = true_base / a_base_val * 100
base_sp = true_base / p_base_val * 100
annotated_exon = len(a_exons)
predicted_exon = len(p_exons)
true_pred_exon = len(exon_pred_all)
exon_sn = true_pred_exon / annotated_exon * 100
exon_sp = true_pred_exon / predicted_exon * 100
annotated_gene = len(a_gene)
predicted_gene = len(p_gene)
true_pred_gene = len(gene_true)
gene_sn = true_pred_gene / annotated_gene * 100
gene_sp = true_pred_gene / predicted_gene * 100
print('Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n')
print('Gene\t' + str(annotated_gene) + '\t' + str(predicted_gene) + '\t' + str(true_pred_gene) + '\t' + str(gene_sn) + '\t' + str(gene_sp))
print(args.feature + '\t' + str(annotated_exon) + '\t' + str(predicted_exon) + '\t' + str(true_pred_exon) + '\t' + str(exon_sn) + '\t' + str(exon_sp))
print('Base\t' + str(a_base_val) + '\t' + str(p_base_val) + '\t' + str(true_base) + '\t' + str(base_sn) + '\t' + str(base_sp))
out_file = args.output_dir + '/summary.txt'
if not os.path.exists(args.output_dir):
sys.exit('Directory does not exist.')
fout = open(out_file, 'w')
fout.write('Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n')
fout.write('Gene\t' + str(annotated_gene) + '\t' + str(predicted_gene) + '\t' + str(true_pred_gene) + '\t' + str(gene_sn) + '\t' + str(gene_sp) + '\n')
fout.write(args.feature + '\t' + str(annotated_exon) + '\t' + str(predicted_exon) + '\t' + str(true_pred_exon) + '\t' + str(exon_sn) + '\t' + str(exon_sp) + '\n')
fout.write('Base\t' + str(a_base_val) + '\t' + str(p_base_val) + '\t' + str(true_base) + '\t' + str(base_sn) + '\t' + str(base_sp) + '\n\n')
<DeepExtract>
gene_found = 0
gene_opp = 0
gene_no_overlap = 0
gene_more_than_one_overlap = 0
temp_file1 = args.output_dir + '/' + 'pred' + '.found.txt'
ft1 = open(temp_file1, 'w')
temp_file2 = args.output_dir + '/' + 'pred' + '.opposite.txt'
ft2 = open(temp_file2, 'w')
temp_file3 = args.output_dir + '/' + 'pred' + '.no_overlap.txt'
ft3 = open(temp_file3, 'w')
temp_file4 = args.output_dir + '/' + 'pred' + '.overlap_more_than_one.txt'
ft4 = open(temp_file4, 'w')
for c1 in p_cds:
gene_overlap_same = []
gene_overlap_opp = []
chrom1 = c1.split(':')[0]
cds_id1 = c1.split(':')[1]
start1 = int(c1.split(':')[2])
stop1 = int(c1.split(':')[3])
strand1 = c1.split(':')[4]
for c2 in a_cds:
chrom2 = c2.split(':')[0]
cds_id2 = c2.split(':')[1]
start2 = int(c2.split(':')[2])
stop2 = int(c2.split(':')[3])
strand2 = c2.split(':')[4]
if chrom1 != chrom2:
continue
if start1 <= stop2 and start2 <= stop1:
arr = [start1, stop1, start2, stop2]
arr.sort()
len_overlap = arr[2] - arr[1]
per_overlap = len_overlap / (stop1 - start1) * 100
if strand1 == strand2:
gene_overlap_same.append(per_overlap)
else:
gene_overlap_opp.append(per_overlap)
if len(gene_overlap_same) == 1:
gene_found += 1
ft1.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) >= 1:
gene_opp += 1
ft2.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) == 0:
gene_no_overlap += 1
ft3.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) > 1:
gene_more_than_one_overlap += 1
ft4.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
arr = [gene_found, gene_opp, gene_no_overlap, gene_more_than_one_overlap]
arr_pred = arr
</DeepExtract>
<DeepExtract>
gene_found = 0
gene_opp = 0
gene_no_overlap = 0
gene_more_than_one_overlap = 0
temp_file1 = args.output_dir + '/' + 'known' + '.found.txt'
ft1 = open(temp_file1, 'w')
temp_file2 = args.output_dir + '/' + 'known' + '.opposite.txt'
ft2 = open(temp_file2, 'w')
temp_file3 = args.output_dir + '/' + 'known' + '.no_overlap.txt'
ft3 = open(temp_file3, 'w')
temp_file4 = args.output_dir + '/' + 'known' + '.overlap_more_than_one.txt'
ft4 = open(temp_file4, 'w')
for c1 in a_cds:
gene_overlap_same = []
gene_overlap_opp = []
chrom1 = c1.split(':')[0]
cds_id1 = c1.split(':')[1]
start1 = int(c1.split(':')[2])
stop1 = int(c1.split(':')[3])
strand1 = c1.split(':')[4]
for c2 in p_cds:
chrom2 = c2.split(':')[0]
cds_id2 = c2.split(':')[1]
start2 = int(c2.split(':')[2])
stop2 = int(c2.split(':')[3])
strand2 = c2.split(':')[4]
if chrom1 != chrom2:
continue
if start1 <= stop2 and start2 <= stop1:
arr = [start1, stop1, start2, stop2]
arr.sort()
len_overlap = arr[2] - arr[1]
per_overlap = len_overlap / (stop1 - start1) * 100
if strand1 == strand2:
gene_overlap_same.append(per_overlap)
else:
gene_overlap_opp.append(per_overlap)
if len(gene_overlap_same) == 1:
gene_found += 1
ft1.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) >= 1:
gene_opp += 1
ft2.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) == 0:
gene_no_overlap += 1
ft3.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) > 1:
gene_more_than_one_overlap += 1
ft4.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
arr = [gene_found, gene_opp, gene_no_overlap, gene_more_than_one_overlap]
arr_known = arr
</DeepExtract>
<DeepExtract>
gene_found = 0
gene_opp = 0
gene_no_overlap = 0
gene_more_than_one_overlap = 0
temp_file1 = args.output_dir + '/' + 'pred_same' + '.found.txt'
ft1 = open(temp_file1, 'w')
temp_file2 = args.output_dir + '/' + 'pred_same' + '.opposite.txt'
ft2 = open(temp_file2, 'w')
temp_file3 = args.output_dir + '/' + 'pred_same' + '.no_overlap.txt'
ft3 = open(temp_file3, 'w')
temp_file4 = args.output_dir + '/' + 'pred_same' + '.overlap_more_than_one.txt'
ft4 = open(temp_file4, 'w')
for c1 in p_cds:
gene_overlap_same = []
gene_overlap_opp = []
chrom1 = c1.split(':')[0]
cds_id1 = c1.split(':')[1]
start1 = int(c1.split(':')[2])
stop1 = int(c1.split(':')[3])
strand1 = c1.split(':')[4]
for c2 in p_cds:
chrom2 = c2.split(':')[0]
cds_id2 = c2.split(':')[1]
start2 = int(c2.split(':')[2])
stop2 = int(c2.split(':')[3])
strand2 = c2.split(':')[4]
if chrom1 != chrom2:
continue
if start1 <= stop2 and start2 <= stop1:
arr = [start1, stop1, start2, stop2]
arr.sort()
len_overlap = arr[2] - arr[1]
per_overlap = len_overlap / (stop1 - start1) * 100
if strand1 == strand2:
gene_overlap_same.append(per_overlap)
else:
gene_overlap_opp.append(per_overlap)
if len(gene_overlap_same) == 1:
gene_found += 1
ft1.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) >= 1:
gene_opp += 1
ft2.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) == 0:
gene_no_overlap += 1
ft3.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) > 1:
gene_more_than_one_overlap += 1
ft4.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
arr = [gene_found, gene_opp, gene_no_overlap, gene_more_than_one_overlap]
arr_pred_same = arr
</DeepExtract>
new_gene = arr_pred[2]
gene_merge = arr_pred[3]
gene_found = arr_pred[0]
gene_opp = arr_pred[1]
gene_missing = arr_known[2]
gene = arr_known[0]
gene_opp_known = arr_known[1]
gene_split = arr_known[3]
gene_pred_overlap_opp = arr_pred_same[1]
print('1. No. of known gene : ', len(a_cds))
print('2. No. of predicted gene : ', len(p_cds))
print('3. No. of predicted gene overlapping 0 known gene (new gene): ', new_gene)
print('4. No. of predicted gene overlapping > 1 known gene (gene merge) : ', gene_merge)
print('5. No. of predicted gene overlaping 1 known gene : ', gene_found)
print('6. No. of predicted gene overlapping >= 1 known gene in opp strand : ', gene_opp)
print('7. No. of predicted gene overlapping 1 known gene (exact intron/exon boundaries) : ', true_pred_gene)
print('8. No. of predicted gene overlapping >= 1 predicted gene in opp strand : ', gene_pred_overlap_opp)
print('9. No. of known gene overlapping 0 predicted gene (gene missing): ', gene_missing)
print('10. No. of known gene overlapping > 1 predicted gene(gene split) : ', gene_split)
print('11. No. of known gene overlaping 1 predicted gene : ', gene)
print('12. No. of known gene overlapping >= 1 predicted gene in opp strand : ', gene_opp_known)
out_file = args.output_dir + '/final_stats.txt'
if not os.path.exists(args.output_dir):
sys.exit('Directory does not exist.')
fout = open(out_file, 'w')
fout.write('1. No. of known gene : ' + str(len(a_cds)) + '\n')
fout.write('2. No. of predicted gene : ' + str(len(p_cds)) + '\n')
fout.write('3. No. of predicted gene overlapping 0 known gene (new gene): ' + str(new_gene) + '\n')
fout.write('4. No. of predicted gene overlapping > 1 known gene (gene merge) : ' + str(gene_merge) + '\n')
fout.write('5. No. of predicted gene overlaping 1 known gene : ' + str(gene_found) + '\n')
fout.write('6. No. of predicted gene overlapping >= 1 known gene in opp strand : ' + str(gene_opp) + '\n')
fout.write('7. No. of predicted gene overlapping 1 known gene (exact intron/exon boundary) : ' + str(true_pred_gene) + '\n')
fout.write('8. No. of predicted gene overlapping >= 1 predicted gene in opp strand : ' + str(gene_pred_overlap_opp) + '\n')
fout.write('9. No. of known gene overlapping 0 predicted gene (gene missing): ' + str(gene_missing) + '\n')
fout.write('10. No. of known gene overlapping > 1 predicted gene (gene_split): ' + str(gene_split) + '\n')
fout.write('11. No. of known gene overlaping 1 predicted gene : ' + str(gene) + '\n')
fout.write('12. No. of known gene overlapping >= 1 predicted gene in opp strand : ' + str(gene_opp_known) + '\n')
true_pred_file = args.output_dir + '/true_pred.txt'
fout_true = open(true_pred_file, 'w')
for true_gene in gene_true:
fout_true.write(true_gene + '\n')
delete_file = ['exon_1.bed', 'exon_2.bed', 'exon_1_merged.bed', 'exon_2_merged.bed', 'exon_1_2_intersect.bed']
for f in delete_file:
cmd = 'rm ' + args.output_dir + '/' + f
os.system(cmd)
|
def process_files(args):
(assemblies_1, features_1) = gff.get_gff3_features(args.annotation_1)
(assemblies_2, features_2) = gff.get_gff3_features(args.annotation_2)
a_exons = []
p_exons = []
a_gene = []
p_gene = []
a_mrna = []
p_mrna = []
exon_pred_all = set()
gene_true = set()
mrna_true = set()
chr = []
a_cds = []
p_cds = []
a_cd = []
p_cd = []
chr = []
true_pred_file = args.output_dir + '/true_predicted_genes.txt'
true_file = open(true_pred_file, 'w')
true_file.write('Known\tPredicted\n')
for asm_id in assemblies_1:
assembly_1 = assemblies_1[asm_id]
assembly_2 = assemblies_2.get(asm_id, -1)
genes_1 = assembly_1.genes()
anno_exons = set()
for gene_1 in sorted(genes_1):
gene_1_loc = gene_1.location_on(assembly_1)
cord_a = asm_id + ':' + str(gene_1_loc.fmin) + ':' + str(gene_1_loc.fmax) + ':' + str(gene_1_loc.strand)
if cord_a not in a_gene:
a_gene.append(cord_a)
ex_start = []
ex_stop = []
for mrna_1 in sorted(gene_1.mRNAs()):
mrna_1_loc = mrna_1.location_on(assembly_1)
cord = asm_id + ':' + str(mrna_1_loc.fmin) + ':' + str(mrna_1_loc.fmax) + ':' + str(mrna_1_loc.strand)
if cord not in a_mrna:
a_mrna.append(cord)
if args.feature == 'Exon':
feat_1 = mrna_1.exons()
if args.feature == 'CDS':
feat_1 = mrna_1.CDSs()
for exon_1 in sorted(feat_1):
exon_1_loc = exon_1.location_on(assembly_1)
cord = asm_id + ':' + str(exon_1_loc.fmin) + ':' + str(exon_1_loc.fmax) + ':' + str(exon_1_loc.strand)
if cord not in a_exons:
a_exons.append(cord)
anno_exons.add(cord)
ex_start.append(exon_1_loc.fmin)
ex_stop.append(exon_1_loc.fmax)
ex_start.sort()
ex_stop.sort()
if len(ex_start) >= 1:
cds1 = asm_id + ':' + gene_1.id + ':' + str(ex_start[0]) + ':' + str(ex_stop[-1]) + ':' + str(gene_1_loc.strand)
else:
cds1 = asm_id + ':' + gene_1.id + ':' + str(gene_1_loc.fmin) + ':' + str(gene_1_loc.fmax) + ':' + str(gene_1_loc.strand)
if cord_a not in a_cd:
a_cds.append(cds1)
a_cd.append(cord_a)
if type(assembly_2) is int:
continue
genes_2 = assembly_2.genes()
chr.append(asm_id)
pred_exons = set()
for gene_2 in sorted(genes_2):
gene_2_loc = gene_2.location_on(assembly_2)
cord_p = asm_id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
if cord_p not in p_gene:
p_gene.append(cord_p)
ex_start = []
ex_stop = []
for mrna_2 in sorted(gene_2.mRNAs()):
mrna_2_loc = mrna_2.location_on(assembly_2)
cord = asm_id + ':' + str(mrna_2_loc.fmin) + ':' + str(mrna_2_loc.fmax) + ':' + str(mrna_2_loc.strand)
if cord not in p_mrna:
p_mrna.append(cord)
if args.feature == 'Exon':
feat_2 = mrna_2.exons()
if args.feature == 'CDS':
feat_2 = mrna_2.CDSs()
for exon_2 in sorted(feat_2):
exon_2_loc = exon_2.location_on(assembly_2)
cord = asm_id + ':' + str(exon_2_loc.fmin) + ':' + str(exon_2_loc.fmax) + ':' + str(exon_2_loc.strand)
pred_exons.add(cord)
if cord not in p_exons:
p_exons.append(cord)
ex_start.append(exon_2_loc.fmin)
ex_stop.append(exon_2_loc.fmax)
ex_start.sort()
ex_stop.sort()
if len(ex_start) >= 1:
cds2 = asm_id + ':' + gene_2.id + ':' + str(ex_start[0]) + ':' + str(ex_stop[-1]) + ':' + str(gene_2_loc.strand)
else:
cds2 = asm_id + ':' + gene_2.id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
if cord_p not in p_cd:
p_cds.append(cds2)
p_cd.append(cord_p)
exon_pred_all.update(pred_exons.intersection(anno_exons))
for gene_2 in sorted(genes_2):
gene_2_loc = gene_2.location_on(assembly_2)
cord_g = asm_id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
if cord_g in gene_true:
continue
ex_mrna1 = set()
ex_mrna2 = set()
for gene_1 in sorted(genes_1):
gene_1_loc = gene_1.location_on(assembly_1)
if gene_1_loc.strand != gene_2_loc.strand:
continue
if gene_2.overlaps_with(gene_1):
for mrna_2 in sorted(gene_2.mRNAs()):
if args.feature == 'Exon':
feat_2 = mrna_2.exons()
if args.feature == 'CDS':
feat_2 = mrna_2.CDSs()
for exon_2 in sorted(feat_2):
exon_2_loc = exon_2.location_on(assembly_2)
cord2 = asm_id + ':' + str(exon_2_loc.fmin) + ':' + str(exon_2_loc.fmax) + ':' + str(exon_2_loc.strand)
ex_mrna2.add(cord2)
for mrna_1 in sorted(gene_1.mRNAs()):
if args.feature == 'Exon':
feat_1 = mrna_1.exons()
if args.feature == 'CDS':
feat_1 = mrna_1.CDSs()
for exon_1 in sorted(feat_1):
exon_1_loc = exon_1.location_on(assembly_1)
cord1 = asm_id + ':' + str(exon_1_loc.fmin) + ':' + str(exon_1_loc.fmax) + ':' + str(exon_1_loc.strand)
ex_mrna1.add(cord1)
ex_union = ex_mrna1.union(ex_mrna2)
if len(ex_union) == len(ex_mrna1) and len(ex_union) == len(ex_mrna2):
gene_true.add(cord_g)
true_file.write(gene_1.id + '\t' + gene_2.id + '\n')
break
for asm_id in assemblies_2:
if asm_id not in chr:
assembly_2 = assemblies_2.get(asm_id, -1)
genes_2 = assembly_2.genes()
for gene_2 in sorted(genes_2):
gene_2_loc = gene_2.location_on(assembly_2)
cord_p = asm_id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
if cord_p not in p_gene:
p_gene.append(cord_p)
ex_start = []
ex_stop = []
for mrna_2 in sorted(gene_2.mRNAs()):
mrna_2_loc = mrna_2.location_on(assembly_2)
cord = asm_id + ':' + str(mrna_2_loc.fmin) + ':' + str(mrna_2_loc.fmax) + ':' + str(mrna_2_loc.strand)
if cord not in p_mrna:
p_mrna.append(cord)
if args.feature == 'Exon':
feat_2 = mrna_2.exons()
if args.feature == 'CDS':
feat_2 = mrna_2.CDSs()
for exon_2 in sorted(feat_2):
exon_2_loc = exon_2.location_on(assembly_2)
cord = asm_id + ':' + str(exon_2_loc.fmin) + ':' + str(exon_2_loc.fmax) + ':' + str(exon_2_loc.strand)
if cord not in p_exons:
p_exons.append(cord)
ex_start.append(exon_2_loc.fmin)
ex_stop.append(exon_2_loc.fmax)
ex_start.sort()
ex_stop.sort()
if len(ex_start) >= 1:
cds2 = asm_id + ':' + gene_2.id + ':' + str(ex_start[0]) + ':' + str(ex_stop[-1]) + ':' + str(gene_2_loc.strand)
else:
cds2 = asm_id + ':' + gene_2.id + ':' + str(gene_2_loc.fmin) + ':' + str(gene_2_loc.fmax) + ':' + str(gene_2_loc.strand)
if cord_p not in p_cd:
p_cds.append(cds2)
p_cd.append(cord_p)
a_base = 0
p_base = 0
true_base_value = 0
exon2_bed = args.output_dir + '/exon_2.bed'
e_bed = open(exon2_bed, 'w')
for exon in p_exons:
chrom = exon.split(':')[0]
start = int(exon.split(':')[1])
stop = int(exon.split(':')[2])
strand = exon.split(':')[3]
if strand == str(1):
strand = '+'
else:
strand = '-'
e_bed.write(chrom + '\t' + str(start) + '\t' + str(stop) + '\texon\t' + str(0) + '\t' + strand + '\n')
e_bed.close()
out2 = args.output_dir + '/exon_2_merged.bed'
cmd = 'bedtools merge -nms -scores sum -i ' + exon2_bed + ' -s >' + out2
os.system(cmd)
exon1_bed = args.output_dir + '/exon_1.bed'
e_bed = open(exon1_bed, 'w')
for exon in a_exons:
chrom = exon.split(':')[0]
start = int(exon.split(':')[1])
stop = int(exon.split(':')[2])
strand = exon.split(':')[3]
if strand == str(1):
strand = '+'
else:
strand = '-'
e_bed.write(chrom + '\t' + str(start) + '\t' + str(stop) + '\texon\t' + str(0) + '\t' + strand + '\n')
e_bed.close()
out1 = args.output_dir + '/exon_1_merged.bed'
cmd = 'bedtools merge -nms -scores sum -i ' + exon1_bed + ' -s >' + out1
os.system(cmd)
out_intersect = args.output_dir + '/exon_1_2_intersect.bed'
cmd = 'bedtools intersect -s -wo -a ' + out1 + ' -b ' + out2 + ' >' + out_intersect
os.system(cmd)
a_base_file = open(out1, 'r')
for line in a_base_file:
arr = line.split('\t')
a_base = a_base + (int(arr[2]) - int(arr[1]))
a_base_file.close()
p_base_file = open(out2, 'r')
for line in p_base_file:
arr = line.split('\t')
p_base = p_base + (int(arr[2]) - int(arr[1]))
p_base_file.close()
true_base_file = open(out_intersect, 'r')
for line in true_base_file:
arr = line.split('\t')
true_base_value = true_base_value + int(arr[12])
true_base_file.close()
(a_base_val, p_base_val, true_base) = (a_base, p_base, true_base_value)
base_sn = true_base / a_base_val * 100
base_sp = true_base / p_base_val * 100
annotated_exon = len(a_exons)
predicted_exon = len(p_exons)
true_pred_exon = len(exon_pred_all)
exon_sn = true_pred_exon / annotated_exon * 100
exon_sp = true_pred_exon / predicted_exon * 100
annotated_gene = len(a_gene)
predicted_gene = len(p_gene)
true_pred_gene = len(gene_true)
gene_sn = true_pred_gene / annotated_gene * 100
gene_sp = true_pred_gene / predicted_gene * 100
print('Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n')
print('Gene\t' + str(annotated_gene) + '\t' + str(predicted_gene) + '\t' + str(true_pred_gene) + '\t' + str(gene_sn) + '\t' + str(gene_sp))
print(args.feature + '\t' + str(annotated_exon) + '\t' + str(predicted_exon) + '\t' + str(true_pred_exon) + '\t' + str(exon_sn) + '\t' + str(exon_sp))
print('Base\t' + str(a_base_val) + '\t' + str(p_base_val) + '\t' + str(true_base) + '\t' + str(base_sn) + '\t' + str(base_sp))
out_file = args.output_dir + '/summary.txt'
if not os.path.exists(args.output_dir):
sys.exit('Directory does not exist.')
fout = open(out_file, 'w')
fout.write('Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n')
fout.write('Gene\t' + str(annotated_gene) + '\t' + str(predicted_gene) + '\t' + str(true_pred_gene) + '\t' + str(gene_sn) + '\t' + str(gene_sp) + '\n')
fout.write(args.feature + '\t' + str(annotated_exon) + '\t' + str(predicted_exon) + '\t' + str(true_pred_exon) + '\t' + str(exon_sn) + '\t' + str(exon_sp) + '\n')
fout.write('Base\t' + str(a_base_val) + '\t' + str(p_base_val) + '\t' + str(true_base) + '\t' + str(base_sn) + '\t' + str(base_sp) + '\n\n')
gene_found = 0
gene_opp = 0
gene_no_overlap = 0
gene_more_than_one_overlap = 0
temp_file1 = args.output_dir + '/' + 'pred' + '.found.txt'
ft1 = open(temp_file1, 'w')
temp_file2 = args.output_dir + '/' + 'pred' + '.opposite.txt'
ft2 = open(temp_file2, 'w')
temp_file3 = args.output_dir + '/' + 'pred' + '.no_overlap.txt'
ft3 = open(temp_file3, 'w')
temp_file4 = args.output_dir + '/' + 'pred' + '.overlap_more_than_one.txt'
ft4 = open(temp_file4, 'w')
for c1 in p_cds:
gene_overlap_same = []
gene_overlap_opp = []
chrom1 = c1.split(':')[0]
cds_id1 = c1.split(':')[1]
start1 = int(c1.split(':')[2])
stop1 = int(c1.split(':')[3])
strand1 = c1.split(':')[4]
for c2 in a_cds:
chrom2 = c2.split(':')[0]
cds_id2 = c2.split(':')[1]
start2 = int(c2.split(':')[2])
stop2 = int(c2.split(':')[3])
strand2 = c2.split(':')[4]
if chrom1 != chrom2:
continue
if start1 <= stop2 and start2 <= stop1:
arr = [start1, stop1, start2, stop2]
arr.sort()
len_overlap = arr[2] - arr[1]
per_overlap = len_overlap / (stop1 - start1) * 100
if strand1 == strand2:
gene_overlap_same.append(per_overlap)
else:
gene_overlap_opp.append(per_overlap)
if len(gene_overlap_same) == 1:
gene_found += 1
ft1.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) >= 1:
gene_opp += 1
ft2.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) == 0:
gene_no_overlap += 1
ft3.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) > 1:
gene_more_than_one_overlap += 1
ft4.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
arr = [gene_found, gene_opp, gene_no_overlap, gene_more_than_one_overlap]
arr_pred = arr
gene_found = 0
gene_opp = 0
gene_no_overlap = 0
gene_more_than_one_overlap = 0
temp_file1 = args.output_dir + '/' + 'known' + '.found.txt'
ft1 = open(temp_file1, 'w')
temp_file2 = args.output_dir + '/' + 'known' + '.opposite.txt'
ft2 = open(temp_file2, 'w')
temp_file3 = args.output_dir + '/' + 'known' + '.no_overlap.txt'
ft3 = open(temp_file3, 'w')
temp_file4 = args.output_dir + '/' + 'known' + '.overlap_more_than_one.txt'
ft4 = open(temp_file4, 'w')
for c1 in a_cds:
gene_overlap_same = []
gene_overlap_opp = []
chrom1 = c1.split(':')[0]
cds_id1 = c1.split(':')[1]
start1 = int(c1.split(':')[2])
stop1 = int(c1.split(':')[3])
strand1 = c1.split(':')[4]
for c2 in p_cds:
chrom2 = c2.split(':')[0]
cds_id2 = c2.split(':')[1]
start2 = int(c2.split(':')[2])
stop2 = int(c2.split(':')[3])
strand2 = c2.split(':')[4]
if chrom1 != chrom2:
continue
if start1 <= stop2 and start2 <= stop1:
arr = [start1, stop1, start2, stop2]
arr.sort()
len_overlap = arr[2] - arr[1]
per_overlap = len_overlap / (stop1 - start1) * 100
if strand1 == strand2:
gene_overlap_same.append(per_overlap)
else:
gene_overlap_opp.append(per_overlap)
if len(gene_overlap_same) == 1:
gene_found += 1
ft1.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) >= 1:
gene_opp += 1
ft2.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) == 0:
gene_no_overlap += 1
ft3.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) > 1:
gene_more_than_one_overlap += 1
ft4.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
arr = [gene_found, gene_opp, gene_no_overlap, gene_more_than_one_overlap]
arr_known = arr
gene_found = 0
gene_opp = 0
gene_no_overlap = 0
gene_more_than_one_overlap = 0
temp_file1 = args.output_dir + '/' + 'pred_same' + '.found.txt'
ft1 = open(temp_file1, 'w')
temp_file2 = args.output_dir + '/' + 'pred_same' + '.opposite.txt'
ft2 = open(temp_file2, 'w')
temp_file3 = args.output_dir + '/' + 'pred_same' + '.no_overlap.txt'
ft3 = open(temp_file3, 'w')
temp_file4 = args.output_dir + '/' + 'pred_same' + '.overlap_more_than_one.txt'
ft4 = open(temp_file4, 'w')
for c1 in p_cds:
gene_overlap_same = []
gene_overlap_opp = []
chrom1 = c1.split(':')[0]
cds_id1 = c1.split(':')[1]
start1 = int(c1.split(':')[2])
stop1 = int(c1.split(':')[3])
strand1 = c1.split(':')[4]
for c2 in p_cds:
chrom2 = c2.split(':')[0]
cds_id2 = c2.split(':')[1]
start2 = int(c2.split(':')[2])
stop2 = int(c2.split(':')[3])
strand2 = c2.split(':')[4]
if chrom1 != chrom2:
continue
if start1 <= stop2 and start2 <= stop1:
arr = [start1, stop1, start2, stop2]
arr.sort()
len_overlap = arr[2] - arr[1]
per_overlap = len_overlap / (stop1 - start1) * 100
if strand1 == strand2:
gene_overlap_same.append(per_overlap)
else:
gene_overlap_opp.append(per_overlap)
if len(gene_overlap_same) == 1:
gene_found += 1
ft1.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) >= 1:
gene_opp += 1
ft2.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) == 0 and len(gene_overlap_opp) == 0:
gene_no_overlap += 1
ft3.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
if len(gene_overlap_same) > 1:
gene_more_than_one_overlap += 1
ft4.write(chrom1 + '\t' + str(start1) + '\t' + str(stop1) + '\t' + strand1 + '\t' + cds_id1 + '\n')
arr = [gene_found, gene_opp, gene_no_overlap, gene_more_than_one_overlap]
arr_pred_same = arr
new_gene = arr_pred[2]
gene_merge = arr_pred[3]
gene_found = arr_pred[0]
gene_opp = arr_pred[1]
gene_missing = arr_known[2]
gene = arr_known[0]
gene_opp_known = arr_known[1]
gene_split = arr_known[3]
gene_pred_overlap_opp = arr_pred_same[1]
print('1. No. of known gene : ', len(a_cds))
print('2. No. of predicted gene : ', len(p_cds))
print('3. No. of predicted gene overlapping 0 known gene (new gene): ', new_gene)
print('4. No. of predicted gene overlapping > 1 known gene (gene merge) : ', gene_merge)
print('5. No. of predicted gene overlaping 1 known gene : ', gene_found)
print('6. No. of predicted gene overlapping >= 1 known gene in opp strand : ', gene_opp)
print('7. No. of predicted gene overlapping 1 known gene (exact intron/exon boundaries) : ', true_pred_gene)
print('8. No. of predicted gene overlapping >= 1 predicted gene in opp strand : ', gene_pred_overlap_opp)
print('9. No. of known gene overlapping 0 predicted gene (gene missing): ', gene_missing)
print('10. No. of known gene overlapping > 1 predicted gene(gene split) : ', gene_split)
print('11. No. of known gene overlaping 1 predicted gene : ', gene)
print('12. No. of known gene overlapping >= 1 predicted gene in opp strand : ', gene_opp_known)
out_file = args.output_dir + '/final_stats.txt'
if not os.path.exists(args.output_dir):
sys.exit('Directory does not exist.')
fout = open(out_file, 'w')
fout.write('1. No. of known gene : ' + str(len(a_cds)) + '\n')
fout.write('2. No. of predicted gene : ' + str(len(p_cds)) + '\n')
fout.write('3. No. of predicted gene overlapping 0 known gene (new gene): ' + str(new_gene) + '\n')
fout.write('4. No. of predicted gene overlapping > 1 known gene (gene merge) : ' + str(gene_merge) + '\n')
fout.write('5. No. of predicted gene overlaping 1 known gene : ' + str(gene_found) + '\n')
fout.write('6. No. of predicted gene overlapping >= 1 known gene in opp strand : ' + str(gene_opp) + '\n')
fout.write('7. No. of predicted gene overlapping 1 known gene (exact intron/exon boundary) : ' + str(true_pred_gene) + '\n')
fout.write('8. No. of predicted gene overlapping >= 1 predicted gene in opp strand : ' + str(gene_pred_overlap_opp) + '\n')
fout.write('9. No. of known gene overlapping 0 predicted gene (gene missing): ' + str(gene_missing) + '\n')
fout.write('10. No. of known gene overlapping > 1 predicted gene (gene_split): ' + str(gene_split) + '\n')
fout.write('11. No. of known gene overlaping 1 predicted gene : ' + str(gene) + '\n')
fout.write('12. No. of known gene overlapping >= 1 predicted gene in opp strand : ' + str(gene_opp_known) + '\n')
true_pred_file = args.output_dir + '/true_pred.txt'
fout_true = open(true_pred_file, 'w')
for true_gene in gene_true:
fout_true.write(true_gene + '\n')
delete_file = ['exon_1.bed', 'exon_2.bed', 'exon_1_merged.bed', 'exon_2_merged.bed', 'exon_1_2_intersect.bed']
for f in delete_file:
cmd = 'rm ' + args.output_dir + '/' + f
os.system(cmd)
|
biocode
|
positive
|
def main(_):
images = config.dataset.images()
labels = config.dataset.labels()
if not images:
print('No images specified.', file=sys.stderr)
return 1
if not labels:
print('No labels specified.', file=sys.stderr)
else:
assert len(images) == len(labels)
print('Validating %d images.' % len(images))
<DeepExtract>
errors = ''
classes = get_class_dict()
counts = {}
if config.dataset.labels().nodata_value():
counts[len(config.dataset.classes)] = 0
header = classes_string(classes, classes, 'Label')
print(header)
print('-' * len(header))
for i in range(len(labels)):
errors += check_label(images, labels, classes, counts, i)
print('-' * len(header))
print(classes_string(classes, counts, 'Total'))
print()
if config.dataset.labels().nodata_value():
nodata_c = counts[len(config.dataset.classes)]
total = sum(counts.values())
print('Nodata is %6.2f%% of the data. Total Pixels: %.2f million.' % (nodata_c / total * 100, (total - nodata_c) / 1000000))
counts = []
print()
measures = {0: 'min', 1: 'max', 2: 'mean', 3: 'stddev'}
header = classes_string(classes, measures, 'Image')
print(header)
print('-' * len(header))
for i in range(len(images)):
errors += check_image(images, measures, counts, i)
print('-' * len(header))
print_image_totals(images, measures, counts)
print()
errors = errors
</DeepExtract>
tc = config.train.spec()
if tc.validation.images:
print('Validating %d validation images.' % len(tc.validation.images))
errors += evaluate_images(tc.validation.images, tc.validation.labels)
if errors:
print(errors, file=sys.stderr)
return -1
print('Validation successful.')
return 0
|
def main(_):
images = config.dataset.images()
labels = config.dataset.labels()
if not images:
print('No images specified.', file=sys.stderr)
return 1
if not labels:
print('No labels specified.', file=sys.stderr)
else:
assert len(images) == len(labels)
print('Validating %d images.' % len(images))
errors = ''
classes = get_class_dict()
counts = {}
if config.dataset.labels().nodata_value():
counts[len(config.dataset.classes)] = 0
header = classes_string(classes, classes, 'Label')
print(header)
print('-' * len(header))
for i in range(len(labels)):
errors += check_label(images, labels, classes, counts, i)
print('-' * len(header))
print(classes_string(classes, counts, 'Total'))
print()
if config.dataset.labels().nodata_value():
nodata_c = counts[len(config.dataset.classes)]
total = sum(counts.values())
print('Nodata is %6.2f%% of the data. Total Pixels: %.2f million.' % (nodata_c / total * 100, (total - nodata_c) / 1000000))
counts = []
print()
measures = {0: 'min', 1: 'max', 2: 'mean', 3: 'stddev'}
header = classes_string(classes, measures, 'Image')
print(header)
print('-' * len(header))
for i in range(len(images)):
errors += check_image(images, measures, counts, i)
print('-' * len(header))
print_image_totals(images, measures, counts)
print()
errors = errors
tc = config.train.spec()
if tc.validation.images:
print('Validating %d validation images.' % len(tc.validation.images))
errors += evaluate_images(tc.validation.images, tc.validation.labels)
if errors:
print(errors, file=sys.stderr)
return -1
print('Validation successful.')
return 0
|
delta
|
positive
|
def add_perturbations(self):
<DeepExtract>
meters = []
if self.means:
final = np.mean
else:
final = None
for name in self.perturbation:
metric = metrics.get(name)
meter = Meter(f'perturbation_{name}', metric, 'scenario.x', 'scenario.x_adv', final=final, final_name=f'perturbation_mean_{name}', record_final_only=self.record_final_only)
meters.append(meter)
meters = meters
</DeepExtract>
<DeepExtract>
hub = get_hub()
for m in meters:
hub.connect_meter(m)
if writer is not None:
hub.connect_writer(writer, meters=meters)
</DeepExtract>
|
def add_perturbations(self):
meters = []
if self.means:
final = np.mean
else:
final = None
for name in self.perturbation:
metric = metrics.get(name)
meter = Meter(f'perturbation_{name}', metric, 'scenario.x', 'scenario.x_adv', final=final, final_name=f'perturbation_mean_{name}', record_final_only=self.record_final_only)
meters.append(meter)
meters = meters
hub = get_hub()
for m in meters:
hub.connect_meter(m)
if writer is not None:
hub.connect_writer(writer, meters=meters)
|
armory
|
positive
|
def __init__(self, roidb, config, batch_size=1, shuffle=False, has_rpn=False):
super(TestLoader, self).__init__()
self.cfg = config
self.roidb = roidb
self.batch_size = batch_size
self.shuffle = shuffle
self.has_rpn = has_rpn
self.size = np.sum([x['frame_seg_len'] for x in self.roidb])
self.index = np.arange(self.size)
self.data_name = ['data', 'im_info', 'data_key', 'feat_key']
self.label_name = None
self.cur_roidb_index = 0
self.cur_frameid = 0
self.data_key = None
self.key_frameid = 0
self.cur_seg_len = 0
self.key_frame_flag = -1
self.cur = 0
self.data = None
self.label = []
self.im_info = None
<DeepExtract>
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
</DeepExtract>
<DeepExtract>
cur_roidb = self.roidb[self.cur_roidb_index].copy()
cur_roidb['image'] = cur_roidb['pattern'] % self.cur_frameid
self.cur_seg_len = cur_roidb['frame_seg_len']
(data, label, im_info) = get_rpn_testbatch([cur_roidb], self.cfg)
if self.key_frameid == self.cur_frameid:
self.data_key = data[0]['data'].copy()
if self.key_frameid == 0:
self.key_frame_flag = 0
else:
self.key_frame_flag = 1
else:
self.key_frame_flag = 2
extend_data = [{'data': data[0]['data'], 'im_info': data[0]['im_info'], 'data_key': self.data_key, 'feat_key': np.zeros((1, self.cfg.network.DFF_FEAT_DIM, 1, 1))}]
self.data = [[mx.nd.array(extend_data[i][name]) for name in self.data_name] for i in xrange(len(data))]
self.im_info = im_info
</DeepExtract>
|
def __init__(self, roidb, config, batch_size=1, shuffle=False, has_rpn=False):
super(TestLoader, self).__init__()
self.cfg = config
self.roidb = roidb
self.batch_size = batch_size
self.shuffle = shuffle
self.has_rpn = has_rpn
self.size = np.sum([x['frame_seg_len'] for x in self.roidb])
self.index = np.arange(self.size)
self.data_name = ['data', 'im_info', 'data_key', 'feat_key']
self.label_name = None
self.cur_roidb_index = 0
self.cur_frameid = 0
self.data_key = None
self.key_frameid = 0
self.cur_seg_len = 0
self.key_frame_flag = -1
self.cur = 0
self.data = None
self.label = []
self.im_info = None
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
cur_roidb = self.roidb[self.cur_roidb_index].copy()
cur_roidb['image'] = cur_roidb['pattern'] % self.cur_frameid
self.cur_seg_len = cur_roidb['frame_seg_len']
(data, label, im_info) = get_rpn_testbatch([cur_roidb], self.cfg)
if self.key_frameid == self.cur_frameid:
self.data_key = data[0]['data'].copy()
if self.key_frameid == 0:
self.key_frame_flag = 0
else:
self.key_frame_flag = 1
else:
self.key_frame_flag = 2
extend_data = [{'data': data[0]['data'], 'im_info': data[0]['im_info'], 'data_key': self.data_key, 'feat_key': np.zeros((1, self.cfg.network.DFF_FEAT_DIM, 1, 1))}]
self.data = [[mx.nd.array(extend_data[i][name]) for name in self.data_name] for i in xrange(len(data))]
self.im_info = im_info
|
Deep-Feature-Flow
|
positive
|
def gnss_nmea_gsv():
"""
Get list where entries are grouped by all available satellites in NMEA GSV data.
"""
<DeepExtract>
res = client.send_sync(_msg_pack('AT+QGPSGNMEA="{:s}"'.format('gsv'), cooldown_delay=cooldown_delay, **kwargs))
</DeepExtract>
if 'data' in res:
values = []
sentences = _parse_dict(res.pop('data'), multiline=True)['+QGPSGNMEA']
for sentence in sentences:
obj = pynmea2.parse(sentence, check=True)
msg_idx = int(obj.msg_num)
msg_count = int(obj.num_messages)
sat_count = int(obj.num_sv_in_view)
for idx in range(1, 5):
prn = getattr(obj, 'sv_prn_num_{:d}'.format(idx))
if not prn:
continue
azimuth = getattr(obj, 'azimuth_{:d}'.format(idx))
elevation = getattr(obj, 'elevation_deg_{:d}'.format(idx))
snr = getattr(obj, 'snr_{:d}'.format(idx))
values.append({'msg_idx': msg_idx, 'msg_count': msg_count, 'sat_count': sat_count, 'prn': prn, 'azimuth': float(azimuth) if azimuth else None, 'elevation': float(elevation) if elevation else None, 'snr': float(snr) if snr else None})
res['values'] = values
return res
|
def gnss_nmea_gsv():
"""
Get list where entries are grouped by all available satellites in NMEA GSV data.
"""
res = client.send_sync(_msg_pack('AT+QGPSGNMEA="{:s}"'.format('gsv'), cooldown_delay=cooldown_delay, **kwargs))
if 'data' in res:
values = []
sentences = _parse_dict(res.pop('data'), multiline=True)['+QGPSGNMEA']
for sentence in sentences:
obj = pynmea2.parse(sentence, check=True)
msg_idx = int(obj.msg_num)
msg_count = int(obj.num_messages)
sat_count = int(obj.num_sv_in_view)
for idx in range(1, 5):
prn = getattr(obj, 'sv_prn_num_{:d}'.format(idx))
if not prn:
continue
azimuth = getattr(obj, 'azimuth_{:d}'.format(idx))
elevation = getattr(obj, 'elevation_deg_{:d}'.format(idx))
snr = getattr(obj, 'snr_{:d}'.format(idx))
values.append({'msg_idx': msg_idx, 'msg_count': msg_count, 'sat_count': sat_count, 'prn': prn, 'azimuth': float(azimuth) if azimuth else None, 'elevation': float(elevation) if elevation else None, 'snr': float(snr) if snr else None})
res['values'] = values
return res
|
autopi-core
|
positive
|
@pytest.mark.django_db
@patch(f'{code_path}.rqi')
@patch('polaris.sep10.utils.check_auth', mock_check_auth_success)
def test_get_price_failure_bad_sell_amount(mock_rqi, client):
<DeepExtract>
usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
brl_offchain.delivery_methods.add(*delivery_methods)
pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
</DeepExtract>
response = client.get(PRICE_ENDPOINT, {'sell_asset': data['stellar_assets'][0].asset_identification_format, 'sell_amount': 'test', 'buy_asset': data['offchain_assets'][0].asset_identification_format})
assert response.status_code == 400, response.content
assert response.json() == {'error': "invalid 'buy_amount' or 'sell_amount'; Expected decimal strings."}
mock_rqi.get_price.assert_not_called()
|
@pytest.mark.django_db
@patch(f'{code_path}.rqi')
@patch('polaris.sep10.utils.check_auth', mock_check_auth_success)
def test_get_price_failure_bad_sell_amount(mock_rqi, client):
usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
brl_offchain.delivery_methods.add(*delivery_methods)
pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
response = client.get(PRICE_ENDPOINT, {'sell_asset': data['stellar_assets'][0].asset_identification_format, 'sell_amount': 'test', 'buy_asset': data['offchain_assets'][0].asset_identification_format})
assert response.status_code == 400, response.content
assert response.json() == {'error': "invalid 'buy_amount' or 'sell_amount'; Expected decimal strings."}
mock_rqi.get_price.assert_not_called()
|
django-polaris
|
positive
|
def save_to(self, fp: TextIO):
"""
Serializes the call commit graph contained in the current instance into the specified text IO as JSON.
"""
<DeepExtract>
d = {'nodes': [serialize_node(n) for n in self._nodes_dict.values()], 'edges': [serialize_edge(n) for n in self._edges_dict.values()], 'commits': [serialize_commit(n) for n in self._commits.values()]}
</DeepExtract>
json.dump(d, fp)
|
def save_to(self, fp: TextIO):
"""
Serializes the call commit graph contained in the current instance into the specified text IO as JSON.
"""
d = {'nodes': [serialize_node(n) for n in self._nodes_dict.values()], 'edges': [serialize_edge(n) for n in self._edges_dict.values()], 'commits': [serialize_commit(n) for n in self._commits.values()]}
json.dump(d, fp)
|
code-analytics
|
positive
|
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
<DeepExtract>
for dir in os.environ['PATH'] + os.pathsep + default_path.split(os.pathsep):
binpath = pjoin(dir, 'nvcc')
if os.path.exists(binpath):
nvcc = os.path.abspath(binpath)
nvcc = None
</DeepExtract>
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home': home, 'nvcc': nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, 'lib64')}
for (k, v) in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
|
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
for dir in os.environ['PATH'] + os.pathsep + default_path.split(os.pathsep):
binpath = pjoin(dir, 'nvcc')
if os.path.exists(binpath):
nvcc = os.path.abspath(binpath)
nvcc = None
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home': home, 'nvcc': nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, 'lib64')}
for (k, v) in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
|
AutoML
|
positive
|
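For reference, a minimal standalone sketch of the PATH lookup that locate_cuda's docstring describes: prefer a CUDAHOME environment variable, otherwise search PATH plus the conventional install directory. The helper name find_nvcc and its default argument are illustrative and not taken from the AutoML repo.

```python
import os
import shutil


def find_nvcc(default_dir='/usr/local/cuda/bin'):
    """Return an absolute path to nvcc, or None if it cannot be located.

    Checks the CUDAHOME environment variable first, then falls back to
    searching PATH plus a conventional default install directory.
    """
    home = os.environ.get('CUDAHOME')
    if home:
        candidate = os.path.join(home, 'bin', 'nvcc')
        return candidate if os.path.isfile(candidate) else None
    # shutil.which accepts an explicit search-path string via path=.
    search_path = os.environ.get('PATH', '') + os.pathsep + default_dir
    return shutil.which('nvcc', path=search_path)
```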
def play(self, model: model.Model) -> Dict[str, int]:
"""Play a whole game."""
for _ in range(self.round, self.total_rounds):
<DeepExtract>
prompt = get_prompt(self.letter_set, self.correct_words, self.incorrect_words, self.score, self.round, self.total_rounds)
logging.info(prompt)
attempt = model.generate_text(prompt)
logging.info(attempt)
self._score_round(attempt)
self.round += 1
</DeepExtract>
score_dict = {'score': self.score, 'num_correct': len(self.correct_words), 'num_incorrect': len(self.incorrect_words), 'max_score': self.max_score, 'word_count': self.word_count}
return score_dict
|
def play(self, model: model.Model) -> Dict[str, int]:
"""Play a whole game."""
for _ in range(self.round, self.total_rounds):
prompt = get_prompt(self.letter_set, self.correct_words, self.incorrect_words, self.score, self.round, self.total_rounds)
logging.info(prompt)
attempt = model.generate_text(prompt)
logging.info(attempt)
self._score_round(attempt)
self.round += 1
score_dict = {'score': self.score, 'num_correct': len(self.correct_words), 'num_incorrect': len(self.incorrect_words), 'max_score': self.max_score, 'word_count': self.word_count}
return score_dict
|
BIG-bench
|
positive
|
def dynamic_rnn(cell, inputs, att_scores=None, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
Used to copy-through state and zero-out outputs when past a batch
element's sequence length. So it's more for correctness than performance.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not _like_rnncell(cell):
raise TypeError('cell must be an instance of RNNCell')
flat_input = nest.flatten(inputs)
if not time_major:
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple((_transpose_batch_time(input_) for input_ in flat_input))
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError('sequence_length must be a vector of length batch_size, but saw shape: %s' % sequence_length.get_shape())
sequence_length = array_ops.identity(sequence_length, name='sequence_length')
with vs.variable_scope(scope or 'rnn') as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
<DeepExtract>
for input_ in flat_input:
shape = input_.shape
if shape.ndims is None:
continue
if shape.ndims < 2:
raise ValueError('Expected input tensor %s to have rank at least 2' % input_)
batch_size = shape[1].value
if batch_size is not None:
batch_size = batch_size
batch_size = array_ops.shape(flat_input[0])[1]
</DeepExtract>
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError('If there is no initial_state, you must give a dtype.')
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), ['Expected shape for Tensor %s is ' % x.name, packed_shape, ' but saw shape: ', x_shape])
if sequence_length is not None:
with ops.control_dependencies([_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(sequence_length, name='CheckSeqLen')
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
<DeepExtract>
state = state
assert isinstance(parallel_iterations, int), 'parallel_iterations must be int'
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = _best_effort_input_batch_size(flat_input)
inputs_got_shape = tuple((input_.get_shape().with_rank_at_least(3) for input_ in flat_input))
(const_time_steps, const_batch_size) = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError('Input size (depth of inputs) must be accessible via shape inference, but saw value None.')
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError('Time steps is not the same for all the elements in the input in a batch.')
if const_batch_size != got_batch_size:
raise ValueError('Batch_size is not the same for all the elements in the input.')
def _create_zero_arrays(size):
size = _concat(batch_size, size)
(outputs, final_state) = array_ops.zeros(array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple((_create_zero_arrays(output) for output in flat_output_size))
zero_output = nest.pack_sequence_as(structure=cell.output_size, flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name='time')
with ops.name_scope('dynamic_rnn') as scope:
base_name = scope
def _create_ta(name, dtype):
(outputs, final_state) = tensor_array_ops.TensorArray(dtype=dtype, size=time_steps, tensor_array_name=base_name + name)
output_ta = tuple((_create_ta('output_%d' % i, _infer_state_dtype(dtype, state)) for i in range(len(flat_output_size))))
input_ta = tuple((_create_ta('input_%d' % i, flat_input[i].dtype) for i in range(len(flat_input))))
input_ta = tuple((ta.unstack(input_) for (ta, input_) in zip(input_ta, flat_input)))
def _time_step(time, output_ta_t, state, att_scores=None):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple((ta.read(time) for ta in input_ta))
for (input_, shape) in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
if att_scores is not None:
att_score = att_scores[:, time, :]
call_cell = lambda : cell(input_t, state, att_score)
else:
call_cell = lambda : cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True)
else:
(output, new_state) = call_cell()
output = nest.flatten(output)
output_ta_t = tuple((ta.write(time, out) for (ta, out) in zip(output_ta_t, output)))
if att_scores is not None:
(outputs, final_state) = (time + 1, output_ta_t, new_state, att_scores)
else:
(outputs, final_state) = (time + 1, output_ta_t, new_state)
if att_scores is not None:
(_, output_final_ta, final_state, _) = control_flow_ops.while_loop(cond=lambda time, *_: time < time_steps, body=_time_step, loop_vars=(time, output_ta, state, att_scores), parallel_iterations=parallel_iterations, swap_memory=swap_memory)
else:
(_, output_final_ta, final_state) = control_flow_ops.while_loop(cond=lambda time, *_: time < time_steps, body=_time_step, loop_vars=(time, output_ta, state), parallel_iterations=parallel_iterations, swap_memory=swap_memory)
final_outputs = tuple((ta.stack() for ta in output_final_ta))
for (output, output_size) in zip(final_outputs, flat_output_size):
shape = _concat([const_time_steps, const_batch_size], output_size, static=True)
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(structure=cell.output_size, flat_sequence=final_outputs)
(outputs, final_state) = (final_outputs, final_state)
</DeepExtract>
if not time_major:
outputs = nest.map_structure(_transpose_batch_time, outputs)
return (outputs, final_state)
|
def dynamic_rnn(cell, inputs, att_scores=None, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
Used to copy-through state and zero-out outputs when past a batch
element's sequence length. So it's more for correctness than performance.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not _like_rnncell(cell):
raise TypeError('cell must be an instance of RNNCell')
flat_input = nest.flatten(inputs)
if not time_major:
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple((_transpose_batch_time(input_) for input_ in flat_input))
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError('sequence_length must be a vector of length batch_size, but saw shape: %s' % sequence_length.get_shape())
sequence_length = array_ops.identity(sequence_length, name='sequence_length')
with vs.variable_scope(scope or 'rnn') as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
for input_ in flat_input:
shape = input_.shape
if shape.ndims is None:
continue
if shape.ndims < 2:
raise ValueError('Expected input tensor %s to have rank at least 2' % input_)
batch_size = shape[1].value
if batch_size is not None:
batch_size = batch_size
batch_size = array_ops.shape(flat_input[0])[1]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError('If there is no initial_state, you must give a dtype.')
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), ['Expected shape for Tensor %s is ' % x.name, packed_shape, ' but saw shape: ', x_shape])
if sequence_length is not None:
with ops.control_dependencies([_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(sequence_length, name='CheckSeqLen')
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
state = state
assert isinstance(parallel_iterations, int), 'parallel_iterations must be int'
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = _best_effort_input_batch_size(flat_input)
inputs_got_shape = tuple((input_.get_shape().with_rank_at_least(3) for input_ in flat_input))
(const_time_steps, const_batch_size) = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError('Input size (depth of inputs) must be accessible via shape inference, but saw value None.')
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError('Time steps is not the same for all the elements in the input in a batch.')
if const_batch_size != got_batch_size:
raise ValueError('Batch_size is not the same for all the elements in the input.')
def _create_zero_arrays(size):
size = _concat(batch_size, size)
(outputs, final_state) = array_ops.zeros(array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple((_create_zero_arrays(output) for output in flat_output_size))
zero_output = nest.pack_sequence_as(structure=cell.output_size, flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name='time')
with ops.name_scope('dynamic_rnn') as scope:
base_name = scope
def _create_ta(name, dtype):
(outputs, final_state) = tensor_array_ops.TensorArray(dtype=dtype, size=time_steps, tensor_array_name=base_name + name)
output_ta = tuple((_create_ta('output_%d' % i, _infer_state_dtype(dtype, state)) for i in range(len(flat_output_size))))
input_ta = tuple((_create_ta('input_%d' % i, flat_input[i].dtype) for i in range(len(flat_input))))
input_ta = tuple((ta.unstack(input_) for (ta, input_) in zip(input_ta, flat_input)))
def _time_step(time, output_ta_t, state, att_scores=None):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple((ta.read(time) for ta in input_ta))
for (input_, shape) in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
if att_scores is not None:
att_score = att_scores[:, time, :]
call_cell = lambda : cell(input_t, state, att_score)
else:
call_cell = lambda : cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True)
else:
(output, new_state) = call_cell()
output = nest.flatten(output)
output_ta_t = tuple((ta.write(time, out) for (ta, out) in zip(output_ta_t, output)))
if att_scores is not None:
(outputs, final_state) = (time + 1, output_ta_t, new_state, att_scores)
else:
(outputs, final_state) = (time + 1, output_ta_t, new_state)
if att_scores is not None:
(_, output_final_ta, final_state, _) = control_flow_ops.while_loop(cond=lambda time, *_: time < time_steps, body=_time_step, loop_vars=(time, output_ta, state, att_scores), parallel_iterations=parallel_iterations, swap_memory=swap_memory)
else:
(_, output_final_ta, final_state) = control_flow_ops.while_loop(cond=lambda time, *_: time < time_steps, body=_time_step, loop_vars=(time, output_ta, state), parallel_iterations=parallel_iterations, swap_memory=swap_memory)
final_outputs = tuple((ta.stack() for ta in output_final_ta))
for (output, output_size) in zip(final_outputs, flat_output_size):
shape = _concat([const_time_steps, const_batch_size], output_size, static=True)
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(structure=cell.output_size, flat_sequence=final_outputs)
(outputs, final_state) = (final_outputs, final_state)
if not time_major:
outputs = nest.map_structure(_transpose_batch_time, outputs)
return (outputs, final_state)
|
ai_explore
|
positive
|
def find_header(self, header_names, default_val=None):
"""given a list of headers return the first one you can, default_val if you
don't find any
:param header_names: list, a list of headers, first one found is returned
:param default_val: mixed, returned if no matching header is found
:returns: mixed, the value of the header or default_val
"""
ret = default_val
for header_name in header_names:
if self.has_header(header_name):
<DeepExtract>
ret = self.headers.get(header_name, default_val)
</DeepExtract>
break
return ret
|
def find_header(self, header_names, default_val=None):
"""given a list of headers return the first one you can, default_val if you
don't find any
:param header_names: list, a list of headers, first one found is returned
:param default_val: mixed, returned if no matching header is found
:returns: mixed, the value of the header or default_val
"""
ret = default_val
for header_name in header_names:
if self.has_header(header_name):
ret = self.headers.get(header_name, default_val)
break
return ret
|
endpoints
|
positive
|
def _SortObjects(self, modelObjects=None, sortColumn=None, secondarySortColumn=None):
"""
Sort the given modelObjects in place.
This does not change the information shown in the control itself.
"""
if modelObjects is None:
modelObjects = self.modelObjects
if sortColumn is None:
<DeepExtract>
if self.sortColumnIndex < 0 or self.sortColumnIndex >= len(self.columns):
sortColumn = None
else:
sortColumn = self.columns[self.sortColumnIndex]
</DeepExtract>
if secondarySortColumn == sortColumn:
secondarySortColumn = None
if sortColumn is None:
return
evt = OLVEvent.SortEvent(self, self.sortColumnIndex, self.sortAscending, True)
self.GetEventHandler().ProcessEvent(evt)
if evt.IsVetoed() or evt.wasHandled:
return
def _getSortValue(x):
primary = sortColumn.GetValue(x)
try:
primary = primary.lower()
except AttributeError:
pass
if secondarySortColumn:
secondary = secondarySortColumn.GetValue(x)
try:
secondary = secondary.lower()
except AttributeError:
pass
return (primary, secondary)
else:
return primary
modelObjects.sort(key=_getSortValue, reverse=not self.sortAscending)
self.objectToIndexMap = None
|
def _SortObjects(self, modelObjects=None, sortColumn=None, secondarySortColumn=None):
"""
Sort the given modelObjects in place.
This does not change the information shown in the control itself.
"""
if modelObjects is None:
modelObjects = self.modelObjects
if sortColumn is None:
if self.sortColumnIndex < 0 or self.sortColumnIndex >= len(self.columns):
sortColumn = None
else:
sortColumn = self.columns[self.sortColumnIndex]
if secondarySortColumn == sortColumn:
secondarySortColumn = None
if sortColumn is None:
return
evt = OLVEvent.SortEvent(self, self.sortColumnIndex, self.sortAscending, True)
self.GetEventHandler().ProcessEvent(evt)
if evt.IsVetoed() or evt.wasHandled:
return
def _getSortValue(x):
primary = sortColumn.GetValue(x)
try:
primary = primary.lower()
except AttributeError:
pass
if secondarySortColumn:
secondary = secondarySortColumn.GetValue(x)
try:
secondary = secondary.lower()
except AttributeError:
pass
return (primary, secondary)
else:
return primary
modelObjects.sort(key=_getSortValue, reverse=not self.sortAscending)
self.objectToIndexMap = None
|
bookhub
|
positive
|
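The _SortObjects record above uses a tuple sort key with case-insensitive primary and secondary values. A self-contained sketch of that pattern, assuming plain dictionaries rather than wx model objects (sort_records is a hypothetical name):

```python
def sort_records(records, primary, secondary=None, ascending=True):
    """Sort dicts by a primary and optional secondary field, case-insensitively."""

    def normalise(value):
        # Lower-case strings so 'Apple' and 'apple' compare equal; leave other types alone.
        try:
            return value.lower()
        except AttributeError:
            return value

    def key(record):
        if secondary and secondary != primary:
            return (normalise(record[primary]), normalise(record[secondary]))
        return normalise(record[primary])

    return sorted(records, key=key, reverse=not ascending)


# Example: sort_records([{'name': 'b'}, {'name': 'A'}], 'name') -> [{'name': 'A'}, {'name': 'b'}]
```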
def genExponential(varname, param1, param2, avgLength1, avgLength2, lang):
<DeepExtract>
print('Poisson params: %f %f' % (param1, avgLength1))
bpms = float(param1 * avgLength1)
kbps = bpms * 8 / 1000
print('%f kpbs' % kbps)
if kbps < 56:
ratio = math.ceil(56 / kbps)
print('Raising %f' % ratio)
ratio = ratio * 1.5
print('Increading 50%')
param1 = param1 * ratio
elif kbps > 560:
ratio = math.floor(kbps / 560)
print('Lowering %f' % ratio)
param1 = param1 / ratio
print('Final Poisson: %f' % param1)
param1 = 0.5
</DeepExtract>
<DeepExtract>
print('Poisson params: %f %f' % (param2, avgLength2))
bpms = float(param2 * avgLength2)
kbps = bpms * 8 / 1000
print('%f kpbs' % kbps)
if kbps < 56:
ratio = math.ceil(56 / kbps)
print('Raising %f' % ratio)
ratio = ratio * 1.5
print('Increading 50%')
param2 = param2 * ratio
elif kbps > 560:
ratio = math.floor(kbps / 560)
print('Lowering %f' % ratio)
param2 = param2 / ratio
print('Final Poisson: %f' % param2)
param2 = 0.5
</DeepExtract>
if lang == 'go':
return {'incoming': 'enc1.%s = dist.Exponential{Rate: float64(%s), Source: model.prng}' % (varname, param1), 'outgoing': 'enc1.%s = dist.Exponential{Rate: float64(%s), Source: model.prng}' % (varname, param2), 'expr': 'clampUint16(self.%s.Rand())' % varname}
elif lang == 'js':
return {'decl': 'this.%s=null;' % varname, 'data': 'this.%s=%s;' % (varname, param), 'expr': 'clampUint16(randomExponential(this.%s))' % varname}
else:
return {}
|
def genExponential(varname, param1, param2, avgLength1, avgLength2, lang):
print('Poisson params: %f %f' % (param1, avgLength1))
bpms = float(param1 * avgLength1)
kbps = bpms * 8 / 1000
print('%f kpbs' % kbps)
if kbps < 56:
ratio = math.ceil(56 / kbps)
print('Raising %f' % ratio)
ratio = ratio * 1.5
print('Increading 50%')
param1 = param1 * ratio
elif kbps > 560:
ratio = math.floor(kbps / 560)
print('Lowering %f' % ratio)
param1 = param1 / ratio
print('Final Poisson: %f' % param1)
param1 = 0.5
print('Poisson params: %f %f' % (param2, avgLength2))
bpms = float(param2 * avgLength2)
kbps = bpms * 8 / 1000
print('%f kpbs' % kbps)
if kbps < 56:
ratio = math.ceil(56 / kbps)
print('Raising %f' % ratio)
ratio = ratio * 1.5
print('Increading 50%')
param2 = param2 * ratio
elif kbps > 560:
ratio = math.floor(kbps / 560)
print('Lowering %f' % ratio)
param2 = param2 / ratio
print('Final Poisson: %f' % param2)
param2 = 0.5
if lang == 'go':
return {'incoming': 'enc1.%s = dist.Exponential{Rate: float64(%s), Source: model.prng}' % (varname, param1), 'outgoing': 'enc1.%s = dist.Exponential{Rate: float64(%s), Source: model.prng}' % (varname, param2), 'expr': 'clampUint16(self.%s.Rand())' % varname}
elif lang == 'js':
return {'decl': 'this.%s=null;' % varname, 'data': 'this.%s=%s;' % (varname, param), 'expr': 'clampUint16(randomExponential(this.%s))' % varname}
else:
return {}
|
Dust
|
positive
|
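The genExponential record above repeats one bandwidth-clamping step for each of its two rate parameters. For reference, a standalone sketch of that step; the function name and the keyword defaults are illustrative, not from the Dust repo:

```python
import math


def clamp_rate(rate, avg_length, low_kbps=56, high_kbps=560):
    """Scale a packets-per-millisecond rate so the implied bandwidth stays in range.

    rate       -- packets per millisecond
    avg_length -- average packet length in bytes
    Implied bandwidth is rate * avg_length * 8 / 1000 kbps.
    """
    kbps = rate * avg_length * 8 / 1000.0
    if kbps < low_kbps:
        # Raise the rate (with 50% headroom) when the traffic would be suspiciously slow.
        rate *= math.ceil(low_kbps / kbps) * 1.5
    elif kbps > high_kbps:
        # Lower the rate when the traffic would be suspiciously fast.
        rate /= math.floor(kbps / high_kbps)
    return rate
```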
def check_agent_ppo_discrete_style(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
print('\n| check_agent_ppo_discrete_style()')
env_args = {'env_name': 'CartPole-v1', 'state_dim': 4, 'action_dim': 2, 'if_discrete': True}
env = build_env(env_class=gym.make, env_args=env_args)
num_envs = env_args['num_envs']
state_dim = env_args['state_dim']
action_dim = env_args['action_dim']
if_discrete = env_args['if_discrete']
'init agent'
from elegantrl.agents.AgentPPO import AgentDiscretePPO
from elegantrl.agents.AgentA2C import AgentDiscreteA2C
for agent_class in (AgentDiscretePPO, AgentDiscreteA2C):
print(f' agent_class = {agent_class.__name__}')
args = Config()
args.batch_size = batch_size
agent = agent_class(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
state = torch.tensor(env.reset(), dtype=torch.float32, device=agent.device).unsqueeze(0)
assert isinstance(state, Tensor)
assert state.shape == (num_envs, state_dim)
agent.last_state = state
'check for agent.explore_env'
buffer_items = agent.explore_env(env=env, horizon_len=horizon_len)
<DeepExtract>
(states, actions, logprobs, rewards, undones) = buffer_items
assert states.shape == (horizon_len, num_envs, state_dim)
assert states.dtype in {torch.float, torch.int}
if if_discrete:
actions_shape = (horizon_len, num_envs, 1)
actions_dtypes = {torch.int, torch.long}
else:
actions_shape = (horizon_len, num_envs, action_dim)
actions_dtypes = {torch.float}
assert actions.shape == actions_shape
assert actions.dtype in actions_dtypes
assert logprobs.shape == (horizon_len, num_envs)
assert logprobs.dtype == torch.float
assert rewards.shape == (horizon_len, num_envs)
assert rewards.dtype == torch.float
assert undones.shape == (horizon_len, num_envs)
assert undones.dtype == torch.float
assert set(undones.squeeze(1).cpu().data.tolist()).issubset({0.0, 1.0})
</DeepExtract>
'check for agent.update_net'
(states, actions, logprobs, rewards, undones) = buffer_items
values = agent.cri(states)
assert values.shape == (horizon_len, num_envs)
advantages = agent.get_advantages(rewards, undones, values)
assert advantages.shape == (horizon_len, num_envs)
logging_tuple = agent.update_net(buffer=buffer_items)
assert isinstance(logging_tuple, tuple)
assert any([isinstance(item, float) for item in logging_tuple])
assert len(logging_tuple) >= 2
|
def check_agent_ppo_discrete_style(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
print('\n| check_agent_ppo_discrete_style()')
env_args = {'env_name': 'CartPole-v1', 'state_dim': 4, 'action_dim': 2, 'if_discrete': True}
env = build_env(env_class=gym.make, env_args=env_args)
num_envs = env_args['num_envs']
state_dim = env_args['state_dim']
action_dim = env_args['action_dim']
if_discrete = env_args['if_discrete']
'init agent'
from elegantrl.agents.AgentPPO import AgentDiscretePPO
from elegantrl.agents.AgentA2C import AgentDiscreteA2C
for agent_class in (AgentDiscretePPO, AgentDiscreteA2C):
print(f' agent_class = {agent_class.__name__}')
args = Config()
args.batch_size = batch_size
agent = agent_class(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
state = torch.tensor(env.reset(), dtype=torch.float32, device=agent.device).unsqueeze(0)
assert isinstance(state, Tensor)
assert state.shape == (num_envs, state_dim)
agent.last_state = state
'check for agent.explore_env'
buffer_items = agent.explore_env(env=env, horizon_len=horizon_len)
(states, actions, logprobs, rewards, undones) = buffer_items
assert states.shape == (horizon_len, num_envs, state_dim)
assert states.dtype in {torch.float, torch.int}
if if_discrete:
actions_shape = (horizon_len, num_envs, 1)
actions_dtypes = {torch.int, torch.long}
else:
actions_shape = (horizon_len, num_envs, action_dim)
actions_dtypes = {torch.float}
assert actions.shape == actions_shape
assert actions.dtype in actions_dtypes
assert logprobs.shape == (horizon_len, num_envs)
assert logprobs.dtype == torch.float
assert rewards.shape == (horizon_len, num_envs)
assert rewards.dtype == torch.float
assert undones.shape == (horizon_len, num_envs)
assert undones.dtype == torch.float
assert set(undones.squeeze(1).cpu().data.tolist()).issubset({0.0, 1.0})
'check for agent.update_net'
(states, actions, logprobs, rewards, undones) = buffer_items
values = agent.cri(states)
assert values.shape == (horizon_len, num_envs)
advantages = agent.get_advantages(rewards, undones, values)
assert advantages.shape == (horizon_len, num_envs)
logging_tuple = agent.update_net(buffer=buffer_items)
assert isinstance(logging_tuple, tuple)
assert any([isinstance(item, float) for item in logging_tuple])
assert len(logging_tuple) >= 2
|
ElegantRL
|
positive
|
def changeName(self, name=None, *args):
""" Edit name of the current network node selected.
If there isn't any given name, it will try to get from the UI.
Returns the name result.
"""
oldName = cmds.getAttr(self.net + '.name')
if not name:
if self.ui:
name = cmds.textFieldGrp(self.nameTFG, query=True, text=True)
if name:
name = dpUtils.resolveName(name, self.netSuffix)[0]
<DeepExtract>
messageAttrList = []
attrList = cmds.listAttr(self.net)
for attr in attrList:
if cmds.getAttr(self.net + '.' + attr, type=True) == 'message':
messageAttrList.append(attr)
if messageAttrList:
for messageAttr in messageAttrList:
connectedNodeList = cmds.listConnections(self.net + '.' + messageAttr)
if connectedNodeList:
childrenList = cmds.listRelatives(connectedNodeList[0], children=True, allDescendents=True)
cmds.rename(connectedNodeList[0], connectedNodeList[0].replace(oldName, name))
if childrenList:
for children in childrenList:
try:
cmds.rename(children, children.replace(oldName, name))
except:
pass
</DeepExtract>
cmds.setAttr(self.net + '.name', name, type='string')
self.net = cmds.rename(self.net, self.net.replace(oldName, name))
if self.ui:
<DeepExtract>
cmds.textScrollList(self.existingNetTSL, edit=True, deselectAll=True)
cmds.textScrollList(self.existingNetTSL, edit=True, removeAll=True)
currentNetList = cmds.ls(selection=False, type='network')
if currentNetList:
self.netList = []
for item in currentNetList:
if cmds.objExists(item + '.dpNetwork'):
if cmds.getAttr(item + '.dpNetwork') == 1:
if cmds.objExists(item + '.dpCorrectionManager'):
if cmds.getAttr(item + '.dpCorrectionManager') == 1:
self.netList.append(item)
if self.netList:
cmds.textScrollList(self.existingNetTSL, edit=True, append=self.netList)
if self.net:
if cmds.objExists(self.net):
cmds.textScrollList(self.existingNetTSL, edit=True, selectItem=self.net)
</DeepExtract>
cmds.textFieldGrp(self.nameTFG, label=self.langDic[self.langName]['m006_name'], edit=True, text=name)
return name
|
def changeName(self, name=None, *args):
""" Edit name of the current network node selected.
If there isn't any given name, it will try to get from the UI.
Returns the name result.
"""
oldName = cmds.getAttr(self.net + '.name')
if not name:
if self.ui:
name = cmds.textFieldGrp(self.nameTFG, query=True, text=True)
if name:
name = dpUtils.resolveName(name, self.netSuffix)[0]
messageAttrList = []
attrList = cmds.listAttr(self.net)
for attr in attrList:
if cmds.getAttr(self.net + '.' + attr, type=True) == 'message':
messageAttrList.append(attr)
if messageAttrList:
for messageAttr in messageAttrList:
connectedNodeList = cmds.listConnections(self.net + '.' + messageAttr)
if connectedNodeList:
childrenList = cmds.listRelatives(connectedNodeList[0], children=True, allDescendents=True)
cmds.rename(connectedNodeList[0], connectedNodeList[0].replace(oldName, name))
if childrenList:
for children in childrenList:
try:
cmds.rename(children, children.replace(oldName, name))
except:
pass
cmds.setAttr(self.net + '.name', name, type='string')
self.net = cmds.rename(self.net, self.net.replace(oldName, name))
if self.ui:
cmds.textScrollList(self.existingNetTSL, edit=True, deselectAll=True)
cmds.textScrollList(self.existingNetTSL, edit=True, removeAll=True)
currentNetList = cmds.ls(selection=False, type='network')
if currentNetList:
self.netList = []
for item in currentNetList:
if cmds.objExists(item + '.dpNetwork'):
if cmds.getAttr(item + '.dpNetwork') == 1:
if cmds.objExists(item + '.dpCorrectionManager'):
if cmds.getAttr(item + '.dpCorrectionManager') == 1:
self.netList.append(item)
if self.netList:
cmds.textScrollList(self.existingNetTSL, edit=True, append=self.netList)
if self.net:
if cmds.objExists(self.net):
cmds.textScrollList(self.existingNetTSL, edit=True, selectItem=self.net)
cmds.textFieldGrp(self.nameTFG, label=self.langDic[self.langName]['m006_name'], edit=True, text=name)
return name
|
dpAutoRigSystem
|
positive
|
def eigenspectrum_vec(self, data):
"""Sorted eigenspectrum of the Coulomb matrix.
Parameters
----------
data : object
Data object with Cartesian coordinates and atomic numbers
available.
Returns
-------
features : ndarray
Sorted Eigen values of the coulomb matrix, n atoms is size.
"""
if data is None:
msg = 'Class must have atom_len set to return feature names.'
assert hasattr(self, 'atom_len') and self.atom_len is not None, msg
return ['eig_{}'.format(n) for n in range(self.atom_len)]
features = np.zeros(self.atom_len)
<DeepExtract>
if len(data) < 2:
raise ValueError('Columb matrix requires atoms object with at least 2 atoms')
dm = self.get_all_distances(data)
np.fill_diagonal(dm, 1)
ano = self.get_atomic_numbers(data)
coulomb = np.outer(ano, ano) / dm
diagonal = 0.5 * ano ** 2.4
np.fill_diagonal(coulomb, diagonal)
coulomb = coulomb
</DeepExtract>
v = np.linalg.eigvals(coulomb)
v[::-1].sort()
features[:len(v)] = v
return features
|
def eigenspectrum_vec(self, data):
"""Sorted eigenspectrum of the Coulomb matrix.
Parameters
----------
data : object
Data object with Cartesian coordinates and atomic numbers
available.
Returns
-------
features : ndarray
Sorted Eigen values of the coulomb matrix, n atoms is size.
"""
if data is None:
msg = 'Class must have atom_len set to return feature names.'
assert hasattr(self, 'atom_len') and self.atom_len is not None, msg
return ['eig_{}'.format(n) for n in range(self.atom_len)]
features = np.zeros(self.atom_len)
if len(data) < 2:
raise ValueError('Columb matrix requires atoms object with at least 2 atoms')
dm = self.get_all_distances(data)
np.fill_diagonal(dm, 1)
ano = self.get_atomic_numbers(data)
coulomb = np.outer(ano, ano) / dm
diagonal = 0.5 * ano ** 2.4
np.fill_diagonal(coulomb, diagonal)
coulomb = coulomb
v = np.linalg.eigvals(coulomb)
v[::-1].sort()
features[:len(v)] = v
return features
|
CatLearn
|
positive
|
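The eigenspectrum_vec record above builds a Coulomb matrix and returns its eigenvalues sorted in descending order. A self-contained numpy sketch of the same quantity, assuming only Cartesian coordinates and atomic numbers (the function name is illustrative):

```python
import numpy as np


def coulomb_eigenspectrum(positions, charges):
    """Sorted (descending) eigenvalues of the Coulomb matrix.

    positions -- (n, 3) array of Cartesian coordinates
    charges   -- (n,) array of atomic numbers
    """
    positions = np.asarray(positions, dtype=float)
    charges = np.asarray(charges, dtype=float)
    diff = positions[:, None, :] - positions[None, :, :]
    dist = np.linalg.norm(diff, axis=-1)
    np.fill_diagonal(dist, 1.0)                       # avoid dividing by zero on the diagonal
    coulomb = np.outer(charges, charges) / dist
    np.fill_diagonal(coulomb, 0.5 * charges ** 2.4)   # standard diagonal term
    return np.linalg.eigvalsh(coulomb)[::-1]          # symmetric matrix, largest eigenvalue first


# Example: H2 with a 0.74 Angstrom bond -> coulomb_eigenspectrum([[0, 0, 0], [0, 0, 0.74]], [1, 1])
```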
def update_from_vpc(self, vpc_stack, settings=None):
"""
Override to set the specific resources right once we have a VPC Definition
:param ecs_composex.vpc.vpc_stack.VpcStack vpc_stack:
:param ecs_composex.common.settings.ComposeXSettings settings:
"""
if vpc_stack and vpc_stack.vpc_resource:
if self.is_alb():
<DeepExtract>
if self.is_nlb():
self.cfn_resource.Subnets = Ref(AWS_NO_VALUE)
elif not self.lb_is_public and self.subnets_override:
if vpc_stack.vpc_resource.cfn_resource and self.subnets_override not in [PUBLIC_SUBNETS.title, APP_SUBNETS.title]:
raise ValueError('When Compose-X creates the VPC, the only subnets you can define to use are', [PUBLIC_SUBNETS.title, APP_SUBNETS.title])
elif not vpc_stack.vpc_resource.cfn_resource and vpc_stack.vpc_resource.mappings and (self.subnets_override in vpc_stack.vpc_resource.mappings.keys()):
self.cfn_resource.Subnets = Ref(self.subnets_override)
elif self.is_alb() and self.lb_is_public:
self.cfn_resource.Subnets = Ref(PUBLIC_SUBNETS)
elif not self.lb_is_public:
self.cfn_resource.Subnets = Ref(APP_SUBNETS)
self.cfn_resource.Subnets = APP_SUBNETS.title
</DeepExtract>
elif self.is_nlb():
<DeepExtract>
if self.is_alb():
self.cfn_resource.SubnetMappings = Ref(AWS_NO_VALUE)
if not self.lb_eips and self.lb_is_public:
self.set_eips(vpc_stack)
mappings = []
subnets = self.define_override_subnets(PUBLIC_SUBNETS.title, vpc_stack)
for (count, eip) in enumerate(self.lb_eips):
mappings.append(SubnetMapping(AllocationId=GetAtt(eip, 'AllocationId'), SubnetId=Select(count, Ref(subnets))))
self.cfn_resource.SubnetMappings = mappings
</DeepExtract>
|
def update_from_vpc(self, vpc_stack, settings=None):
"""
Override to set the specific resources right once we have a VPC Definition
:param ecs_composex.vpc.vpc_stack.VpcStack vpc_stack:
:param ecs_composex.common.settings.ComposeXSettings settings:
"""
if vpc_stack and vpc_stack.vpc_resource:
if self.is_alb():
if self.is_nlb():
self.cfn_resource.Subnets = Ref(AWS_NO_VALUE)
elif not self.lb_is_public and self.subnets_override:
if vpc_stack.vpc_resource.cfn_resource and self.subnets_override not in [PUBLIC_SUBNETS.title, APP_SUBNETS.title]:
raise ValueError('When Compose-X creates the VPC, the only subnets you can define to use are', [PUBLIC_SUBNETS.title, APP_SUBNETS.title])
elif not vpc_stack.vpc_resource.cfn_resource and vpc_stack.vpc_resource.mappings and (self.subnets_override in vpc_stack.vpc_resource.mappings.keys()):
self.cfn_resource.Subnets = Ref(self.subnets_override)
elif self.is_alb() and self.lb_is_public:
self.cfn_resource.Subnets = Ref(PUBLIC_SUBNETS)
elif not self.lb_is_public:
self.cfn_resource.Subnets = Ref(APP_SUBNETS)
self.cfn_resource.Subnets = APP_SUBNETS.title
elif self.is_nlb():
if self.is_alb():
self.cfn_resource.SubnetMappings = Ref(AWS_NO_VALUE)
if not self.lb_eips and self.lb_is_public:
self.set_eips(vpc_stack)
mappings = []
subnets = self.define_override_subnets(PUBLIC_SUBNETS.title, vpc_stack)
for (count, eip) in enumerate(self.lb_eips):
mappings.append(SubnetMapping(AllocationId=GetAtt(eip, 'AllocationId'), SubnetId=Select(count, Ref(subnets))))
self.cfn_resource.SubnetMappings = mappings
|
ecs_composex
|
positive
|
def __init__(self, root=os.path.expanduser('~/.encoding/data/citys/'), split='train', mode=None, transform=None, target_transform=None, **kwargs):
super(CitySegmentation, self).__init__(root, split, mode, transform, target_transform, **kwargs)
<DeepExtract>
def get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for (root, directories, files) in os.walk(img_folder):
for filename in files:
if filename.endswith('.png'):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')
maskpath = os.path.join(mask_folder, foldername, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
(self.images, self.mask_paths) = (img_paths, mask_paths)
if self.split == 'train' or self.split == 'val' or self.split == 'test':
img_folder = os.path.join(self.root, 'leftImg8bit/' + self.split)
mask_folder = os.path.join(self.root, 'gtFine/' + self.split)
(img_paths, mask_paths) = get_path_pairs(img_folder, mask_folder)
(self.images, self.mask_paths) = (img_paths, mask_paths)
else:
assert self.split == 'trainval'
print('trainval set')
train_img_folder = os.path.join(self.root, 'leftImg8bit/train')
train_mask_folder = os.path.join(self.root, 'gtFine/train')
val_img_folder = os.path.join(self.root, 'leftImg8bit/val')
val_mask_folder = os.path.join(self.root, 'gtFine/val')
(train_img_paths, train_mask_paths) = get_path_pairs(train_img_folder, train_mask_folder)
(val_img_paths, val_mask_paths) = get_path_pairs(val_img_folder, val_mask_folder)
img_paths = train_img_paths + val_img_paths
mask_paths = train_mask_paths + val_mask_paths
(self.images, self.mask_paths) = (img_paths, mask_paths)
</DeepExtract>
assert len(self.images) == len(self.mask_paths)
if len(self.images) == 0:
raise RuntimeError('Found 0 images in subfolders of: ' + self.root + '\n')
self._indices = np.array(range(-1, 19))
self._classes = np.array([0, 7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33])
self._key = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 0, 1, -1, -1, 2, 3, 4, -1, -1, -1, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1, -1, 16, 17, 18])
self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32')
|
def __init__(self, root=os.path.expanduser('~/.encoding/data/citys/'), split='train', mode=None, transform=None, target_transform=None, **kwargs):
super(CitySegmentation, self).__init__(root, split, mode, transform, target_transform, **kwargs)
def get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for (root, directories, files) in os.walk(img_folder):
for filename in files:
if filename.endswith('.png'):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')
maskpath = os.path.join(mask_folder, foldername, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
(self.images, self.mask_paths) = (img_paths, mask_paths)
if self.split == 'train' or self.split == 'val' or self.split == 'test':
img_folder = os.path.join(self.root, 'leftImg8bit/' + self.split)
mask_folder = os.path.join(self.root, 'gtFine/' + self.split)
(img_paths, mask_paths) = get_path_pairs(img_folder, mask_folder)
(self.images, self.mask_paths) = (img_paths, mask_paths)
else:
assert self.split == 'trainval'
print('trainval set')
train_img_folder = os.path.join(self.root, 'leftImg8bit/train')
train_mask_folder = os.path.join(self.root, 'gtFine/train')
val_img_folder = os.path.join(self.root, 'leftImg8bit/val')
val_mask_folder = os.path.join(self.root, 'gtFine/val')
(train_img_paths, train_mask_paths) = get_path_pairs(train_img_folder, train_mask_folder)
(val_img_paths, val_mask_paths) = get_path_pairs(val_img_folder, val_mask_folder)
img_paths = train_img_paths + val_img_paths
mask_paths = train_mask_paths + val_mask_paths
(self.images, self.mask_paths) = (img_paths, mask_paths)
assert len(self.images) == len(self.mask_paths)
if len(self.images) == 0:
raise RuntimeError('Found 0 images in subfolders of: ' + self.root + '\n')
self._indices = np.array(range(-1, 19))
self._classes = np.array([0, 7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33])
self._key = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 0, 1, -1, -1, 2, 3, 4, -1, -1, -1, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1, -1, 16, 17, 18])
self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32')
|
DANet
|
positive
|
def jump_to_address(addr, rax=0, rdi=0, rsi=0, rdx=0, interact=False):
fake_struct = p64(rax) + p64(rdi) + p64(rsi) + p64(rdx) + p64(addr)
<DeepExtract>
p.sendline('1')
p.recvuntil('Note id: ')
p.sendline(str(9))
p.recvuntil('Contents: ')
p.send('A' * 8 + fake_struct + '\n')
get_prompt()
</DeepExtract>
<DeepExtract>
p.sendline('2')
p.recvuntil('Note id: ')
p.sendline(str(0))
if interact:
return
else:
get_prompt()
</DeepExtract>
|
def jump_to_address(addr, rax=0, rdi=0, rsi=0, rdx=0, interact=False):
fake_struct = p64(rax) + p64(rdi) + p64(rsi) + p64(rdx) + p64(addr)
p.sendline('1')
p.recvuntil('Note id: ')
p.sendline(str(9))
p.recvuntil('Contents: ')
p.send('A' * 8 + fake_struct + '\n')
get_prompt()
p.sendline('2')
p.recvuntil('Note id: ')
p.sendline(str(0))
if interact:
return
else:
get_prompt()
|
CTF-writeups
|
positive
|
def MoveMouse(self, pressed='left', coords=(0, 0)):
"""Move the mouse"""
<DeepExtract>
flags = 0
for key in pressed.split():
flags |= _mouse_flags[key.lower()]
click_point = win32functions.MakeLong(coords[1], coords[0])
(flags, click_point) = (flags, click_point)
</DeepExtract>
<DeepExtract>
if timeout is None:
timeout = Timings.sendmessagetimeout_timeout
result = ctypes.c_long()
win32functions.SendMessageTimeout(self, win32defines.WM_MOUSEMOVE, flags, click_point, timeoutflags, int(timeout * 1000), ctypes.byref(result))
return result.value
</DeepExtract>
win32functions.WaitGuiThreadIdle(self)
return self
|
def MoveMouse(self, pressed='left', coords=(0, 0)):
"""Move the mouse"""
flags = 0
for key in pressed.split():
flags |= _mouse_flags[key.lower()]
click_point = win32functions.MakeLong(coords[1], coords[0])
(flags, click_point) = (flags, click_point)
if timeout is None:
timeout = Timings.sendmessagetimeout_timeout
result = ctypes.c_long()
win32functions.SendMessageTimeout(self, win32defines.WM_MOUSEMOVE, flags, click_point, timeoutflags, int(timeout * 1000), ctypes.byref(result))
return result.value
win32functions.WaitGuiThreadIdle(self)
return self
|
BrowserRefresh-Sublime
|
positive
|
def setAffinities(self, data):
"""Set the affinities.
Affinities need to be set before they can be assigned to :class:`Job` or Worker.
:param data: a dictionnary of affinities
"""
<DeepExtract>
if data:
data = json.dumps(data)
headers = {'Content-Type': 'application/json'}
self._Conn.request('POST', '/api/affinities', data, headers)
res = self._Conn.getresponse()
if res.status == 200:
res = res.read()
else:
raise CoalitionError(res.read())
</DeepExtract>
return res
|
def setAffinities(self, data):
"""Set the affinities.
Affinities need to be set before they can be assigned to :class:`Job` or Worker.
:param data: a dictionnary of affinities
"""
if data:
data = json.dumps(data)
headers = {'Content-Type': 'application/json'}
self._Conn.request('POST', '/api/affinities', data, headers)
res = self._Conn.getresponse()
if res.status == 200:
res = res.read()
else:
raise CoalitionError(res.read())
return res
|
coalition
|
positive
|
def unsaved_call(self):
self.unsaved = True
<DeepExtract>
self.setWindowTitle(f'Config editor - {self.filename}' + '*' * self.unsaved)
</DeepExtract>
self.model.dataChanged.disconnect(self.unsaved_call)
|
def unsaved_call(self):
self.unsaved = True
self.setWindowTitle(f'Config editor - {self.filename}' + '*' * self.unsaved)
self.model.dataChanged.disconnect(self.unsaved_call)
|
clever-show
|
positive
|
def post_process_predictions(predictions):
device_index = self.inputs['img'].get_device()
tensor_type = self.inputs['img'].type()
img_size = self.inputs['img'].shape[2:]
opts = self.opts
b_size = len(self.inputs['img'])
if opts.flip_train:
true_size = b_size // 2
<DeepExtract>
device = predictions['cam_probs'].get_device()
opts = self.opts
if opts.multiple_cam:
keys_to_copy = ['cam_probs']
for key in keys_to_copy:
predictions[key] = torch.cat([predictions[key][:true_size], predictions[key][:true_size]])
part_perm = self.part_perm.to(device)
if opts.multiple_cam:
keys_to_copy = ['part_transforms', 'delta_part_transforms']
for key in keys_to_copy:
mirror_transforms_swaps = predictions[key][:true_size][:, :, part_perm, :]
predictions[key] = torch.cat([predictions[key][:true_size], mirror_transforms_swaps])
camera = predictions['cam'][:true_size]
if opts.multiple_cam:
new_cam = cb.reflect_cam_pose(camera[:true_size])
predictions['cam'] = torch.cat([camera[:true_size], new_cam])
else:
new_cam = cb.reflect_cam_pose(camera[:true_size, None, :]).squeeze(1)
predictions['cam'] = torch.cat([camera[:true_size], new_cam])
predictions = predictions
</DeepExtract>
real_iter = self.inputs['iter']
verts = (self.mean_shape['verts'] * 1).to(device=device_index)
if opts.warmup_deform_iter > real_iter:
n_verts = verts.shape[0]
predictions['verts'] = verts[None, None, ...].expand((b_size, opts.num_hypo_cams, n_verts, 3))
else:
parts_rc = self.mean_shape['parts_rc']
parts_rc = (torch.stack(parts_rc) * 1).to(device=device_index)
parts_rc = parts_rc.unsqueeze(0).repeat(b_size, 1, 1)
predictions['verts'] = []
for cx in range(opts.num_hypo_cams):
part_transforms = predictions['part_transforms'][:, cx]
verts_cx = geom_utils.apply_part_transforms(verts, self.mean_shape['parts'], parts_rc, part_transforms, predictions['membership'])
predictions['verts'].append(verts_cx)
predictions['verts'] = torch.stack(predictions['verts'], dim=1)
if opts.warmup_pose_iter > real_iter:
predictions['cam_probs'] = 1.0 / opts.num_hypo_cams * (torch.zeros(predictions['cam_probs'].shape).float() + 1).to(device_index)
if opts.multiple_cam:
camera = predictions['cam']
device = camera.get_device()
faces = (self.mean_shape['faces'] * 1).to(device)
faces = faces[None, ...].repeat(b_size, 1, 1)
verts = predictions['verts']
mask_preds = []
depth_preds = []
multi_renderer_mask = self.renderers[device]['mask']
multi_renderer_depth = self.renderers[device]['depth']
for cx in range(opts.num_hypo_cams):
mask_pred = multi_renderer_mask[cx].forward(verts[:, cx], faces, camera[:, cx])
mask_preds.append(mask_pred)
depth_pred = multi_renderer_depth[cx].forward(verts[:, cx], faces, camera[:, cx], depth_only=True)
depth_preds.append(depth_pred)
predictions['mask_render'] = torch.stack(mask_preds, dim=1)
predictions['depth'] = torch.stack(depth_preds, dim=1)
points3d = [None for _ in range(opts.num_hypo_cams)]
predictions['project_points_cam_pred'] = [None for _ in range(opts.num_hypo_cams)]
predictions['project_points_cam_z'] = [None for _ in range(opts.num_hypo_cams)]
predictions['project_points'] = [None for _ in range(opts.num_hypo_cams)]
predictions['kp_project'] = [None for _ in range(opts.num_hypo_cams)]
predictions['verts_proj'] = [None for _ in range(opts.num_hypo_cams)]
kp_verts = [None for _ in range(opts.num_hypo_cams)]
for cx in range(opts.num_hypo_cams):
points3d[cx] = geom_utils.project_uv_to_3d(self.uv2points, verts[:, cx], predictions['uv_map'])
predictions['project_points_cam_pred'][cx] = geom_utils.project_3d_to_image(points3d[cx], camera[:, cx], self.offset_z)
predictions['project_points_cam_z'][cx] = predictions['project_points_cam_pred'][cx][..., 2] - self.cam_location[2]
shape = (b_size, img_size[0], img_size[1])
predictions['project_points_cam_z'][cx] = predictions['project_points_cam_z'][cx].view(shape)
shape = (b_size, img_size[0], img_size[1], 2)
predictions['project_points'][cx] = predictions['project_points_cam_pred'][cx][..., 0:2].view(shape)
kp_verts = verts[:, cx][:, self.kp_vertex_ids, :]
kp_project = geom_utils.project_3d_to_image(kp_verts, camera[:, cx], self.offset_z)
predictions['kp_project'][cx] = kp_project[..., 0:2].view(b_size, len(self.kp_vertex_ids), -1)
predictions['verts_proj'][cx] = geom_utils.project_3d_to_image(predictions['verts'][:, cx], camera[:, cx], self.offset_z)[..., 0:2]
predictions['verts_proj'] = torch.stack(predictions['verts_proj'], dim=1)
predictions['points3d'] = torch.stack(points3d, dim=1)
predictions['project_points_cam_pred'] = torch.stack(predictions['project_points_cam_pred'], dim=1)
predictions['project_points_cam_z'] = torch.stack(predictions['project_points_cam_z'], dim=1)
predictions['project_points'] = torch.stack(predictions['project_points'], dim=1)
predictions['kp_project'] = torch.stack(predictions['kp_project'], dim=1)
return predictions
|
def post_process_predictions(predictions):
device_index = self.inputs['img'].get_device()
tensor_type = self.inputs['img'].type()
img_size = self.inputs['img'].shape[2:]
opts = self.opts
b_size = len(self.inputs['img'])
if opts.flip_train:
true_size = b_size // 2
device = predictions['cam_probs'].get_device()
opts = self.opts
if opts.multiple_cam:
keys_to_copy = ['cam_probs']
for key in keys_to_copy:
predictions[key] = torch.cat([predictions[key][:true_size], predictions[key][:true_size]])
part_perm = self.part_perm.to(device)
if opts.multiple_cam:
keys_to_copy = ['part_transforms', 'delta_part_transforms']
for key in keys_to_copy:
mirror_transforms_swaps = predictions[key][:true_size][:, :, part_perm, :]
predictions[key] = torch.cat([predictions[key][:true_size], mirror_transforms_swaps])
camera = predictions['cam'][:true_size]
if opts.multiple_cam:
new_cam = cb.reflect_cam_pose(camera[:true_size])
predictions['cam'] = torch.cat([camera[:true_size], new_cam])
else:
new_cam = cb.reflect_cam_pose(camera[:true_size, None, :]).squeeze(1)
predictions['cam'] = torch.cat([camera[:true_size], new_cam])
predictions = predictions
real_iter = self.inputs['iter']
verts = (self.mean_shape['verts'] * 1).to(device=device_index)
if opts.warmup_deform_iter > real_iter:
n_verts = verts.shape[0]
predictions['verts'] = verts[None, None, ...].expand((b_size, opts.num_hypo_cams, n_verts, 3))
else:
parts_rc = self.mean_shape['parts_rc']
parts_rc = (torch.stack(parts_rc) * 1).to(device=device_index)
parts_rc = parts_rc.unsqueeze(0).repeat(b_size, 1, 1)
predictions['verts'] = []
for cx in range(opts.num_hypo_cams):
part_transforms = predictions['part_transforms'][:, cx]
verts_cx = geom_utils.apply_part_transforms(verts, self.mean_shape['parts'], parts_rc, part_transforms, predictions['membership'])
predictions['verts'].append(verts_cx)
predictions['verts'] = torch.stack(predictions['verts'], dim=1)
if opts.warmup_pose_iter > real_iter:
predictions['cam_probs'] = 1.0 / opts.num_hypo_cams * (torch.zeros(predictions['cam_probs'].shape).float() + 1).to(device_index)
if opts.multiple_cam:
camera = predictions['cam']
device = camera.get_device()
faces = (self.mean_shape['faces'] * 1).to(device)
faces = faces[None, ...].repeat(b_size, 1, 1)
verts = predictions['verts']
mask_preds = []
depth_preds = []
multi_renderer_mask = self.renderers[device]['mask']
multi_renderer_depth = self.renderers[device]['depth']
for cx in range(opts.num_hypo_cams):
mask_pred = multi_renderer_mask[cx].forward(verts[:, cx], faces, camera[:, cx])
mask_preds.append(mask_pred)
depth_pred = multi_renderer_depth[cx].forward(verts[:, cx], faces, camera[:, cx], depth_only=True)
depth_preds.append(depth_pred)
predictions['mask_render'] = torch.stack(mask_preds, dim=1)
predictions['depth'] = torch.stack(depth_preds, dim=1)
points3d = [None for _ in range(opts.num_hypo_cams)]
predictions['project_points_cam_pred'] = [None for _ in range(opts.num_hypo_cams)]
predictions['project_points_cam_z'] = [None for _ in range(opts.num_hypo_cams)]
predictions['project_points'] = [None for _ in range(opts.num_hypo_cams)]
predictions['kp_project'] = [None for _ in range(opts.num_hypo_cams)]
predictions['verts_proj'] = [None for _ in range(opts.num_hypo_cams)]
kp_verts = [None for _ in range(opts.num_hypo_cams)]
for cx in range(opts.num_hypo_cams):
points3d[cx] = geom_utils.project_uv_to_3d(self.uv2points, verts[:, cx], predictions['uv_map'])
predictions['project_points_cam_pred'][cx] = geom_utils.project_3d_to_image(points3d[cx], camera[:, cx], self.offset_z)
predictions['project_points_cam_z'][cx] = predictions['project_points_cam_pred'][cx][..., 2] - self.cam_location[2]
shape = (b_size, img_size[0], img_size[1])
predictions['project_points_cam_z'][cx] = predictions['project_points_cam_z'][cx].view(shape)
shape = (b_size, img_size[0], img_size[1], 2)
predictions['project_points'][cx] = predictions['project_points_cam_pred'][cx][..., 0:2].view(shape)
kp_verts = verts[:, cx][:, self.kp_vertex_ids, :]
kp_project = geom_utils.project_3d_to_image(kp_verts, camera[:, cx], self.offset_z)
predictions['kp_project'][cx] = kp_project[..., 0:2].view(b_size, len(self.kp_vertex_ids), -1)
predictions['verts_proj'][cx] = geom_utils.project_3d_to_image(predictions['verts'][:, cx], camera[:, cx], self.offset_z)[..., 0:2]
predictions['verts_proj'] = torch.stack(predictions['verts_proj'], dim=1)
predictions['points3d'] = torch.stack(points3d, dim=1)
predictions['project_points_cam_pred'] = torch.stack(predictions['project_points_cam_pred'], dim=1)
predictions['project_points_cam_z'] = torch.stack(predictions['project_points_cam_z'], dim=1)
predictions['project_points'] = torch.stack(predictions['project_points'], dim=1)
predictions['kp_project'] = torch.stack(predictions['kp_project'], dim=1)
return predictions
|
acsm
|
positive
|
def test_empty_file() -> None:
""" Handles empty file """
<DeepExtract>
k = random.randint(5, 10)
pattern = ''.join(random.choices(string.ascii_letters + string.digits, k=k))
</DeepExtract>
<DeepExtract>
assert os.path.isfile(EMPTY + '.out')
expected = open(EMPTY + '.out').read().rstrip()
out_file = random_string()
if os.path.isfile(out_file):
os.remove(out_file)
try:
options = ' '.join(opts) if opts else ''
cmd = f'{RUN} {options} {pattern} -o {out_file} {EMPTY}'
(rv, _) = getstatusoutput(cmd)
assert os.path.isfile(out_file)
assert rv == 0
assert open(out_file).read().rstrip() == expected
finally:
if os.path.isfile(out_file):
os.remove(out_file)
</DeepExtract>
|
def test_empty_file() -> None:
""" Handles empty file """
k = random.randint(5, 10)
pattern = ''.join(random.choices(string.ascii_letters + string.digits, k=k))
assert os.path.isfile(EMPTY + '.out')
expected = open(EMPTY + '.out').read().rstrip()
out_file = random_string()
if os.path.isfile(out_file):
os.remove(out_file)
try:
options = ' '.join(opts) if opts else ''
cmd = f'{RUN} {options} {pattern} -o {out_file} {EMPTY}'
(rv, _) = getstatusoutput(cmd)
assert os.path.isfile(out_file)
assert rv == 0
assert open(out_file).read().rstrip() == expected
finally:
if os.path.isfile(out_file):
os.remove(out_file)
|
biofx_python
|
positive
|
def test_to_pickle(tmp_path):
start_params = np.array([3, 2, 1])
<DeepExtract>
empirical_moments = np.zeros(3)
</DeepExtract>
if isinstance(empirical_moments, dict):
empirical_moments = empirical_moments['simulated_moments']
calculated = estimate_msm(simulate_moments=_sim_np, empirical_moments=empirical_moments, moments_cov=cov_np, params=start_params, optimize_options='scipy_lbfgsb')
calculated.to_pickle(tmp_path / 'bla.pkl')
|
def test_to_pickle(tmp_path):
start_params = np.array([3, 2, 1])
empirical_moments = np.zeros(3)
if isinstance(empirical_moments, dict):
empirical_moments = empirical_moments['simulated_moments']
calculated = estimate_msm(simulate_moments=_sim_np, empirical_moments=empirical_moments, moments_cov=cov_np, params=start_params, optimize_options='scipy_lbfgsb')
calculated.to_pickle(tmp_path / 'bla.pkl')
|
estimagic
|
positive
|
def readSubsectorData(self, offset):
ss = Subsector()
<DeepExtract>
self.f.seek(offset)
f = self.f.read(2)
ss.segCount = struct.unpack('<H', f)[0]
</DeepExtract>
<DeepExtract>
self.f.seek(offset + 2)
f = self.f.read(2)
ss.firstSegID = struct.unpack('<H', f)[0]
</DeepExtract>
return ss
|
def readSubsectorData(self, offset):
ss = Subsector()
self.f.seek(offset)
f = self.f.read(2)
ss.segCount = struct.unpack('<H', f)[0]
self.f.seek(offset + 2)
f = self.f.read(2)
ss.firstSegID = struct.unpack('<H', f)[0]
return ss
|
doomengine.python
|
positive
|
def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
""" PointNet Set Abstraction (SA) Module
Input:
xyz: (batch_size, ndataset, 3) TF tensor
points: (batch_size, ndataset, channel) TF tensor
npoint: int32 -- #points sampled in farthest point sampling
radius: float32 -- search radius in local region
nsample: int32 -- how many points in each local region
mlp: list of int32 -- output size for MLP on each point
mlp2: list of int32 -- output size for MLP on each region
group_all: bool -- group all points into one PC if set true, OVERRIDE
npoint, radius and nsample settings
use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
Return:
new_xyz: (batch_size, npoint, 3) TF tensor
new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
idx: (batch_size, npoint, nsample) int32 -- indices for local regions
"""
data_format = 'NCHW' if use_nchw else 'NHWC'
with tf.variable_scope(scope) as sc:
if group_all:
nsample = xyz.get_shape()[1].value
<DeepExtract>
batch_size = xyz.get_shape()[0].value
nsample = xyz.get_shape()[1].value
new_xyz = tf.constant(np.tile(np.array([0, 0, 0]).reshape((1, 1, 3)), (batch_size, 1, 1)), dtype=tf.float32)
idx = tf.constant(np.tile(np.array(range(nsample)).reshape((1, 1, nsample)), (batch_size, 1, 1)))
grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3))
if points is not None:
if use_xyz:
new_points = tf.concat([xyz, points], axis=2)
else:
new_points = points
new_points = tf.expand_dims(new_points, 1)
else:
new_points = grouped_xyz
(new_xyz, new_points, idx, grouped_xyz) = (new_xyz, new_points, idx, grouped_xyz)
</DeepExtract>
else:
<DeepExtract>
new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
if knn:
(_, idx) = knn_point(nsample, xyz, new_xyz)
else:
(idx, pts_cnt) = query_ball_point(radius, nsample, xyz, new_xyz)
grouped_xyz = group_point(xyz, idx)
grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
if points is not None:
grouped_points = group_point(points, idx)
if use_xyz:
new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)
else:
new_points = grouped_points
else:
new_points = grouped_xyz
(new_xyz, new_points, idx, grouped_xyz) = (new_xyz, new_points, idx, grouped_xyz)
</DeepExtract>
if use_nchw:
new_points = tf.transpose(new_points, [0, 3, 1, 2])
for (i, num_out_channel) in enumerate(mlp):
new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv%d' % i, bn_decay=bn_decay, data_format=data_format)
if use_nchw:
new_points = tf.transpose(new_points, [0, 2, 3, 1])
if pooling == 'max':
new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
elif pooling == 'avg':
new_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
elif pooling == 'weighted_avg':
with tf.variable_scope('weighted_avg'):
dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
exp_dists = tf.exp(-dists * 5)
weights = exp_dists / tf.reduce_sum(exp_dists, axis=2, keep_dims=True)
new_points *= weights
new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
elif pooling == 'max_and_avg':
max_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
avg_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
new_points = tf.concat([avg_points, max_points], axis=-1)
if mlp2 is not None:
if use_nchw:
new_points = tf.transpose(new_points, [0, 3, 1, 2])
for (i, num_out_channel) in enumerate(mlp2):
new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv_post_%d' % i, bn_decay=bn_decay, data_format=data_format)
if use_nchw:
new_points = tf.transpose(new_points, [0, 2, 3, 1])
new_points = tf.squeeze(new_points, [2])
return (new_xyz, new_points, idx)
|
def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
""" PointNet Set Abstraction (SA) Module
Input:
xyz: (batch_size, ndataset, 3) TF tensor
points: (batch_size, ndataset, channel) TF tensor
npoint: int32 -- #points sampled in farthest point sampling
radius: float32 -- search radius in local region
nsample: int32 -- how many points in each local region
mlp: list of int32 -- output size for MLP on each point
mlp2: list of int32 -- output size for MLP on each region
group_all: bool -- group all points into one PC if set true, OVERRIDE
npoint, radius and nsample settings
use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
Return:
new_xyz: (batch_size, npoint, 3) TF tensor
new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
idx: (batch_size, npoint, nsample) int32 -- indices for local regions
"""
data_format = 'NCHW' if use_nchw else 'NHWC'
with tf.variable_scope(scope) as sc:
if group_all:
nsample = xyz.get_shape()[1].value
batch_size = xyz.get_shape()[0].value
nsample = xyz.get_shape()[1].value
new_xyz = tf.constant(np.tile(np.array([0, 0, 0]).reshape((1, 1, 3)), (batch_size, 1, 1)), dtype=tf.float32)
idx = tf.constant(np.tile(np.array(range(nsample)).reshape((1, 1, nsample)), (batch_size, 1, 1)))
grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3))
if points is not None:
if use_xyz:
new_points = tf.concat([xyz, points], axis=2)
else:
new_points = points
new_points = tf.expand_dims(new_points, 1)
else:
new_points = grouped_xyz
(new_xyz, new_points, idx, grouped_xyz) = (new_xyz, new_points, idx, grouped_xyz)
else:
new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
if knn:
(_, idx) = knn_point(nsample, xyz, new_xyz)
else:
(idx, pts_cnt) = query_ball_point(radius, nsample, xyz, new_xyz)
grouped_xyz = group_point(xyz, idx)
grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
if points is not None:
grouped_points = group_point(points, idx)
if use_xyz:
new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)
else:
new_points = grouped_points
else:
new_points = grouped_xyz
(new_xyz, new_points, idx, grouped_xyz) = (new_xyz, new_points, idx, grouped_xyz)
if use_nchw:
new_points = tf.transpose(new_points, [0, 3, 1, 2])
for (i, num_out_channel) in enumerate(mlp):
new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv%d' % i, bn_decay=bn_decay, data_format=data_format)
if use_nchw:
new_points = tf.transpose(new_points, [0, 2, 3, 1])
if pooling == 'max':
new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
elif pooling == 'avg':
new_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
elif pooling == 'weighted_avg':
with tf.variable_scope('weighted_avg'):
dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
exp_dists = tf.exp(-dists * 5)
weights = exp_dists / tf.reduce_sum(exp_dists, axis=2, keep_dims=True)
new_points *= weights
new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
elif pooling == 'max_and_avg':
max_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
avg_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
new_points = tf.concat([avg_points, max_points], axis=-1)
if mlp2 is not None:
if use_nchw:
new_points = tf.transpose(new_points, [0, 3, 1, 2])
for (i, num_out_channel) in enumerate(mlp2):
new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv_post_%d' % i, bn_decay=bn_decay, data_format=data_format)
if use_nchw:
new_points = tf.transpose(new_points, [0, 2, 3, 1])
new_points = tf.squeeze(new_points, [2])
return (new_xyz, new_points, idx)
|
deformation_aware_embedding
|
positive
|
@property
def raw_data(self):
if not self._raw_data:
<DeepExtract>
for (dt, series) in self.data.iterrows():
for (sid, price) in series.iterkv():
if sid in self.sids:
event = {'dt': dt, 'sid': sid, 'price': price, 'volume': 1000}
yield event
</DeepExtract>
return self._raw_data
|
@property
def raw_data(self):
if not self._raw_data:
for (dt, series) in self.data.iterrows():
for (sid, price) in series.iterkv():
if sid in self.sids:
event = {'dt': dt, 'sid': sid, 'price': price, 'volume': 1000}
yield event
return self._raw_data
|
AlephNull
|
positive
|
@app.route('/plot_div')
def plot_div():
args = flask.request.args
reload_s3 = args.get('reload_s3', False)
x_plot_key = args.get('x_plot_key', '(default)')
plot_key = args.get('plot_key')
display_mode = args.get('display_mode', 'mean_std')
split_key = args.get('split_key', '')
group_key = args.get('group_key', '')
filters_json = args.get('filters', '{}')
filters = json.loads(filters_json)
if len(split_key) == 0:
split_key = None
if len(group_key) == 0:
group_key = None
print(reload_s3, type(reload_s3))
if reload_s3:
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
print(data_paths)
for data_path in data_paths:
if 'data/s3/' in data_path:
exp_group = data_path.split('data/s3/')[-1].split('/')[0]
os.system('python %s/scripts/sync_s3.py %s' % (project_root, exp_group))
<DeepExtract>
global exps_data
global plottable_keys
global distinct_params
global x_plottable_keys
exps_data = core.load_exps_data(data_paths)
plottable_keys = sorted(list(set(flatten((list(exp.progress.keys()) for exp in exps_data)))))
distinct_params = sorted(core.extract_distinct_params(exps_data))
x_plottable_keys = [key for key in plottable_keys if is_increasing_key(key, exps_data)]
</DeepExtract>
<DeepExtract>
selector = core.Selector(exps_data)
if filters is None:
filters = dict()
for (k, v) in filters.items():
selector = selector.where(k, str(v))
if split_key is not None:
vs = [vs for (k, vs) in distinct_params if k == split_key][0]
split_selectors = [selector.where(split_key, v) for v in vs]
split_legends = list(map(str, vs))
else:
split_selectors = [selector]
split_legends = ['Plot']
plots = []
counter = 1
for (split_selector, split_legend) in zip(split_selectors, split_legends):
        if group_key and group_key != 'exp_name':
vs = [vs for (k, vs) in distinct_params if k == group_key][0]
group_selectors = [split_selector.where(group_key, v) for v in vs]
group_legends = [str(x) for x in vs]
else:
group_key = 'exp_name'
vs = sorted([x.params['exp_name'] for x in split_selector.extract()])
group_selectors = [split_selector.where(group_key, v) for v in vs]
group_legends = [summary_name(x.extract()[0], split_selector) for x in group_selectors]
to_plot = []
for (group_selector, group_legend) in zip(group_selectors, group_legends):
filtered_data = group_selector.extract()
if len(filtered_data) > 0:
progresses = [exp.progress.get(plot_key, np.array([np.nan])) for exp in filtered_data]
sizes = list(map(len, progresses))
max_size = max(sizes)
progresses = [np.concatenate([ps, np.ones(max_size - len(ps)) * np.nan]) for ps in progresses]
if x_plot_key == '(default)':
xs = np.arange(max_size)
else:
all_xs = np.unique(np.sort(np.concatenate([d.progress.get(x_plot_key, []) for d in filtered_data])))
interp_progresses = []
for d in filtered_data:
if x_plot_key in d.progress:
assert plot_key in d.progress
interp_progresses.append(np.interp(all_xs, d.progress[x_plot_key], d.progress[plot_key], right=np.nan))
else:
continue
progresses = interp_progresses
xs = all_xs
if display_mode == 'mean_std':
means = np.nanmean(progresses, axis=0)
stds = np.nanstd(progresses, axis=0)
to_plot.append(AttrDict(means=means, stds=stds, legend=group_legend, xs=xs, display_mode=display_mode))
elif display_mode == 'mean_se':
means = np.nanmean(progresses, axis=0)
ses = np.nanstd(progresses, axis=0) / np.sqrt(np.sum(1 - np.isnan(progresses), axis=0))
to_plot.append(AttrDict(means=means, ses=ses, legend=group_legend, xs=xs, display_mode=display_mode))
elif display_mode == 'individual':
to_plot.append(AttrDict(xs=xs, ys=progresses, legend=group_legend, display_mode=display_mode))
else:
raise NotImplementedError
if len(to_plot) > 0:
fig_title = '%s: %s' % (split_key, split_legend)
plots.append(make_plot(to_plot, title=fig_title))
counter += 1
plot_div = '\n'.join(plots)
</DeepExtract>
return plot_div
|
@app.route('/plot_div')
def plot_div():
args = flask.request.args
reload_s3 = args.get('reload_s3', False)
x_plot_key = args.get('x_plot_key', '(default)')
plot_key = args.get('plot_key')
display_mode = args.get('display_mode', 'mean_std')
split_key = args.get('split_key', '')
group_key = args.get('group_key', '')
filters_json = args.get('filters', '{}')
filters = json.loads(filters_json)
if len(split_key) == 0:
split_key = None
if len(group_key) == 0:
group_key = None
print(reload_s3, type(reload_s3))
if reload_s3:
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
print(data_paths)
for data_path in data_paths:
if 'data/s3/' in data_path:
exp_group = data_path.split('data/s3/')[-1].split('/')[0]
os.system('python %s/scripts/sync_s3.py %s' % (project_root, exp_group))
global exps_data
global plottable_keys
global distinct_params
global x_plottable_keys
exps_data = core.load_exps_data(data_paths)
plottable_keys = sorted(list(set(flatten((list(exp.progress.keys()) for exp in exps_data)))))
distinct_params = sorted(core.extract_distinct_params(exps_data))
x_plottable_keys = [key for key in plottable_keys if is_increasing_key(key, exps_data)]
selector = core.Selector(exps_data)
if filters is None:
filters = dict()
for (k, v) in filters.items():
selector = selector.where(k, str(v))
if split_key is not None:
vs = [vs for (k, vs) in distinct_params if k == split_key][0]
split_selectors = [selector.where(split_key, v) for v in vs]
split_legends = list(map(str, vs))
else:
split_selectors = [selector]
split_legends = ['Plot']
plots = []
counter = 1
for (split_selector, split_legend) in zip(split_selectors, split_legends):
        if group_key and group_key != 'exp_name':
vs = [vs for (k, vs) in distinct_params if k == group_key][0]
group_selectors = [split_selector.where(group_key, v) for v in vs]
group_legends = [str(x) for x in vs]
else:
group_key = 'exp_name'
vs = sorted([x.params['exp_name'] for x in split_selector.extract()])
group_selectors = [split_selector.where(group_key, v) for v in vs]
group_legends = [summary_name(x.extract()[0], split_selector) for x in group_selectors]
to_plot = []
for (group_selector, group_legend) in zip(group_selectors, group_legends):
filtered_data = group_selector.extract()
if len(filtered_data) > 0:
progresses = [exp.progress.get(plot_key, np.array([np.nan])) for exp in filtered_data]
sizes = list(map(len, progresses))
max_size = max(sizes)
progresses = [np.concatenate([ps, np.ones(max_size - len(ps)) * np.nan]) for ps in progresses]
if x_plot_key == '(default)':
xs = np.arange(max_size)
else:
all_xs = np.unique(np.sort(np.concatenate([d.progress.get(x_plot_key, []) for d in filtered_data])))
interp_progresses = []
for d in filtered_data:
if x_plot_key in d.progress:
assert plot_key in d.progress
interp_progresses.append(np.interp(all_xs, d.progress[x_plot_key], d.progress[plot_key], right=np.nan))
else:
continue
progresses = interp_progresses
xs = all_xs
if display_mode == 'mean_std':
means = np.nanmean(progresses, axis=0)
stds = np.nanstd(progresses, axis=0)
to_plot.append(AttrDict(means=means, stds=stds, legend=group_legend, xs=xs, display_mode=display_mode))
elif display_mode == 'mean_se':
means = np.nanmean(progresses, axis=0)
ses = np.nanstd(progresses, axis=0) / np.sqrt(np.sum(1 - np.isnan(progresses), axis=0))
to_plot.append(AttrDict(means=means, ses=ses, legend=group_legend, xs=xs, display_mode=display_mode))
elif display_mode == 'individual':
to_plot.append(AttrDict(xs=xs, ys=progresses, legend=group_legend, display_mode=display_mode))
else:
raise NotImplementedError
if len(to_plot) > 0:
fig_title = '%s: %s' % (split_key, split_legend)
plots.append(make_plot(to_plot, title=fig_title))
counter += 1
plot_div = '\n'.join(plots)
return plot_div
|
Deep-RL-Bootcamp-Labs
|
positive
|
def finalize_parameters(bmk):
"""Utility to parse parameters in common as well as parameters
particular to each benchmark.
Parameters
----------
bmk : benchmark object
Object that has benchmark filepaths and specifications
Return
----------
gParameters : python dictionary
Dictionary with all the parameters necessary to run the benchmark.
Command line overwrites config file specifications
"""
bmk.parse_from_common()
bmk.parse_from_benchmark()
aux = bmk.parser.parse_known_args()
try:
conffile_txt = aux[0].config_file
except AttributeError:
conffile = bmk.conffile
else:
conffile = os.path.join(bmk.file_path, conffile_txt)
fileParameters = bmk.read_config_file(conffile)
args = bmk.parser.parse_args()
bmk_dict = bmk.additional_definitions
<DeepExtract>
args_dict = vars(args)
args_set = set(args_dict.keys())
bmk_keys = []
for item in bmk_dict:
bmk_keys.append(item['name'])
bmk_set = set(bmk_keys)
candle_set = set(PARAMETERS_CANDLE)
candle_set = candle_set.union(args_set)
candle_set = candle_set.union(bmk_set)
file_set = set(fileParameters.keys())
diff_set = file_set.difference(candle_set)
if len(diff_set) > 0:
message = 'These keywords used in the configuration file are not defined in CANDLE: ' + str(sorted(diff_set))
warnings.warn(message, RuntimeWarning)
</DeepExtract>
<DeepExtract>
params = fileParameters
args_dict = vars(args)
for key in args_dict.keys():
params[key] = args_dict[key]
if 'data_type' not in params:
params['data_type'] = DEFAULT_DATATYPE
elif params['data_type'] in set(['f16', 'f32', 'f64']):
params['data_type'] = get_choice(params['datatype'])
if 'output_dir' not in params:
params['output_dir'] = directory_from_parameters(params)
else:
params['output_dir'] = directory_from_parameters(params, params['output_dir'])
if 'rng_seed' not in params:
params['rng_seed'] = DEFAULT_SEED
if 'timeout' not in params:
params['timeout'] = DEFAULT_TIMEOUT
gParameters = params
</DeepExtract>
bmk.check_required_exists(gParameters)
print('Params:')
pprint(gParameters)
<DeepExtract>
key_set = set(gParameters.keys())
for flag_list in CONFLICT_LIST:
flag_count = 0
for i in flag_list:
if i in key_set:
if gParameters[i] is True:
flag_count += 1
if flag_count > 1:
raise Exception('ERROR ! Conflict in flag specification. These flags should not be used together: ' + str(sorted(flag_list)) + '... Exiting')
</DeepExtract>
return gParameters
|
def finalize_parameters(bmk):
"""Utility to parse parameters in common as well as parameters
particular to each benchmark.
Parameters
----------
bmk : benchmark object
Object that has benchmark filepaths and specifications
Return
----------
gParameters : python dictionary
Dictionary with all the parameters necessary to run the benchmark.
Command line overwrites config file specifications
"""
bmk.parse_from_common()
bmk.parse_from_benchmark()
aux = bmk.parser.parse_known_args()
try:
conffile_txt = aux[0].config_file
except AttributeError:
conffile = bmk.conffile
else:
conffile = os.path.join(bmk.file_path, conffile_txt)
fileParameters = bmk.read_config_file(conffile)
args = bmk.parser.parse_args()
bmk_dict = bmk.additional_definitions
args_dict = vars(args)
args_set = set(args_dict.keys())
bmk_keys = []
for item in bmk_dict:
bmk_keys.append(item['name'])
bmk_set = set(bmk_keys)
candle_set = set(PARAMETERS_CANDLE)
candle_set = candle_set.union(args_set)
candle_set = candle_set.union(bmk_set)
file_set = set(fileParameters.keys())
diff_set = file_set.difference(candle_set)
if len(diff_set) > 0:
message = 'These keywords used in the configuration file are not defined in CANDLE: ' + str(sorted(diff_set))
warnings.warn(message, RuntimeWarning)
params = fileParameters
args_dict = vars(args)
for key in args_dict.keys():
params[key] = args_dict[key]
if 'data_type' not in params:
params['data_type'] = DEFAULT_DATATYPE
elif params['data_type'] in set(['f16', 'f32', 'f64']):
params['data_type'] = get_choice(params['datatype'])
if 'output_dir' not in params:
params['output_dir'] = directory_from_parameters(params)
else:
params['output_dir'] = directory_from_parameters(params, params['output_dir'])
if 'rng_seed' not in params:
params['rng_seed'] = DEFAULT_SEED
if 'timeout' not in params:
params['timeout'] = DEFAULT_TIMEOUT
gParameters = params
bmk.check_required_exists(gParameters)
print('Params:')
pprint(gParameters)
key_set = set(gParameters.keys())
for flag_list in CONFLICT_LIST:
flag_count = 0
for i in flag_list:
if i in key_set:
if gParameters[i] is True:
flag_count += 1
if flag_count > 1:
raise Exception('ERROR ! Conflict in flag specification. These flags should not be used together: ' + str(sorted(flag_list)) + '... Exiting')
return gParameters
|
Benchmarks
|
positive
|
def ply_to_obj(ply_path, obj_path, texture_size=(1024, 1024)):
ply_path = Path(ply_path)
obj_path = Path(obj_path)
ply_copied_path = obj_path.parent / ply_path.name
is_same = ply_copied_path == ply_path
if not is_same:
shutil.copy(ply_path, ply_copied_path)
ply = PlyData.read(ply_path)
ply_texture = None
for c in ply.comments:
if 'TextureFile' in c:
ply_texture = c.split(' ')[-1]
if ply_texture is None:
<DeepExtract>
template_path = Path(__file__).resolve().parent / 'meshlab_templates' / 'template_vertexcolor_to_texture.mlx'
template = template_path.read_text()
</DeepExtract>
out_texture_path = obj_path.with_suffix('').name + '_texture.png'
script = template.format(out_texture_path=out_texture_path)
<DeepExtract>
ply_copied_path = Path(ply_copied_path)
obj_path = Path(obj_path)
n = np.random.randint(1000000.0)
script_path = Path(f'/dev/shm/{n}.mlx')
script_path.write_text(script)
if obj_path.parent is None:
obj_path.parent = '.'
command = [f'cd {obj_path.parent} &&', 'LC_ALL=C', 'meshlabserver', '-i', ply_copied_path.as_posix(), '-o', obj_path.as_posix(), '-s', script_path.as_posix(), '-om', 'vn']
if has_textures:
command += ['wt', 'vt']
print(command)
os.system(' '.join(command))
script_path.unlink()
return
</DeepExtract>
else:
<DeepExtract>
template_path = Path(__file__).resolve().parent / 'meshlab_templates' / 'template_ply_texture_to_obj.mlx'
template = template_path.read_text()
</DeepExtract>
script = template
ply_texture_name = ply_texture.split('.')[0]
out_texture_path = obj_path.parent / (ply_texture_name + '_texture.png')
shutil.copy(ply_path.parent / ply_texture, out_texture_path)
Image.open(out_texture_path).resize(texture_size, resample=PIL.Image.BILINEAR).save(out_texture_path)
<DeepExtract>
ply_path = Path(ply_path)
obj_path = Path(obj_path)
n = np.random.randint(1000000.0)
script_path = Path(f'/dev/shm/{n}.mlx')
script_path.write_text(template)
if cd_dir is None:
cd_dir = '.'
command = [f'cd {cd_dir} &&', 'LC_ALL=C', 'meshlabserver', '-i', ply_path.as_posix(), '-o', obj_path.as_posix(), '-s', script_path.as_posix(), '-om', 'vn']
if has_textures:
command += ['wt', 'vt']
print(command)
os.system(' '.join(command))
script_path.unlink()
return
</DeepExtract>
<DeepExtract>
obj_path = Path(obj_path)
texture_name = obj_path.with_suffix('').name + '_texture.png'
mtl_path = obj_path.with_suffix('.obj.mtl')
mtl = mtl_path.read_text()
mtl += f'\nmap_Kd {texture_name}'
mtl_path.write_text(mtl)
return
</DeepExtract>
if not is_same:
ply_copied_path.unlink()
return
|
def ply_to_obj(ply_path, obj_path, texture_size=(1024, 1024)):
ply_path = Path(ply_path)
obj_path = Path(obj_path)
ply_copied_path = obj_path.parent / ply_path.name
is_same = ply_copied_path == ply_path
if not is_same:
shutil.copy(ply_path, ply_copied_path)
ply = PlyData.read(ply_path)
ply_texture = None
for c in ply.comments:
if 'TextureFile' in c:
ply_texture = c.split(' ')[-1]
if ply_texture is None:
template_path = Path(__file__).resolve().parent / 'meshlab_templates' / 'template_vertexcolor_to_texture.mlx'
template = template_path.read_text()
out_texture_path = obj_path.with_suffix('').name + '_texture.png'
script = template.format(out_texture_path=out_texture_path)
ply_copied_path = Path(ply_copied_path)
obj_path = Path(obj_path)
n = np.random.randint(1000000.0)
script_path = Path(f'/dev/shm/{n}.mlx')
script_path.write_text(script)
if obj_path.parent is None:
obj_path.parent = '.'
command = [f'cd {obj_path.parent} &&', 'LC_ALL=C', 'meshlabserver', '-i', ply_copied_path.as_posix(), '-o', obj_path.as_posix(), '-s', script_path.as_posix(), '-om', 'vn']
if has_textures:
command += ['wt', 'vt']
print(command)
os.system(' '.join(command))
script_path.unlink()
return
else:
template_path = Path(__file__).resolve().parent / 'meshlab_templates' / 'template_ply_texture_to_obj.mlx'
template = template_path.read_text()
script = template
ply_texture_name = ply_texture.split('.')[0]
out_texture_path = obj_path.parent / (ply_texture_name + '_texture.png')
shutil.copy(ply_path.parent / ply_texture, out_texture_path)
Image.open(out_texture_path).resize(texture_size, resample=PIL.Image.BILINEAR).save(out_texture_path)
ply_path = Path(ply_path)
obj_path = Path(obj_path)
n = np.random.randint(1000000.0)
script_path = Path(f'/dev/shm/{n}.mlx')
script_path.write_text(template)
if cd_dir is None:
cd_dir = '.'
command = [f'cd {cd_dir} &&', 'LC_ALL=C', 'meshlabserver', '-i', ply_path.as_posix(), '-o', obj_path.as_posix(), '-s', script_path.as_posix(), '-om', 'vn']
if has_textures:
command += ['wt', 'vt']
print(command)
os.system(' '.join(command))
script_path.unlink()
return
obj_path = Path(obj_path)
texture_name = obj_path.with_suffix('').name + '_texture.png'
mtl_path = obj_path.with_suffix('.obj.mtl')
mtl = mtl_path.read_text()
mtl += f'\nmap_Kd {texture_name}'
mtl_path.write_text(mtl)
return
if not is_same:
ply_copied_path.unlink()
return
|
cosypose
|
positive
|
def test_fix_tree_non_destructive(self, mpshort_model):
<DeepExtract>
mpshort_model(path='4', depth=2, numchild=2, desc='a').save()
mpshort_model(path='13', depth=1000, numchild=0, desc='u').save()
mpshort_model(path='14', depth=4, numchild=500, desc='o').save()
mpshort_model(path='134', depth=321, numchild=543, desc='i').save()
mpshort_model(path='1343', depth=321, numchild=543, desc='e').save()
mpshort_model(path='42', depth=1, numchild=1, desc='a').save()
mpshort_model(path='43', depth=1000, numchild=0, desc='u').save()
mpshort_model(path='44', depth=4, numchild=500, desc='o').save()
mpshort_model(path='434', depth=321, numchild=543, desc='i').save()
mpshort_model(path='4343', depth=321, numchild=543, desc='e').save()
mpshort_model(path='41', depth=1, numchild=1, desc='a').save()
mpshort_model(path='3', depth=221, numchild=322, desc='g').save()
mpshort_model(path='1', depth=10, numchild=3, desc='b').save()
mpshort_model(path='2', depth=10, numchild=3, desc='d').save()
</DeepExtract>
mpshort_model.fix_tree(destructive=False)
<DeepExtract>
if mpshort_model in [models.NS_TestNode, models.NS_TestNode_Proxy]:
d = {}
for (tree_id, lft, rgt) in mpshort_model.objects.values_list('tree_id', 'lft', 'rgt'):
d.setdefault(tree_id, []).extend([lft, rgt])
for (tree_id, got_edges) in d.items():
assert len(got_edges) == max(got_edges)
good_edges = list(range(1, len(got_edges) + 1))
assert sorted(got_edges) == good_edges
got = [(o.desc, o.get_depth(), o.get_children_count()) for o in mpshort_model.get_tree()]
</DeepExtract>
expected = self.expected_with_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
|
def test_fix_tree_non_destructive(self, mpshort_model):
mpshort_model(path='4', depth=2, numchild=2, desc='a').save()
mpshort_model(path='13', depth=1000, numchild=0, desc='u').save()
mpshort_model(path='14', depth=4, numchild=500, desc='o').save()
mpshort_model(path='134', depth=321, numchild=543, desc='i').save()
mpshort_model(path='1343', depth=321, numchild=543, desc='e').save()
mpshort_model(path='42', depth=1, numchild=1, desc='a').save()
mpshort_model(path='43', depth=1000, numchild=0, desc='u').save()
mpshort_model(path='44', depth=4, numchild=500, desc='o').save()
mpshort_model(path='434', depth=321, numchild=543, desc='i').save()
mpshort_model(path='4343', depth=321, numchild=543, desc='e').save()
mpshort_model(path='41', depth=1, numchild=1, desc='a').save()
mpshort_model(path='3', depth=221, numchild=322, desc='g').save()
mpshort_model(path='1', depth=10, numchild=3, desc='b').save()
mpshort_model(path='2', depth=10, numchild=3, desc='d').save()
mpshort_model.fix_tree(destructive=False)
if mpshort_model in [models.NS_TestNode, models.NS_TestNode_Proxy]:
d = {}
for (tree_id, lft, rgt) in mpshort_model.objects.values_list('tree_id', 'lft', 'rgt'):
d.setdefault(tree_id, []).extend([lft, rgt])
for (tree_id, got_edges) in d.items():
assert len(got_edges) == max(got_edges)
good_edges = list(range(1, len(got_edges) + 1))
assert sorted(got_edges) == good_edges
got = [(o.desc, o.get_depth(), o.get_children_count()) for o in mpshort_model.get_tree()]
expected = self.expected_with_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
|
django-treebeard
|
positive
|
def get_metrics_dict() -> dict:
<DeepExtract>
metrics = {f'{metric}_total': 0 for metric in METRIC_NAMES}
</DeepExtract>
for metric in METRIC_NAMES:
metrics.update({f'{metric}_avg': 0.0, f'{metric}_max': 0, f'{metric}_min': sys.maxsize})
return metrics
|
def get_metrics_dict() -> dict:
metrics = {f'{metric}_total': 0 for metric in METRIC_NAMES}
for metric in METRIC_NAMES:
metrics.update({f'{metric}_avg': 0.0, f'{metric}_max': 0, f'{metric}_min': sys.maxsize})
return metrics
|
bugbug
|
positive
|
def subreddit_score_mean_and_std(idx):
<DeepExtract>
subreddits = {0: '100DaysofKeto', 1: 'AskMen', 2: 'AskMenOver30', 3: 'AskWomen', 4: 'AskWomenOver30', 5: 'LGBTeens', 6: 'OkCupid', 7: 'Tinder', 8: 'childfree', 9: 'fatlogic', 10: 'financialindependence', 11: 'infertility', 12: 'infj', 13: 'keto', 14: 'loseit', 15: 'proED', 16: 'sexover30', 17: 'short', 18: 'tall', 19: 'xxketo'}
subreddit = subreddits[idx]
</DeepExtract>
means = {'100DaysofKeto': 4.16580310880829, 'AskMen': 61.08446939321037, 'AskMenOver30': 19.640205077317457, 'AskWomen': 43.515366385795964, 'AskWomenOver30': 16.83549652882584, 'LGBTeens': 11.371757029672208, 'OkCupid': 7.912259406970695, 'Tinder': 54.55133819951338, 'childfree': 41.629965572033896, 'fatlogic': 61.86961140125697, 'financialindependence': 21.741568355308814, 'infertility': 4.269966555183946, 'infj': 6.30328120208525, 'keto': 6.335577664762253, 'loseit': 10.759423210333322, 'proED': 12.293228313157478, 'sexover30': 7.862483545414656, 'short': 6.966324530042671, 'tall': 14.139481707317072, 'xxketo': 6.622788654927554}
stds = {'100DaysofKeto': 4.257771265407754, 'AskMen': 358.85677169829677, 'AskMenOver30': 62.022953833297635, 'AskWomen': 203.28024082587, 'AskWomenOver30': 26.279682061246756, 'LGBTeens': 21.557311658010843, 'OkCupid': 16.683316882877435, 'Tinder': 329.3164620810436, 'childfree': 105.1919761318333, 'fatlogic': 115.06941068605869, 'financialindependence': 70.252278798125, 'infertility': 5.201437091946628, 'infj': 9.37623701272285, 'keto': 30.441002673493898, 'loseit': 69.05652111583404, 'proED': 20.152829147672076, 'sexover30': 14.017546840438202, 'short': 11.229113209821255, 'tall': 33.3192418530642, 'xxketo': 11.041996217862105}
return (means[subreddit], stds[subreddit])
|
def subreddit_score_mean_and_std(idx):
subreddits = {0: '100DaysofKeto', 1: 'AskMen', 2: 'AskMenOver30', 3: 'AskWomen', 4: 'AskWomenOver30', 5: 'LGBTeens', 6: 'OkCupid', 7: 'Tinder', 8: 'childfree', 9: 'fatlogic', 10: 'financialindependence', 11: 'infertility', 12: 'infj', 13: 'keto', 14: 'loseit', 15: 'proED', 16: 'sexover30', 17: 'short', 18: 'tall', 19: 'xxketo'}
subreddit = subreddits[idx]
means = {'100DaysofKeto': 4.16580310880829, 'AskMen': 61.08446939321037, 'AskMenOver30': 19.640205077317457, 'AskWomen': 43.515366385795964, 'AskWomenOver30': 16.83549652882584, 'LGBTeens': 11.371757029672208, 'OkCupid': 7.912259406970695, 'Tinder': 54.55133819951338, 'childfree': 41.629965572033896, 'fatlogic': 61.86961140125697, 'financialindependence': 21.741568355308814, 'infertility': 4.269966555183946, 'infj': 6.30328120208525, 'keto': 6.335577664762253, 'loseit': 10.759423210333322, 'proED': 12.293228313157478, 'sexover30': 7.862483545414656, 'short': 6.966324530042671, 'tall': 14.139481707317072, 'xxketo': 6.622788654927554}
stds = {'100DaysofKeto': 4.257771265407754, 'AskMen': 358.85677169829677, 'AskMenOver30': 62.022953833297635, 'AskWomen': 203.28024082587, 'AskWomenOver30': 26.279682061246756, 'LGBTeens': 21.557311658010843, 'OkCupid': 16.683316882877435, 'Tinder': 329.3164620810436, 'childfree': 105.1919761318333, 'fatlogic': 115.06941068605869, 'financialindependence': 70.252278798125, 'infertility': 5.201437091946628, 'infj': 9.37623701272285, 'keto': 30.441002673493898, 'loseit': 69.05652111583404, 'proED': 20.152829147672076, 'sexover30': 14.017546840438202, 'short': 11.229113209821255, 'tall': 33.3192418530642, 'xxketo': 11.041996217862105}
return (means[subreddit], stds[subreddit])
|
causal-text-embeddings
|
positive
|
@backend_skip
def test_experiment_run(self):
for case in self._test_cases:
with self.subTest(case=case):
<DeepExtract>
callbacks = list(case.pop('callbacks', []))
callbacks.append(LoggingCallback())
case['callbacks'] = callbacks
case = case
</DeepExtract>
with self.assertLogs(callback_logger, 'INFO') as cm:
<DeepExtract>
assert issubclass(self._experiment_cls, BaseExperiment)
exp = self._experiment_cls(config, network_cls, **kwargs)
dset_train = DummyDataset(len_train)
dset_test = DummyDataset(len_test)
dmgr_train = DataManager(dset_train, 16, 4, None)
dmgr_test = DataManager(dset_test, 16, 1, None)
return exp.run(dmgr_train, dmgr_test)
</DeepExtract>
for msg in self.logging_msg_run:
self.assertIn(msg, cm.output)
|
@backend_skip
def test_experiment_run(self):
for case in self._test_cases:
with self.subTest(case=case):
callbacks = list(case.pop('callbacks', []))
callbacks.append(LoggingCallback())
case['callbacks'] = callbacks
case = case
with self.assertLogs(callback_logger, 'INFO') as cm:
assert issubclass(self._experiment_cls, BaseExperiment)
exp = self._experiment_cls(config, network_cls, **kwargs)
dset_train = DummyDataset(len_train)
dset_test = DummyDataset(len_test)
dmgr_train = DataManager(dset_train, 16, 4, None)
dmgr_test = DataManager(dset_test, 16, 1, None)
return exp.run(dmgr_train, dmgr_test)
for msg in self.logging_msg_run:
self.assertIn(msg, cm.output)
|
delira
|
positive
|
def test_prepare_to_run_command(self):
<DeepExtract>
cmd_mgr = commandmanager.CommandManager('cliff.tests')
command = mock.MagicMock(spec=c_cmd.Command)
command_inst = mock.MagicMock(spec=c_cmd.Command)
command_inst.run.return_value = 0
command.return_value = command_inst
cmd_mgr.add_command('mock', command)
err_command = mock.Mock(name='err_command', spec=c_cmd.Command)
err_command_inst = mock.Mock(spec=c_cmd.Command)
err_command_inst.run = mock.Mock(side_effect=RuntimeError('test exception'))
err_command.return_value = err_command_inst
cmd_mgr.add_command('error', err_command)
interrupt_command = mock.Mock(name='interrupt_command', spec=c_cmd.Command)
interrupt_command_inst = mock.Mock(spec=c_cmd.Command)
interrupt_command_inst.run = mock.Mock(side_effect=KeyboardInterrupt)
interrupt_command.return_value = interrupt_command_inst
cmd_mgr.add_command('interrupt', interrupt_command)
pipeclose_command = mock.Mock(name='pipeclose_command', spec=c_cmd.Command)
pipeclose_command_inst = mock.Mock(spec=c_cmd.Command)
pipeclose_command_inst.run = mock.Mock(side_effect=BrokenPipeError)
pipeclose_command.return_value = pipeclose_command_inst
cmd_mgr.add_command('pipe-close', pipeclose_command)
app = application.App('testing interactive mode', '1', cmd_mgr, stderr=mock.Mock(), **kwargs)
(app, command) = (app, command)
</DeepExtract>
app.prepare_to_run_command = mock.MagicMock(name='prepare_to_run_command')
app.run(['mock'])
app.prepare_to_run_command.assert_called_once_with(command())
|
def test_prepare_to_run_command(self):
cmd_mgr = commandmanager.CommandManager('cliff.tests')
command = mock.MagicMock(spec=c_cmd.Command)
command_inst = mock.MagicMock(spec=c_cmd.Command)
command_inst.run.return_value = 0
command.return_value = command_inst
cmd_mgr.add_command('mock', command)
err_command = mock.Mock(name='err_command', spec=c_cmd.Command)
err_command_inst = mock.Mock(spec=c_cmd.Command)
err_command_inst.run = mock.Mock(side_effect=RuntimeError('test exception'))
err_command.return_value = err_command_inst
cmd_mgr.add_command('error', err_command)
interrupt_command = mock.Mock(name='interrupt_command', spec=c_cmd.Command)
interrupt_command_inst = mock.Mock(spec=c_cmd.Command)
interrupt_command_inst.run = mock.Mock(side_effect=KeyboardInterrupt)
interrupt_command.return_value = interrupt_command_inst
cmd_mgr.add_command('interrupt', interrupt_command)
pipeclose_command = mock.Mock(name='pipeclose_command', spec=c_cmd.Command)
pipeclose_command_inst = mock.Mock(spec=c_cmd.Command)
pipeclose_command_inst.run = mock.Mock(side_effect=BrokenPipeError)
pipeclose_command.return_value = pipeclose_command_inst
cmd_mgr.add_command('pipe-close', pipeclose_command)
app = application.App('testing interactive mode', '1', cmd_mgr, stderr=mock.Mock(), **kwargs)
(app, command) = (app, command)
app.prepare_to_run_command = mock.MagicMock(name='prepare_to_run_command')
app.run(['mock'])
app.prepare_to_run_command.assert_called_once_with(command())
|
cliff
|
positive
|
def test_dispatch_disconnect_event_abnormal_disconnect(self):
<DeepExtract>
self.client_status.set_status(ClientStatus.STABLE)
self._fill_in_fake_events([self._create_disconnect_event()])
self.load_mocks_into_test_target()
</DeepExtract>
<DeepExtract>
self.event_consumer.start()
time.sleep(1)
</DeepExtract>
expected_values = {KEY_CLIENT_STATUS_AFTER: ClientStatus.ABNORMAL_DISCONNECT, KEY_STOP_BG_NW_IO_CALL_COUNT: 0, KEY_CLEAN_UP_EVENT_CBS_CALL_COUNT: 0, KEY_IS_EVENT_CONSUMER_UP: True}
<DeepExtract>
client_status_after = expected_values.get(KEY_CLIENT_STATUS_AFTER)
stop_background_network_io_call_count = expected_values.get(KEY_STOP_BG_NW_IO_CALL_COUNT)
clean_up_event_callbacks_call_count = expected_values.get(KEY_CLEAN_UP_EVENT_CBS_CALL_COUNT)
is_event_queue_empty = expected_values.get(KEY_IS_EVENT_Q_EMPTY)
is_event_consumer_running = expected_values.get(KEY_IS_EVENT_CONSUMER_UP)
if client_status_after is not None:
assert self.client_status.get_status() == client_status_after
if stop_background_network_io_call_count is not None:
assert self.internal_async_client.stop_background_network_io.call_count == stop_background_network_io_call_count
if clean_up_event_callbacks_call_count is not None:
assert self.internal_async_client.clean_up_event_callbacks.call_count == clean_up_event_callbacks_call_count
if is_event_queue_empty is not None:
assert self.event_queue.empty() == is_event_queue_empty
if is_event_consumer_running is not None:
assert self.event_consumer.is_running() == is_event_consumer_running
self.internal_async_client.invoke_event_callback.assert_called_once_with(FixedEventMids.DISCONNECT_MID, data=DUMMY_SUCCESS_RC)
</DeepExtract>
assert self.event_consumer.is_fully_stopped() is False
|
def test_dispatch_disconnect_event_abnormal_disconnect(self):
self.client_status.set_status(ClientStatus.STABLE)
self._fill_in_fake_events([self._create_disconnect_event()])
self.load_mocks_into_test_target()
self.event_consumer.start()
time.sleep(1)
expected_values = {KEY_CLIENT_STATUS_AFTER: ClientStatus.ABNORMAL_DISCONNECT, KEY_STOP_BG_NW_IO_CALL_COUNT: 0, KEY_CLEAN_UP_EVENT_CBS_CALL_COUNT: 0, KEY_IS_EVENT_CONSUMER_UP: True}
client_status_after = expected_values.get(KEY_CLIENT_STATUS_AFTER)
stop_background_network_io_call_count = expected_values.get(KEY_STOP_BG_NW_IO_CALL_COUNT)
clean_up_event_callbacks_call_count = expected_values.get(KEY_CLEAN_UP_EVENT_CBS_CALL_COUNT)
is_event_queue_empty = expected_values.get(KEY_IS_EVENT_Q_EMPTY)
is_event_consumer_running = expected_values.get(KEY_IS_EVENT_CONSUMER_UP)
if client_status_after is not None:
assert self.client_status.get_status() == client_status_after
if stop_background_network_io_call_count is not None:
assert self.internal_async_client.stop_background_network_io.call_count == stop_background_network_io_call_count
if clean_up_event_callbacks_call_count is not None:
assert self.internal_async_client.clean_up_event_callbacks.call_count == clean_up_event_callbacks_call_count
if is_event_queue_empty is not None:
assert self.event_queue.empty() == is_event_queue_empty
if is_event_consumer_running is not None:
assert self.event_consumer.is_running() == is_event_consumer_running
self.internal_async_client.invoke_event_callback.assert_called_once_with(FixedEventMids.DISCONNECT_MID, data=DUMMY_SUCCESS_RC)
assert self.event_consumer.is_fully_stopped() is False
|
aws-iot-device-sdk-python
|
positive
|
def compare(self, type, id, v1, v2, context_lines):
if type in ('result', 'device_result'):
first = vs.dict_to_string(getattr(db.fetch('result', id=v1), 'result'))
second = vs.dict_to_string(getattr(db.fetch('result', id=v2), 'result'))
else:
device = db.fetch('device', id=id)
<DeepExtract>
(commit, result) = (Repo(vs.path / 'network_data').commit(v1), {})
device = db.fetch('device', name=device.name, rbac='configuration')
for property in vs.configuration_properties:
try:
file = commit.tree / device.name / property
with BytesIO(file.data_stream.read()) as f:
value = f.read().decode('utf-8')
result[property] = vs.custom.parse_configuration_property(device, property, value)
except KeyError:
result[property] = ''
result1 = {'result': result, 'datetime': commit.committed_datetime}
</DeepExtract>
<DeepExtract>
(commit, result) = (Repo(vs.path / 'network_data').commit(v2), {})
device = db.fetch('device', name=device.name, rbac='configuration')
for property in vs.configuration_properties:
try:
file = commit.tree / device.name / property
with BytesIO(file.data_stream.read()) as f:
value = f.read().decode('utf-8')
result[property] = vs.custom.parse_configuration_property(device, property, value)
except KeyError:
result[property] = ''
result2 = {'result': result, 'datetime': commit.committed_datetime}
</DeepExtract>
(v1, v2) = (result1['datetime'], result2['datetime'])
(first, second) = (result1['result'][type], result2['result'][type])
return '\n'.join(unified_diff(first.splitlines(), second.splitlines(), fromfile=f'V1 ({v1})', tofile=f'V2 ({v2})', lineterm='', n=int(context_lines)))
|
def compare(self, type, id, v1, v2, context_lines):
if type in ('result', 'device_result'):
first = vs.dict_to_string(getattr(db.fetch('result', id=v1), 'result'))
second = vs.dict_to_string(getattr(db.fetch('result', id=v2), 'result'))
else:
device = db.fetch('device', id=id)
(commit, result) = (Repo(vs.path / 'network_data').commit(v1), {})
device = db.fetch('device', name=device.name, rbac='configuration')
for property in vs.configuration_properties:
try:
file = commit.tree / device.name / property
with BytesIO(file.data_stream.read()) as f:
value = f.read().decode('utf-8')
result[property] = vs.custom.parse_configuration_property(device, property, value)
except KeyError:
result[property] = ''
result1 = {'result': result, 'datetime': commit.committed_datetime}
(commit, result) = (Repo(vs.path / 'network_data').commit(v2), {})
device = db.fetch('device', name=device.name, rbac='configuration')
for property in vs.configuration_properties:
try:
file = commit.tree / device.name / property
with BytesIO(file.data_stream.read()) as f:
value = f.read().decode('utf-8')
result[property] = vs.custom.parse_configuration_property(device, property, value)
except KeyError:
result[property] = ''
result2 = {'result': result, 'datetime': commit.committed_datetime}
(v1, v2) = (result1['datetime'], result2['datetime'])
(first, second) = (result1['result'][type], result2['result'][type])
return '\n'.join(unified_diff(first.splitlines(), second.splitlines(), fromfile=f'V1 ({v1})', tofile=f'V2 ({v2})', lineterm='', n=int(context_lines)))
|
eNMS
|
positive
|
def test_interpolate_3d_cubic_extrapolate_linear_xsupymidzsup(self):
"""3D cubic interpolation. Test values in the extrapolation area with x above and y inside and z above the interpolation area.
"""
<DeepExtract>
if x is None:
x = self.x
if y is None:
y = self.y
if z is None:
z = self.z
if data is None:
data = self.data
self.interp_data = data_file.cubic_interpolated_data
self.extrap_data_nea = data_file.cubic_nearest_extrapolated_data
self.extrap_data_lin = data_file.cubic_linear_extrapolated_data
self.extrap_data_qua = data_file.cubic_quadratic_extrapolated_data
self.interp_func = interpolators3d.Interpolate3DCubic(x, y, z, data, extrapolate=True, extrapolation_range=10, extrapolation_type='linear', tolerate_single_value=tolerate_single_value)
</DeepExtract>
<DeepExtract>
(mini, maxi) = self.extrapol_xdomains[2]
(minj, maxj) = self.extrapol_ydomains[1]
(mink, maxk) = self.extrapol_zdomains[2]
for iex in range(mini, maxi):
for jex in range(minj, maxj):
for kex in range(mink, maxk):
self.assertAlmostEqual(self.interp_func(self.xsamples_ex[iex], self.ysamples_ex[jex], self.zsamples_ex[kex]), self.extrap_data_lin[2][1][2][iex - mini, jex - minj, kex - mink], delta=1e-08)
</DeepExtract>
|
def test_interpolate_3d_cubic_extrapolate_linear_xsupymidzsup(self):
"""3D cubic interpolation. Test values in the extrapolation area with x above and y inside and z above the interpolation area.
"""
if x is None:
x = self.x
if y is None:
y = self.y
if z is None:
z = self.z
if data is None:
data = self.data
self.interp_data = data_file.cubic_interpolated_data
self.extrap_data_nea = data_file.cubic_nearest_extrapolated_data
self.extrap_data_lin = data_file.cubic_linear_extrapolated_data
self.extrap_data_qua = data_file.cubic_quadratic_extrapolated_data
self.interp_func = interpolators3d.Interpolate3DCubic(x, y, z, data, extrapolate=True, extrapolation_range=10, extrapolation_type='linear', tolerate_single_value=tolerate_single_value)
(mini, maxi) = self.extrapol_xdomains[2]
(minj, maxj) = self.extrapol_ydomains[1]
(mink, maxk) = self.extrapol_zdomains[2]
for iex in range(mini, maxi):
for jex in range(minj, maxj):
for kex in range(mink, maxk):
self.assertAlmostEqual(self.interp_func(self.xsamples_ex[iex], self.ysamples_ex[jex], self.zsamples_ex[kex]), self.extrap_data_lin[2][1][2][iex - mini, jex - minj, kex - mink], delta=1e-08)
|
core
|
positive
|
def hypertrain(model, trainer, data, nfold=2, allfolds=True, outdir=None, nsample=20, devices=None, verbose=None, report_class=None, auxfilter=None):
if report_class is None:
report_class = training_report
if outdir is None:
outdir = join(tempfile.gettempdir(), 'hypertrain')
<DeepExtract>
model_hparams = []
def collect_hparam(path, attr):
if isinstance(attr, hp.paramdef):
attr.name = 'model:' + path
model_hparams.append(attr)
model.visit(collect_hparam)
trainer_hparams = []
for (name, attr) in trainer.__dict__.iteritems():
if isinstance(attr, hp.paramdef):
attr.name = 'trainer:' + name
trainer_hparams.append(attr)
space = hp.space(trainer_hparams + model_hparams)
</DeepExtract>
logging.info('calibrating...')
samples = hp.search(space, objective=hypertrain_worker, objective_initargs=(model, trainer, data, nfold, allfolds, outdir, report_class, devices, False, sm.get_default_dtype(), globals.flags, auxfilter, 'calib', False), task_ids=data.targetnames, nsample=nsample, nprocess=len(devices), nsample_per_process=15, print_progress=True)
logging.info('...calibrating done')
return samples
|
def hypertrain(model, trainer, data, nfold=2, allfolds=True, outdir=None, nsample=20, devices=None, verbose=None, report_class=None, auxfilter=None):
if report_class is None:
report_class = training_report
if outdir is None:
outdir = join(tempfile.gettempdir(), 'hypertrain')
model_hparams = []
def collect_hparam(path, attr):
if isinstance(attr, hp.paramdef):
attr.name = 'model:' + path
model_hparams.append(attr)
model.visit(collect_hparam)
trainer_hparams = []
for (name, attr) in trainer.__dict__.iteritems():
if isinstance(attr, hp.paramdef):
attr.name = 'trainer:' + name
trainer_hparams.append(attr)
space = hp.space(trainer_hparams + model_hparams)
logging.info('calibrating...')
samples = hp.search(space, objective=hypertrain_worker, objective_initargs=(model, trainer, data, nfold, allfolds, outdir, report_class, devices, False, sm.get_default_dtype(), globals.flags, auxfilter, 'calib', False), task_ids=data.targetnames, nsample=nsample, nprocess=len(devices), nsample_per_process=15, print_progress=True)
logging.info('...calibrating done')
return samples
|
DeepBind
|
positive
|
def phone_address_risk_scoring(self, valid, active):
"""method to create calculate verdict for Phone Number"""
risk_criticality = ''
if valid == 'False':
risk_criticality = self.medium
elif active == 'False':
risk_criticality = self.medium
elif 90 <= self.score <= 100:
risk_criticality = self.high
elif 80 <= self.score <= 89:
risk_criticality = self.low
elif 50 <= self.score <= 79:
risk_criticality = self.suspicious
elif self.score <= 49:
risk_criticality = self.clean
<DeepExtract>
mapper = {self.clean: self.rf_grey, self.low: self.rf_grey, self.medium: self.rf_yellow, self.suspicious: self.rf_yellow, self.high: self.rf_red, self.critical: self.rf_red, self.invalid: self.rf_red, self.disposable: self.rf_red, self.malware: self.rf_red, self.phishing: self.rf_red}
hex_color = mapper.get(risk_criticality, self.rf_white)
</DeepExtract>
tag_name = f'IPQS:VERDICT="{risk_criticality}"'
return self.update_labels(tag_name, hex_color)
|
def phone_address_risk_scoring(self, valid, active):
"""method to create calculate verdict for Phone Number"""
risk_criticality = ''
if valid == 'False':
risk_criticality = self.medium
elif active == 'False':
risk_criticality = self.medium
elif 90 <= self.score <= 100:
risk_criticality = self.high
elif 80 <= self.score <= 89:
risk_criticality = self.low
elif 50 <= self.score <= 79:
risk_criticality = self.suspicious
elif self.score <= 49:
risk_criticality = self.clean
mapper = {self.clean: self.rf_grey, self.low: self.rf_grey, self.medium: self.rf_yellow, self.suspicious: self.rf_yellow, self.high: self.rf_red, self.critical: self.rf_red, self.invalid: self.rf_red, self.disposable: self.rf_red, self.malware: self.rf_red, self.phishing: self.rf_red}
hex_color = mapper.get(risk_criticality, self.rf_white)
tag_name = f'IPQS:VERDICT="{risk_criticality}"'
return self.update_labels(tag_name, hex_color)
|
connectors
|
positive
|
def render_to_file(self, filepath):
"""Overrides parent class implementation"""
<DeepExtract>
for lamp in self.lamps:
lamp.turn_off()
self.set_num_lamps(self.num_lamps.r)
num_active_lamps = self.num_lamps.sample_param()
if num_active_lamps < 0:
raise ValueError('number of lamps negative! aborting')
for l in range(num_active_lamps):
lamp = self.lamps[l]
lamp.turn_on()
self.random_lighting_conditions(lamp)
(x, y, z) = self.camera_loc.sample_param()
r = self.camera_radius.sample_param()
if r < 0:
raise ValueError('camera distance negative! aborting')
loc = (r * x, r * y, r * z)
self.camera.set_location(*loc)
self.camera.face_towards(0.0, 0.0, 0.0)
spin_angle = self.spin_angle.sample_param()
self.camera.spin(spin_angle)
self.subject.set_mesh_bbvol(self.subject_size.sample_param())
if self.subject_bot is None:
return
subject_radius = self.subject.get_scale()[0] * self.subject.compute_mesh_bbvol_diagonal()
r = r + subject_radius
loc = (r * x, r * y, r * z)
if z >= 0.0:
self.subject.set_location(0.0, 0.0, 0.0)
self.subject_bot.set_location(*loc)
elif z < 0.0:
self.subject_bot.set_location(0.0, 0.0, 0.0)
self.subject.set_location(*loc)
</DeepExtract>
self.data.render.filepath = filepath
bpy.ops.render.render(write_still=True)
|
def render_to_file(self, filepath):
"""Overrides parent class implementation"""
for lamp in self.lamps:
lamp.turn_off()
self.set_num_lamps(self.num_lamps.r)
num_active_lamps = self.num_lamps.sample_param()
if num_active_lamps < 0:
raise ValueError('number of lamps negative! aborting')
for l in range(num_active_lamps):
lamp = self.lamps[l]
lamp.turn_on()
self.random_lighting_conditions(lamp)
(x, y, z) = self.camera_loc.sample_param()
r = self.camera_radius.sample_param()
if r < 0:
raise ValueError('camera distance negative! aborting')
loc = (r * x, r * y, r * z)
self.camera.set_location(*loc)
self.camera.face_towards(0.0, 0.0, 0.0)
spin_angle = self.spin_angle.sample_param()
self.camera.spin(spin_angle)
self.subject.set_mesh_bbvol(self.subject_size.sample_param())
if self.subject_bot is None:
return
subject_radius = self.subject.get_scale()[0] * self.subject.compute_mesh_bbvol_diagonal()
r = r + subject_radius
loc = (r * x, r * y, r * z)
if z >= 0.0:
self.subject.set_location(0.0, 0.0, 0.0)
self.subject_bot.set_location(*loc)
elif z < 0.0:
self.subject_bot.set_location(0.0, 0.0, 0.0)
self.subject.set_location(*loc)
self.data.render.filepath = filepath
bpy.ops.render.render(write_still=True)
|
3d-dl
|
positive
|
def test_update_missing_fixed_rev(self):
"""[offline mode] update on a rev that we don't have raises."""
<DeepExtract>
target_dir = os.path.join(self.dst_dir, 'default')
repo = HgRepo(target_dir, self.src_repo)
repo('default')
repo = repo
</DeepExtract>
self.assertRaises(UpdateError, repo, self.rev1)
self.assertRaises(UpdateError, repo, 'future')
|
def test_update_missing_fixed_rev(self):
"""[offline mode] update on a rev that we don't have raises."""
target_dir = os.path.join(self.dst_dir, 'default')
repo = HgRepo(target_dir, self.src_repo)
repo('default')
repo = repo
self.assertRaises(UpdateError, repo, self.rev1)
self.assertRaises(UpdateError, repo, 'future')
|
anybox.recipe.odoo
|
positive
|
def _loadContract(contract):
if not contract:
return
def load(key):
try:
return contract[key]
except KeyError:
print('Contract JSON missing key: %s' % key)
return None
<DeepExtract>
try:
bytecode = contract['bin-runtime']
except KeyError:
print('Contract JSON missing key: %s' % 'bin-runtime')
bytecode = None
</DeepExtract>
if bytecode:
self.binRuntime = bytecode
self.insRuntime = parseCode(bytecode)
<DeepExtract>
try:
bytecode = contract['bin']
except KeyError:
print('Contract JSON missing key: %s' % 'bin')
bytecode = None
</DeepExtract>
if bytecode:
self.bin = bytecode
self.ins = parseCode(bytecode)
<DeepExtract>
mapping = []
if load('srcmap-runtime') is None:
self.mappingRuntime = mapping
entries = load('srcmap-runtime').split(';')
m = ['', '', '', '']
for e in entries:
vals = e.split(':')
m = update(m, vals)
mapping.append(m)
self.mappingRuntime = mapping
</DeepExtract>
<DeepExtract>
mapping = []
if load('srcmap') is None:
self.mapping = mapping
entries = load('srcmap').split(';')
m = ['', '', '', '']
for e in entries:
vals = e.split(':')
m = update(m, vals)
mapping.append(m)
self.mapping = mapping
</DeepExtract>
|
def _loadContract(contract):
if not contract:
return
def load(key):
try:
return contract[key]
except KeyError:
print('Contract JSON missing key: %s' % key)
return None
try:
bytecode = contract['bin-runtime']
except KeyError:
print('Contract JSON missing key: %s' % 'bin-runtime')
bytecode = None
if bytecode:
self.binRuntime = bytecode
self.insRuntime = parseCode(bytecode)
try:
bytecode = contract['bin']
except KeyError:
print('Contract JSON missing key: %s' % 'bin')
bytecode = None
if bytecode:
self.bin = bytecode
self.ins = parseCode(bytecode)
mapping = []
if load('srcmap-runtime') is None:
self.mappingRuntime = mapping
entries = load('srcmap-runtime').split(';')
m = ['', '', '', '']
for e in entries:
vals = e.split(':')
m = update(m, vals)
mapping.append(m)
self.mappingRuntime = mapping
mapping = []
if load('srcmap') is None:
self.mapping = mapping
entries = load('srcmap').split(';')
m = ['', '', '', '']
for e in entries:
vals = e.split(':')
m = update(m, vals)
mapping.append(m)
self.mapping = mapping
|
evmlab
|
positive
|
def train(self, iInputDS, iTime, iSignal, iHorizon, iTransformation):
logger = tsutil.get_pyaf_logger()
start_time = time.time()
<DeepExtract>
assert iInputDS.shape[0] > 0
assert iInputDS.shape[1] > 0
assert iTime in iInputDS.columns
assert iSignal in iInputDS.columns
self.mTime = iTime
self.mOriginalSignal = iSignal
self.mTransformation = iTransformation
self.mTransformation.mOriginalSignal = iSignal
lMissingImputer = tsmiss.cMissingDataImputer()
lMissingImputer.mOptions = self.mOptions
lSignal = lMissingImputer.interpolate_signal_if_needed(iInputDS, iSignal)
lTime = lMissingImputer.interpolate_time_if_needed(iInputDS, iTime)
self.mTransformation.fit(lSignal)
self.mSignal = iTransformation.get_name(iSignal)
self.mHorizon = iHorizon
self.mSignalFrame = pd.DataFrame()
self.mSignalFrame[self.mTime] = lTime
self.mSignalFrame[self.mOriginalSignal] = lSignal
self.mSignalFrame[self.mSignal] = self.mTransformation.apply(lSignal)
self.mSignalFrame['row_number'] = np.arange(0, iInputDS.shape[0])
assert self.mSignalFrame.shape[0] > 0
self.mSplit = tscut.cCuttingInfo()
self.mSplit.mTime = self.mTime
self.mSplit.mSignal = self.mSignal
self.mSplit.mOriginalSignal = self.mOriginalSignal
self.mSplit.mHorizon = self.mHorizon
self.mSplit.mSignalFrame = self.mSignalFrame
self.mSplit.mOptions = self.mOptions
self.mTimeInfo = tsti.cTimeInfo()
self.mTimeInfo.mTime = self.mTime
self.mTimeInfo.mSignal = self.mSignal
self.mTimeInfo.mOriginalSignal = self.mOriginalSignal
self.mTimeInfo.mHorizon = self.mHorizon
self.mTimeInfo.mSignalFrame = self.mSignalFrame
self.mTimeInfo.mOptions = self.mOptions
self.mTimeInfo.mSplit = self.mSplit
self.mExogenousInfo = None
if self.mExogenousData is not None:
self.mExogenousInfo = tsexog.cExogenousInfo()
self.mExogenousInfo.mExogenousData = self.mExogenousData
self.mExogenousInfo.mTimeInfo = self.mTimeInfo
self.mExogenousInfo.mOptions = self.mOptions
</DeepExtract>
<DeepExtract>
import gc
gc.collect()
</DeepExtract>
self.mSplit.estimate()
self.mTimeInfo.estimate()
exog_start_time = time.time()
if self.mExogenousInfo is not None:
self.mExogenousInfo.fit()
if self.mOptions.mDebugProfile:
logger.info('EXOGENOUS_ENCODING_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(time.time() - exog_start_time))
lTrendEstimator = tstr.cTrendEstimator()
lTrendEstimator.mSignalFrame = self.mSignalFrame
lTrendEstimator.mTimeInfo = self.mTimeInfo
lTrendEstimator.mSplit = self.mSplit
lTrendEstimator.mOptions = self.mOptions
trend_start_time = time.time()
lTrendEstimator.estimateTrend()
if self.mOptions.mDebugProfile:
logger.info('TREND_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(time.time() - trend_start_time))
cycle_start_time = time.time()
lCycleEstimator = tscy.cCycleEstimator()
lCycleEstimator.mTrendFrame = lTrendEstimator.mTrendFrame
lCycleEstimator.mTrendList = lTrendEstimator.mTrendList
del lTrendEstimator
<DeepExtract>
import gc
gc.collect()
</DeepExtract>
lCycleEstimator.mTimeInfo = self.mTimeInfo
lCycleEstimator.mSplit = self.mSplit
lCycleEstimator.mOptions = self.mOptions
lCycleEstimator.estimateAllCycles()
if self.mOptions.mDebugProfile:
logger.info('CYCLE_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(str(time.time() - cycle_start_time)))
ar_start_time = time.time()
lAREstimator = tsar.cAutoRegressiveEstimator()
lAREstimator.mCycleFrame = lCycleEstimator.mCycleFrame
lAREstimator.mTrendList = lCycleEstimator.mTrendList
lAREstimator.mCycleList = lCycleEstimator.mCycleList
del lCycleEstimator
<DeepExtract>
import gc
gc.collect()
</DeepExtract>
lAREstimator.mTimeInfo = self.mTimeInfo
lAREstimator.mSplit = self.mSplit
lAREstimator.mExogenousInfo = self.mExogenousInfo
lAREstimator.mOptions = self.mOptions
lAREstimator.estimate()
if self.mOptions.mDebugProfile:
logger.info('AUTOREG_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(str(time.time() - ar_start_time)))
perf_start_time = time.time()
lModels = {}
for trend in lAREstimator.mTrendList:
for cycle in lAREstimator.mCycleList[trend]:
cycle_residue = cycle.getCycleResidueName()
for autoreg in lAREstimator.mARList[cycle_residue]:
lModel = tsmodel.cTimeSeriesModel(self.mTransformation, trend, cycle, autoreg)
lModels[lModel.mOutName] = lModel
del lAREstimator
<DeepExtract>
self.mPerfsByModel = {}
for model in lModels.keys():
lModels[model].updatePerfs()
for (name, model) in lModels.items():
lComplexity = model.getComplexity()
lFitPerf = model.mFitPerf
lForecastPerf = model.mForecastPerf
lTestPerf = model.mTestPerf
self.mPerfsByModel[model.mOutName] = [model, lComplexity, lFitPerf, lForecastPerf, lTestPerf]
return lModels
</DeepExtract>
if self.mOptions.mDebugProfile:
logger.info('PERF_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(len(lModels)) + ' ' + str(str(time.time() - perf_start_time)))
if self.mOptions.mDebugProfile:
logger.info('TRAINING_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(time.time() - start_time))
<DeepExtract>
import gc
gc.collect()
</DeepExtract>
|
def train(self, iInputDS, iTime, iSignal, iHorizon, iTransformation):
logger = tsutil.get_pyaf_logger()
start_time = time.time()
assert iInputDS.shape[0] > 0
assert iInputDS.shape[1] > 0
assert iTime in iInputDS.columns
assert iSignal in iInputDS.columns
self.mTime = iTime
self.mOriginalSignal = iSignal
self.mTransformation = iTransformation
self.mTransformation.mOriginalSignal = iSignal
lMissingImputer = tsmiss.cMissingDataImputer()
lMissingImputer.mOptions = self.mOptions
lSignal = lMissingImputer.interpolate_signal_if_needed(iInputDS, iSignal)
lTime = lMissingImputer.interpolate_time_if_needed(iInputDS, iTime)
self.mTransformation.fit(lSignal)
self.mSignal = iTransformation.get_name(iSignal)
self.mHorizon = iHorizon
self.mSignalFrame = pd.DataFrame()
self.mSignalFrame[self.mTime] = lTime
self.mSignalFrame[self.mOriginalSignal] = lSignal
self.mSignalFrame[self.mSignal] = self.mTransformation.apply(lSignal)
self.mSignalFrame['row_number'] = np.arange(0, iInputDS.shape[0])
assert self.mSignalFrame.shape[0] > 0
self.mSplit = tscut.cCuttingInfo()
self.mSplit.mTime = self.mTime
self.mSplit.mSignal = self.mSignal
self.mSplit.mOriginalSignal = self.mOriginalSignal
self.mSplit.mHorizon = self.mHorizon
self.mSplit.mSignalFrame = self.mSignalFrame
self.mSplit.mOptions = self.mOptions
self.mTimeInfo = tsti.cTimeInfo()
self.mTimeInfo.mTime = self.mTime
self.mTimeInfo.mSignal = self.mSignal
self.mTimeInfo.mOriginalSignal = self.mOriginalSignal
self.mTimeInfo.mHorizon = self.mHorizon
self.mTimeInfo.mSignalFrame = self.mSignalFrame
self.mTimeInfo.mOptions = self.mOptions
self.mTimeInfo.mSplit = self.mSplit
self.mExogenousInfo = None
if self.mExogenousData is not None:
self.mExogenousInfo = tsexog.cExogenousInfo()
self.mExogenousInfo.mExogenousData = self.mExogenousData
self.mExogenousInfo.mTimeInfo = self.mTimeInfo
self.mExogenousInfo.mOptions = self.mOptions
import gc
gc.collect()
self.mSplit.estimate()
self.mTimeInfo.estimate()
exog_start_time = time.time()
if self.mExogenousInfo is not None:
self.mExogenousInfo.fit()
if self.mOptions.mDebugProfile:
logger.info('EXOGENOUS_ENCODING_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(time.time() - exog_start_time))
lTrendEstimator = tstr.cTrendEstimator()
lTrendEstimator.mSignalFrame = self.mSignalFrame
lTrendEstimator.mTimeInfo = self.mTimeInfo
lTrendEstimator.mSplit = self.mSplit
lTrendEstimator.mOptions = self.mOptions
trend_start_time = time.time()
lTrendEstimator.estimateTrend()
if self.mOptions.mDebugProfile:
logger.info('TREND_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(time.time() - trend_start_time))
cycle_start_time = time.time()
lCycleEstimator = tscy.cCycleEstimator()
lCycleEstimator.mTrendFrame = lTrendEstimator.mTrendFrame
lCycleEstimator.mTrendList = lTrendEstimator.mTrendList
del lTrendEstimator
import gc
gc.collect()
lCycleEstimator.mTimeInfo = self.mTimeInfo
lCycleEstimator.mSplit = self.mSplit
lCycleEstimator.mOptions = self.mOptions
lCycleEstimator.estimateAllCycles()
if self.mOptions.mDebugProfile:
logger.info('CYCLE_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(str(time.time() - cycle_start_time)))
ar_start_time = time.time()
lAREstimator = tsar.cAutoRegressiveEstimator()
lAREstimator.mCycleFrame = lCycleEstimator.mCycleFrame
lAREstimator.mTrendList = lCycleEstimator.mTrendList
lAREstimator.mCycleList = lCycleEstimator.mCycleList
del lCycleEstimator
import gc
gc.collect()
lAREstimator.mTimeInfo = self.mTimeInfo
lAREstimator.mSplit = self.mSplit
lAREstimator.mExogenousInfo = self.mExogenousInfo
lAREstimator.mOptions = self.mOptions
lAREstimator.estimate()
if self.mOptions.mDebugProfile:
logger.info('AUTOREG_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(str(time.time() - ar_start_time)))
perf_start_time = time.time()
lModels = {}
for trend in lAREstimator.mTrendList:
for cycle in lAREstimator.mCycleList[trend]:
cycle_residue = cycle.getCycleResidueName()
for autoreg in lAREstimator.mARList[cycle_residue]:
lModel = tsmodel.cTimeSeriesModel(self.mTransformation, trend, cycle, autoreg)
lModels[lModel.mOutName] = lModel
del lAREstimator
self.mPerfsByModel = {}
for model in lModels.keys():
lModels[model].updatePerfs()
for (name, model) in lModels.items():
lComplexity = model.getComplexity()
lFitPerf = model.mFitPerf
lForecastPerf = model.mForecastPerf
lTestPerf = model.mTestPerf
self.mPerfsByModel[model.mOutName] = [model, lComplexity, lFitPerf, lForecastPerf, lTestPerf]
return lModels
if self.mOptions.mDebugProfile:
logger.info('PERF_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(len(lModels)) + ' ' + str(str(time.time() - perf_start_time)))
if self.mOptions.mDebugProfile:
logger.info('TRAINING_TIME_IN_SECONDS ' + str(self.mSignal) + ' ' + str(time.time() - start_time))
import gc
gc.collect()
|
atspy
|
positive
|
def method_wrapper(method, **kwargs):
try:
<DeepExtract>
assert set(SECRETS).issubset(set(environ)), 'Required secrets are not present in environment variables. ENVIRONMENT: %s' % str(environ)
</DeepExtract>
<DeepExtract>
kwargs['client'] = boto3.client('emr', aws_access_key_id=environ['EMR_AWS_ACCESS_KEY_ID'], aws_secret_access_key=environ['EMR_AWS_SECRET_ACCESS_KEY'], region_name=environ['EMR_REGION'])
</DeepExtract>
return {'success': True, 'response': method(**kwargs)}
except Exception as e:
tb = traceback.format_exc()
return {'success': False, 'error': '%s %s\n\n%s' % (str(e.__class__), str(e), tb)}
|
def method_wrapper(method, **kwargs):
try:
assert set(SECRETS).issubset(set(environ)), 'Required secrets are not present in environment variables. ENVIRONMENT: %s' % str(environ)
kwargs['client'] = boto3.client('emr', aws_access_key_id=environ['EMR_AWS_ACCESS_KEY_ID'], aws_secret_access_key=environ['EMR_AWS_SECRET_ACCESS_KEY'], region_name=environ['EMR_REGION'])
return {'success': True, 'response': method(**kwargs)}
except Exception as e:
tb = traceback.format_exc()
return {'success': False, 'error': '%s %s\n\n%s' % (str(e.__class__), str(e), tb)}
|
aws-servicebroker
|
positive
|
@pytest.mark.parametrize('etag_multipart', [True, False])
@patch('ansible_collections.amazon.aws.plugins.module_utils.s3.calculate_checksum_with_file')
def test_calculate_etag(m_checksum_file, etag_multipart):
module = MagicMock()
client = MagicMock()
module.fail_json_aws.side_effect = SystemExit(2)
<DeepExtract>
buffer = string.ascii_lowercase
if include_digits:
buffer += string.digits
module.md5.return_value = ''.join((random.choice(buffer) for i in range(32)))
</DeepExtract>
s3bucket_name = 's3-bucket-%s' % generate_random_string(8, False)
s3bucket_object = 's3-bucket-object-%s' % generate_random_string(8, False)
version = random.randint(0, 1000)
parts = 3
etag = '"f20e84ac3d0c33cea77b3f29e3323a09"'
digest = '"9aa254f7f76fd14435b21e9448525b99"'
<DeepExtract>
buffer = string.ascii_lowercase
if include_digits:
buffer += string.digits
file_name = ''.join((random.choice(buffer) for i in range(32)))
</DeepExtract>
if not etag_multipart:
result = s3.calculate_etag(module, file_name, etag, client, s3bucket_name, s3bucket_object, version)
assert result == '"{0}"'.format(module.md5.return_value)
module.md5.assert_called_once_with(file_name)
else:
etag = '"f20e84ac3d0c33cea77b3f29e3323a09-{0}"'.format(parts)
m_checksum_file.return_value = digest
assert digest == s3.calculate_etag(module, file_name, etag, client, s3bucket_name, s3bucket_object, version)
m_checksum_file.assert_called_with(client, parts, s3bucket_name, s3bucket_object, version, file_name)
|
@pytest.mark.parametrize('etag_multipart', [True, False])
@patch('ansible_collections.amazon.aws.plugins.module_utils.s3.calculate_checksum_with_file')
def test_calculate_etag(m_checksum_file, etag_multipart):
module = MagicMock()
client = MagicMock()
module.fail_json_aws.side_effect = SystemExit(2)
buffer = string.ascii_lowercase
if include_digits:
buffer += string.digits
module.md5.return_value = ''.join((random.choice(buffer) for i in range(32)))
s3bucket_name = 's3-bucket-%s' % generate_random_string(8, False)
s3bucket_object = 's3-bucket-object-%s' % generate_random_string(8, False)
version = random.randint(0, 1000)
parts = 3
etag = '"f20e84ac3d0c33cea77b3f29e3323a09"'
digest = '"9aa254f7f76fd14435b21e9448525b99"'
buffer = string.ascii_lowercase
if include_digits:
buffer += string.digits
file_name = ''.join((random.choice(buffer) for i in range(32)))
if not etag_multipart:
result = s3.calculate_etag(module, file_name, etag, client, s3bucket_name, s3bucket_object, version)
assert result == '"{0}"'.format(module.md5.return_value)
module.md5.assert_called_once_with(file_name)
else:
etag = '"f20e84ac3d0c33cea77b3f29e3323a09-{0}"'.format(parts)
m_checksum_file.return_value = digest
assert digest == s3.calculate_etag(module, file_name, etag, client, s3bucket_name, s3bucket_object, version)
m_checksum_file.assert_called_with(client, parts, s3bucket_name, s3bucket_object, version, file_name)
|
amazon.aws
|
positive
|
def __init__(self, train_dict, data_generator_3d, batch_size=1, frames_per_batch=5, frame_shape=None, shuffle=False, transforms=['outer-distance'], transforms_kwargs={}, aug_3d=False, rotation_3d=0, sampling=None, z_scale=None, seed=None, min_objects=3, data_format='channels_last', save_to_dir=None, save_prefix='', save_format='png'):
if 'X' not in train_dict:
raise ValueError('No training data found in train_dict')
if 'y' not in train_dict:
raise ValueError('Instance masks are required for the Semantic3DIterator')
(X, y) = (train_dict['X'], train_dict['y'])
if X.shape[0] != y.shape[0]:
        raise ValueError('Training batches and labels should have the same length. Found X.shape: {} y.shape: {}'.format(X.shape, y.shape))
if X.ndim != 5:
raise ValueError('Input data in `Semantic3DIterator` should have rank 5. You passed an array with shape', X.shape)
if rotation_3d > 0 and (not z_scale):
raise ValueError('z_scaling factor required to rotate in 3d')
def _scale_im(input_im, scale, order):
dtype = input_im.dtype
batch_list = []
for batch_num in range(input_im.shape[0]):
batch = input_im[batch_num, ...]
if data_format == 'channels_first':
batch = np.moveaxis(batch, 0, -1)
rescaled = rescale(batch, scale, order=order, preserve_range=True, multichannel=True)
rescaled = np.moveaxis(rescaled, -1, 0)
else:
rescaled = rescale(batch, scale, order=order, preserve_range=True, multichannel=True)
batch_list.append(rescaled)
return np.stack(batch_list, axis=0).astype(dtype)
if aug_3d and rotation_3d > 0:
scale = tuple([z_scale, 1, 1])
<DeepExtract>
dtype = X.dtype
batch_list = []
for batch_num in range(X.shape[0]):
batch = X[batch_num, ...]
if data_format == 'channels_first':
batch = np.moveaxis(batch, 0, -1)
rescaled = rescale(batch, scale, order=1, preserve_range=True, multichannel=True)
rescaled = np.moveaxis(rescaled, -1, 0)
else:
rescaled = rescale(batch, scale, order=1, preserve_range=True, multichannel=True)
batch_list.append(rescaled)
X = np.stack(batch_list, axis=0).astype(dtype)
</DeepExtract>
<DeepExtract>
dtype = y.dtype
batch_list = []
for batch_num in range(y.shape[0]):
batch = y[batch_num, ...]
if data_format == 'channels_first':
batch = np.moveaxis(batch, 0, -1)
rescaled = rescale(batch, scale, order=0, preserve_range=True, multichannel=True)
rescaled = np.moveaxis(rescaled, -1, 0)
else:
rescaled = rescale(batch, scale, order=0, preserve_range=True, multichannel=True)
batch_list.append(rescaled)
y = np.stack(batch_list, axis=0).astype(dtype)
</DeepExtract>
self.output_frames = frames_per_batch
frames_per_batch = int(round(frames_per_batch * z_scale))
self.x = np.asarray(X, dtype=K.floatx())
self.y = np.asarray(y, dtype='int32')
self.frames_per_batch = frames_per_batch
self.frame_shape = frame_shape
self.transforms = transforms
self.transforms_kwargs = transforms_kwargs
self.aug_3d = aug_3d
self.rotation_3d = rotation_3d
self.z_scale = z_scale
self.channel_axis = 4 if data_format == 'channels_last' else 1
self.time_axis = 1 if data_format == 'channels_last' else 2
self.row_axis = 2 if data_format == 'channels_last' else 3
self.col_axis = 3 if data_format == 'channels_last' else 4
self.data_generator_3d = data_generator_3d
self.data_format = data_format
self.min_objects = min_objects
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
if X.shape[self.time_axis] - frames_per_batch < 0:
            raise ValueError('The number of frames used in each training batch should be less than the number of frames in the training data! fpb is {} and timeaxis is {}'.format(frames_per_batch, X.shape[self.time_axis]))
invalid_batches = []
for b in range(self.x.shape[0]):
if len(np.unique(self.y[b])) - 1 < self.min_objects:
invalid_batches.append(b)
invalid_batches = np.array(invalid_batches, dtype='int')
if invalid_batches.size > 0:
logging.warning('Removing %s of %s images with fewer than %s objects.', invalid_batches.size, self.x.shape[0], self.min_objects)
self.x = np.delete(self.x, invalid_batches, axis=0)
self.y = np.delete(self.y, invalid_batches, axis=0)
super().__init__(self.x.shape[0], batch_size, shuffle, seed)
|
def __init__(self, train_dict, data_generator_3d, batch_size=1, frames_per_batch=5, frame_shape=None, shuffle=False, transforms=['outer-distance'], transforms_kwargs={}, aug_3d=False, rotation_3d=0, sampling=None, z_scale=None, seed=None, min_objects=3, data_format='channels_last', save_to_dir=None, save_prefix='', save_format='png'):
if 'X' not in train_dict:
raise ValueError('No training data found in train_dict')
if 'y' not in train_dict:
raise ValueError('Instance masks are required for the Semantic3DIterator')
(X, y) = (train_dict['X'], train_dict['y'])
if X.shape[0] != y.shape[0]:
        raise ValueError('Training batches and labels should have the same length. Found X.shape: {} y.shape: {}'.format(X.shape, y.shape))
if X.ndim != 5:
raise ValueError('Input data in `Semantic3DIterator` should have rank 5. You passed an array with shape', X.shape)
if rotation_3d > 0 and (not z_scale):
raise ValueError('z_scaling factor required to rotate in 3d')
def _scale_im(input_im, scale, order):
dtype = input_im.dtype
batch_list = []
for batch_num in range(input_im.shape[0]):
batch = input_im[batch_num, ...]
if data_format == 'channels_first':
batch = np.moveaxis(batch, 0, -1)
rescaled = rescale(batch, scale, order=order, preserve_range=True, multichannel=True)
rescaled = np.moveaxis(rescaled, -1, 0)
else:
rescaled = rescale(batch, scale, order=order, preserve_range=True, multichannel=True)
batch_list.append(rescaled)
return np.stack(batch_list, axis=0).astype(dtype)
if aug_3d and rotation_3d > 0:
scale = tuple([z_scale, 1, 1])
dtype = X.dtype
batch_list = []
for batch_num in range(X.shape[0]):
batch = X[batch_num, ...]
if data_format == 'channels_first':
batch = np.moveaxis(batch, 0, -1)
rescaled = rescale(batch, scale, order=1, preserve_range=True, multichannel=True)
rescaled = np.moveaxis(rescaled, -1, 0)
else:
rescaled = rescale(batch, scale, order=1, preserve_range=True, multichannel=True)
batch_list.append(rescaled)
X = np.stack(batch_list, axis=0).astype(dtype)
dtype = y.dtype
batch_list = []
for batch_num in range(y.shape[0]):
batch = y[batch_num, ...]
if data_format == 'channels_first':
batch = np.moveaxis(batch, 0, -1)
rescaled = rescale(batch, scale, order=0, preserve_range=True, multichannel=True)
rescaled = np.moveaxis(rescaled, -1, 0)
else:
rescaled = rescale(batch, scale, order=0, preserve_range=True, multichannel=True)
batch_list.append(rescaled)
y = np.stack(batch_list, axis=0).astype(dtype)
self.output_frames = frames_per_batch
frames_per_batch = int(round(frames_per_batch * z_scale))
self.x = np.asarray(X, dtype=K.floatx())
self.y = np.asarray(y, dtype='int32')
self.frames_per_batch = frames_per_batch
self.frame_shape = frame_shape
self.transforms = transforms
self.transforms_kwargs = transforms_kwargs
self.aug_3d = aug_3d
self.rotation_3d = rotation_3d
self.z_scale = z_scale
self.channel_axis = 4 if data_format == 'channels_last' else 1
self.time_axis = 1 if data_format == 'channels_last' else 2
self.row_axis = 2 if data_format == 'channels_last' else 3
self.col_axis = 3 if data_format == 'channels_last' else 4
self.data_generator_3d = data_generator_3d
self.data_format = data_format
self.min_objects = min_objects
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
if X.shape[self.time_axis] - frames_per_batch < 0:
            raise ValueError('The number of frames used in each training batch should be less than the number of frames in the training data! fpb is {} and timeaxis is {}'.format(frames_per_batch, X.shape[self.time_axis]))
invalid_batches = []
for b in range(self.x.shape[0]):
if len(np.unique(self.y[b])) - 1 < self.min_objects:
invalid_batches.append(b)
invalid_batches = np.array(invalid_batches, dtype='int')
if invalid_batches.size > 0:
logging.warning('Removing %s of %s images with fewer than %s objects.', invalid_batches.size, self.x.shape[0], self.min_objects)
self.x = np.delete(self.x, invalid_batches, axis=0)
self.y = np.delete(self.y, invalid_batches, axis=0)
super().__init__(self.x.shape[0], batch_size, shuffle, seed)
|
deepcell-tf
|
positive
|
def do_process(self, parts, i):
<DeepExtract>
l = parts[i][:-1].split(' ')
ret = {}
if len(l) > 1:
for arg in l[1:]:
(key, val) = arg.split('=', 1)
ret[key] = val
for (k, v) in self.ARGS:
if k not in ret:
raise Exception("Missing mandatory argument '%s' for macro '%s'" % (k, self.ID))
args = ret
</DeepExtract>
self.engine.run_script_from_macro(args)
parts[i] = self.engine.get_return_value()
|
def do_process(self, parts, i):
l = parts[i][:-1].split(' ')
ret = {}
if len(l) > 1:
for arg in l[1:]:
(key, val) = arg.split('=', 1)
ret[key] = val
for (k, v) in self.ARGS:
if k not in ret:
raise Exception("Missing mandatory argument '%s' for macro '%s'" % (k, self.ID))
args = ret
self.engine.run_script_from_macro(args)
parts[i] = self.engine.get_return_value()
|
autokey-python2
|
positive
|
def set_directional_cursor(self, event_x, event_y, movable_center=False):
"""Set the accurate cursor depending on the position of the pointer on
the canvas."""
n_sizes = self.get_image().get_nineths_sizes(self.apply_to_selection, self._x, self._y)
if not self._set_directions(event_x, event_y, n_sizes):
return
if movable_center and self._directions == '':
self.cursor_name = 'move'
else:
<DeepExtract>
if self._directions == '':
self.cursor_name = 'not-allowed'
else:
self.cursor_name = self._directions + '-resize'
</DeepExtract>
self.window.set_cursor(True)
|
def set_directional_cursor(self, event_x, event_y, movable_center=False):
"""Set the accurate cursor depending on the position of the pointer on
the canvas."""
n_sizes = self.get_image().get_nineths_sizes(self.apply_to_selection, self._x, self._y)
if not self._set_directions(event_x, event_y, n_sizes):
return
if movable_center and self._directions == '':
self.cursor_name = 'move'
else:
if self._directions == '':
self.cursor_name = 'not-allowed'
else:
self.cursor_name = self._directions + '-resize'
self.window.set_cursor(True)
|
drawing
|
positive
|
def build_vocab(self):
if self.vocab_file:
logger.info('building vocab from {}'.format(self.vocab_file))
<DeepExtract>
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(self.vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
if '<UNK>' in self.sym2idx:
self.unk_idx = self.sym2idx['<UNK>']
elif '<unk>' in self.sym2idx:
self.unk_idx = self.sym2idx['<unk>']
else:
                raise ValueError('No <unknown> token in vocabulary')
</DeepExtract>
logger.info('final vocab size {}'.format(len(self)))
else:
logger.info('building vocab with min_freq={}, max_size={}'.format(self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
<DeepExtract>
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
</DeepExtract>
for (sym, cnt) in self.counter.most_common(self.max_size):
if cnt < self.min_freq:
break
<DeepExtract>
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
</DeepExtract>
logger.info('final vocab size {} from {} unique tokens'.format(len(self), len(self.counter)))
|
def build_vocab(self):
if self.vocab_file:
logger.info('building vocab from {}'.format(self.vocab_file))
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(self.vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
if '<UNK>' in self.sym2idx:
self.unk_idx = self.sym2idx['<UNK>']
elif '<unk>' in self.sym2idx:
self.unk_idx = self.sym2idx['<unk>']
else:
            raise ValueError('No <unknown> token in vocabulary')
logger.info('final vocab size {}'.format(len(self)))
else:
logger.info('building vocab with min_freq={}, max_size={}'.format(self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
for (sym, cnt) in self.counter.most_common(self.max_size):
if cnt < self.min_freq:
break
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
logger.info('final vocab size {} from {} unique tokens'.format(len(self), len(self.counter)))
|
BERT-CRF
|
positive
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = extend_to_32_bits((d_a & 65535 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32))
sc0 = extend_to_32_bits((d_a >> 16 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32))
mul_res1 = 2147483647 & sc1 | (d_a & 65535) * (d_b >> 16) << n.value & (sc1 ^ 4294967295)
mul_res0 = 2147483647 & sc0 | (d_a >> 16) * (d_b >> 16) << n.value & (sc0 ^ 4294967295)
d_d = self.get('d{0}'.format(self.data['d']), Type.int_32)
result_hw1 = (d_d & 4294901760) + mul_res1 + 32768
result_hw0 = (d_d << 16) + mul_res0 + 32768
result = result_hw1 & 4294901760 | result_hw0 >> 16
c = 0
v = overflow(result)
av = advanced_overflow(result)
<DeepExtract>
psw = self.get('psw', Type.int_32)
</DeepExtract>
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
return result
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = extend_to_32_bits((d_a & 65535 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32))
sc0 = extend_to_32_bits((d_a >> 16 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32))
mul_res1 = 2147483647 & sc1 | (d_a & 65535) * (d_b >> 16) << n.value & (sc1 ^ 4294967295)
mul_res0 = 2147483647 & sc0 | (d_a >> 16) * (d_b >> 16) << n.value & (sc0 ^ 4294967295)
d_d = self.get('d{0}'.format(self.data['d']), Type.int_32)
result_hw1 = (d_d & 4294901760) + mul_res1 + 32768
result_hw0 = (d_d << 16) + mul_res0 + 32768
result = result_hw1 & 4294901760 | result_hw0 >> 16
c = 0
v = overflow(result)
av = advanced_overflow(result)
psw = self.get('psw', Type.int_32)
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
return result
|
angr-platforms
|
positive
|
def predictNoOutcomeGetter(self, groups, standardize=True, sparse=False, restrictToGroups=None):
outcomes = list(self.classificationModels.keys())
groupNormsList = []
XGroups = None
UGroups = None
for i in range(len(self.featureGetters)):
fg = self.featureGetters[i]
(groupNorms, newFeatureNames) = fg.getGroupNormsSparseFeatsFirst(groups)
print(' [Aligning current X with training X: feature group: %d]' % i)
groupNormValues = []
for feat in self.featureNamesList[i]:
groupNormValues.append(groupNorms.get(feat, {}))
groupNormsList.append(groupNormValues)
print(' Features Aligned: %d' % len(groupNormValues))
<DeepExtract>
fgGroups = set([k for gns in groupNormValues for k in gns.keys()])
</DeepExtract>
if not XGroups:
XGroups = set(fgGroups)
UGroups = set(fgGroups)
else:
XGroups = XGroups & fgGroups
UGroups = UGroups | fgGroups
if len(XGroups) < len(groups):
print(' Different number of groups available for different outcomes. (%d, %d)' % (len(XGroups), len(groups)))
predictions = dict()
testGroupsOrder = list(XGroups)
for outcomeName in sorted(outcomes):
print('\n= %s =\n%s' % (outcomeName, '-' * (len(outcomeName) + 4)))
thisTestGroupsOrder = testGroupsOrder
multiXtest = []
for i in range(len(groupNormsList)):
groupNormValues = groupNormsList[i]
gns = dict(list(zip(self.featureNamesList[i], groupNormValues)))
df = pd.DataFrame(data=gns)
df = df.fillna(0.0)
df = df[self.featureNamesList[i]]
df = df.reindex(thisTestGroupsOrder)
print(' (feature group: %d)' % i)
multiXtest.append(csr_matrix(df.values))
<DeepExtract>
if not isinstance(multiXtest, (list, tuple)):
multiXtest = [multiXtest]
multiX = multiXtest
multiXtest = None
adaptMatrix = np.array([])
if adaptTables is not None:
print(('MultiX length after duplication:', len(multiX)))
controls_mat = multiX[-1].todense()
if adaptColumns is None:
adaptMatrix = controls_mat
else:
for adaptCol in adaptColumns:
adaptMatrix = np.insert(adaptMatrix, adaptMatrix.shape[1], controls_mat[:, adaptCol], axis=1)
i = 0
while i < len(multiX):
multiXtest = multiX[i]
if not sparse and isinstance(multiXtest, csr_matrix):
multiXtest = multiXtest.todense()
(scaler, fSelector) = (None, None)
if self.multiScalers[outcomeName]:
scaler = self.multiScalers[outcomeName][i]
if self.multiFSelectors[outcomeName]:
fSelector = self.multiFSelectors[outcomeName][i]
if scaler:
print(' predict: applying standard scaler to X[%d]: %s' % (i, str(scaler)))
try:
multiXtest = scaler.transform(multiXtest)
if self.outliersToMean and (not sparse):
multiXtest[abs(multiXtest) > self.outliersToMean] = 0
print(' predict: setting outliers (> %d) to mean for X[%d]' % (self.outliersToMean, i))
except NotFittedError as e:
warn(e)
warn('Fitting scaler')
multiXtest = scaler.fit_transform(multiXtest)
if outliersToMean and (not sparse):
multiXtest[abs(multiXtest) > self.outliersToMean] = 0
print(' predict: setting outliers (> %d) to mean for X[%d]' % (self.outliersToMean, i))
elif self.outliersToMean:
print(' Warning: Outliers to mean is not being run because standardize is off')
if fSelector:
print(' predict: applying feature selection to X[%d]: %s' % (i, str(fSelector)))
newX = fSelector.transform(multiXtest)
if newX.shape[1]:
multiXtest = newX
else:
print('No features selected, so using original full X')
if adaptTables is not None and i in adaptTables:
for j in range(adaptMatrix.shape[1]):
adaptColMult = adaptMatrix[:, j]
adaptX = list()
for k in range(multiXtest.shape[0]):
adaptX.append(np.array(adaptColMult[k] * multiXtest[k, :])[0])
multiX.insert(len(multiX) - 1, np.array(adaptX))
print(('MultiX length after duplication:', len(multiX)))
'\n #print adaptMatrix\n for j in range(adaptMatrix.shape[1]):\n adaptColMult=adaptMatrix[:,j]\n adaptX = X*adaptColMult.reshape((adaptColMult.shape[0],1))\n # to keep the index of controls table as the last table of multiX\n multiX.insert(len(multiX)-1,adaptX)\n '
'\n if adaptTables is not None:\n if i in adaptTables:\n controlsTable=multiX[len(multiX)-1]\n # if adaptCol is empty, it means all columns of the controls table will be used for adaptation.\n if adaptColumns is None:\n for j in range(controlsTable.shape[1]):\n adaptColMult=controlsTable[:,j]\n adaptX = X*adaptColMult.reshape((adaptColMult.shape[0],1))\n # to keep the index of controls table as the last table of multiX\n multiX.insert(len(multiX)-1,adaptX)\n else:\n for adaptCol in adaptColumns:\n adaptColMult=controlsTable[:,adaptCol]\n adaptX = X*adaptColMult.reshape((adaptColMult.shape[0],1))\n # to keep the index of controls table as the last table of multiX\n multiX.insert(len(multiX)-1,adaptX)\n '
multiX[i] = multiXtest
i += 1
multiXtest = multiX[0]
for nextX in multiX[1:]:
multiXtest = matrixAppendHoriz(multiXtest, nextX)
print(' predict: combined X shape: %s' % str(multiXtest.shape))
if hasattr(self.classificationModels[outcomeName], 'intercept_'):
print(' predict: classifier intercept: %s' % str(self.classificationModels[outcomeName].intercept_))
if probs:
try:
ypred = (self.classificationModels[outcomeName].predict_proba(multiXtest), self.classificationModels[outcomeName].classes_)
except AttributeError:
confs = self.classificationModels[outcomeName].decision_function(multiXtest)
if len(self.classificationModels[outcomeName].classes_) == 2:
confs = array(list(zip([-1 * c for c in confs], confs)))
ypred = (confs, self.classificationModels[outcomeName].classes_)
else:
ypred = self.classificationModels[outcomeName].predict(multiXtest)
</DeepExtract>
print('[Done.]')
assert len(thisTestGroupsOrder) == len(ypred), "can't line predictions up with groups"
predictions[outcomeName] = dict(list(zip(thisTestGroupsOrder, ypred)))
print('[Prediction Complete]')
return predictions
|
def predictNoOutcomeGetter(self, groups, standardize=True, sparse=False, restrictToGroups=None):
outcomes = list(self.classificationModels.keys())
groupNormsList = []
XGroups = None
UGroups = None
for i in range(len(self.featureGetters)):
fg = self.featureGetters[i]
(groupNorms, newFeatureNames) = fg.getGroupNormsSparseFeatsFirst(groups)
print(' [Aligning current X with training X: feature group: %d]' % i)
groupNormValues = []
for feat in self.featureNamesList[i]:
groupNormValues.append(groupNorms.get(feat, {}))
groupNormsList.append(groupNormValues)
print(' Features Aligned: %d' % len(groupNormValues))
fgGroups = set([k for gns in groupNormValues for k in gns.keys()])
if not XGroups:
XGroups = set(fgGroups)
UGroups = set(fgGroups)
else:
XGroups = XGroups & fgGroups
UGroups = UGroups | fgGroups
if len(XGroups) < len(groups):
print(' Different number of groups available for different outcomes. (%d, %d)' % (len(XGroups), len(groups)))
predictions = dict()
testGroupsOrder = list(XGroups)
for outcomeName in sorted(outcomes):
print('\n= %s =\n%s' % (outcomeName, '-' * (len(outcomeName) + 4)))
thisTestGroupsOrder = testGroupsOrder
multiXtest = []
for i in range(len(groupNormsList)):
groupNormValues = groupNormsList[i]
gns = dict(list(zip(self.featureNamesList[i], groupNormValues)))
df = pd.DataFrame(data=gns)
df = df.fillna(0.0)
df = df[self.featureNamesList[i]]
df = df.reindex(thisTestGroupsOrder)
print(' (feature group: %d)' % i)
multiXtest.append(csr_matrix(df.values))
if not isinstance(multiXtest, (list, tuple)):
multiXtest = [multiXtest]
multiX = multiXtest
multiXtest = None
adaptMatrix = np.array([])
if adaptTables is not None:
print(('MultiX length after duplication:', len(multiX)))
controls_mat = multiX[-1].todense()
if adaptColumns is None:
adaptMatrix = controls_mat
else:
for adaptCol in adaptColumns:
adaptMatrix = np.insert(adaptMatrix, adaptMatrix.shape[1], controls_mat[:, adaptCol], axis=1)
i = 0
while i < len(multiX):
multiXtest = multiX[i]
if not sparse and isinstance(multiXtest, csr_matrix):
multiXtest = multiXtest.todense()
(scaler, fSelector) = (None, None)
if self.multiScalers[outcomeName]:
scaler = self.multiScalers[outcomeName][i]
if self.multiFSelectors[outcomeName]:
fSelector = self.multiFSelectors[outcomeName][i]
if scaler:
print(' predict: applying standard scaler to X[%d]: %s' % (i, str(scaler)))
try:
multiXtest = scaler.transform(multiXtest)
if self.outliersToMean and (not sparse):
multiXtest[abs(multiXtest) > self.outliersToMean] = 0
print(' predict: setting outliers (> %d) to mean for X[%d]' % (self.outliersToMean, i))
except NotFittedError as e:
warn(e)
warn('Fitting scaler')
multiXtest = scaler.fit_transform(multiXtest)
if outliersToMean and (not sparse):
multiXtest[abs(multiXtest) > self.outliersToMean] = 0
print(' predict: setting outliers (> %d) to mean for X[%d]' % (self.outliersToMean, i))
elif self.outliersToMean:
print(' Warning: Outliers to mean is not being run because standardize is off')
if fSelector:
print(' predict: applying feature selection to X[%d]: %s' % (i, str(fSelector)))
newX = fSelector.transform(multiXtest)
if newX.shape[1]:
multiXtest = newX
else:
print('No features selected, so using original full X')
if adaptTables is not None and i in adaptTables:
for j in range(adaptMatrix.shape[1]):
adaptColMult = adaptMatrix[:, j]
adaptX = list()
for k in range(multiXtest.shape[0]):
adaptX.append(np.array(adaptColMult[k] * multiXtest[k, :])[0])
multiX.insert(len(multiX) - 1, np.array(adaptX))
print(('MultiX length after duplication:', len(multiX)))
'\n #print adaptMatrix\n for j in range(adaptMatrix.shape[1]):\n adaptColMult=adaptMatrix[:,j]\n adaptX = X*adaptColMult.reshape((adaptColMult.shape[0],1))\n # to keep the index of controls table as the last table of multiX\n multiX.insert(len(multiX)-1,adaptX)\n '
'\n if adaptTables is not None:\n if i in adaptTables:\n controlsTable=multiX[len(multiX)-1]\n # if adaptCol is empty, it means all columns of the controls table will be used for adaptation.\n if adaptColumns is None:\n for j in range(controlsTable.shape[1]):\n adaptColMult=controlsTable[:,j]\n adaptX = X*adaptColMult.reshape((adaptColMult.shape[0],1))\n # to keep the index of controls table as the last table of multiX\n multiX.insert(len(multiX)-1,adaptX)\n else:\n for adaptCol in adaptColumns:\n adaptColMult=controlsTable[:,adaptCol]\n adaptX = X*adaptColMult.reshape((adaptColMult.shape[0],1))\n # to keep the index of controls table as the last table of multiX\n multiX.insert(len(multiX)-1,adaptX)\n '
multiX[i] = multiXtest
i += 1
multiXtest = multiX[0]
for nextX in multiX[1:]:
multiXtest = matrixAppendHoriz(multiXtest, nextX)
print(' predict: combined X shape: %s' % str(multiXtest.shape))
if hasattr(self.classificationModels[outcomeName], 'intercept_'):
print(' predict: classifier intercept: %s' % str(self.classificationModels[outcomeName].intercept_))
if probs:
try:
ypred = (self.classificationModels[outcomeName].predict_proba(multiXtest), self.classificationModels[outcomeName].classes_)
except AttributeError:
confs = self.classificationModels[outcomeName].decision_function(multiXtest)
if len(self.classificationModels[outcomeName].classes_) == 2:
confs = array(list(zip([-1 * c for c in confs], confs)))
ypred = (confs, self.classificationModels[outcomeName].classes_)
else:
ypred = self.classificationModels[outcomeName].predict(multiXtest)
print('[Done.]')
assert len(thisTestGroupsOrder) == len(ypred), "can't line predictions up with groups"
predictions[outcomeName] = dict(list(zip(thisTestGroupsOrder, ypred)))
print('[Prediction Complete]')
return predictions
|
dlatk
|
positive
|
@filter_hook
def formfield_for_dbfield(self, db_field, **kwargs):
if isinstance(db_field, models.ManyToManyField) and (not db_field.rel.through._meta.auto_created):
return None
<DeepExtract>
if db_field.name in self.style_fields:
attrs = self.get_field_style(db_field, self.style_fields[db_field.name], **kwargs)
if attrs:
attrs = attrs
if hasattr(db_field, 'rel') and db_field.rel:
related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
if related_modeladmin and hasattr(related_modeladmin, 'relfield_style'):
attrs = self.get_field_style(db_field, related_modeladmin.relfield_style, **kwargs)
if attrs:
attrs = attrs
if db_field.choices:
attrs = {'widget': widgets.AdminSelectWidget}
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
attrs = self.formfield_overrides[klass].copy()
attrs = {}
</DeepExtract>
return db_field.formfield(**dict(attrs, **kwargs))
|
@filter_hook
def formfield_for_dbfield(self, db_field, **kwargs):
if isinstance(db_field, models.ManyToManyField) and (not db_field.rel.through._meta.auto_created):
return None
if db_field.name in self.style_fields:
attrs = self.get_field_style(db_field, self.style_fields[db_field.name], **kwargs)
if attrs:
attrs = attrs
if hasattr(db_field, 'rel') and db_field.rel:
related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
if related_modeladmin and hasattr(related_modeladmin, 'relfield_style'):
attrs = self.get_field_style(db_field, related_modeladmin.relfield_style, **kwargs)
if attrs:
attrs = attrs
if db_field.choices:
attrs = {'widget': widgets.AdminSelectWidget}
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
attrs = self.formfield_overrides[klass].copy()
attrs = {}
return db_field.formfield(**dict(attrs, **kwargs))
|
Django_Blog
|
positive
|
def update_refs_per_file(classes):
"""Change all refs to the _ versions."""
changes = []
for c in classes:
if '_' in c.name:
changes.append((c.name.split('_')[1], c.name))
<DeepExtract>
for (i, c) in enumerate(classes):
lines = []
for line in c.lines:
if 'title=' not in line and 'description=' not in line:
for item in changes:
if item[0] in line:
line = replace_token(line, item[0], item[1])
lines.append(line)
classes[i].lines = lines
paren = lines[0].find('(')
class_name = classes[i].name
if paren > 0:
class_name = lines[0][len('class '):paren]
classes[i].name = class_name
classes[i].generate_body_text()
classes = classes
</DeepExtract>
return classes
|
def update_refs_per_file(classes):
"""Change all refs to the _ versions."""
changes = []
for c in classes:
if '_' in c.name:
changes.append((c.name.split('_')[1], c.name))
for (i, c) in enumerate(classes):
lines = []
for line in c.lines:
if 'title=' not in line and 'description=' not in line:
for item in changes:
if item[0] in line:
line = replace_token(line, item[0], item[1])
lines.append(line)
classes[i].lines = lines
paren = lines[0].find('(')
class_name = classes[i].name
if paren > 0:
class_name = lines[0][len('class '):paren]
classes[i].name = class_name
classes[i].generate_body_text()
classes = classes
return classes
|
compliance-trestle
|
positive
|
def test_method3():
<DeepExtract>
z = method4(1, 2)
a = method5(z)
b = method6(a, 1, 2)
ret = b
</DeepExtract>
assert method4.args == (1, 2)
assert method5.args == (method4.ret,)
assert method6.args == (method5.ret, 1, 2)
assert method6.ret == ret
|
def test_method3():
z = method4(1, 2)
a = method5(z)
b = method6(a, 1, 2)
ret = b
assert method4.args == (1, 2)
assert method5.args == (method4.ret,)
assert method6.args == (method5.ret, 1, 2)
assert method6.ret == ret
|
code_interview_training
|
positive
|
def invoke(self, argument, from_tty):
"""Invoke the command with an argument string"""
try:
<DeepExtract>
(args, kwargs) = (gdb.string_to_argv(argument), {})
</DeepExtract>
kwargs = {k: v for (k, v) in kwargs.items() if v is not None}
except SystemExit:
return
except (TypeError, gdb.error):
pwndbg.exception.handle(self.function.__name__)
return
try:
<DeepExtract>
if not from_tty:
self.repeat = False
lines = gdb.execute('show commands', from_tty=False, to_string=True)
lines = lines.splitlines()
if not lines:
self.repeat = False
last_line = lines[-1]
(number, command) = last_line.split(None, 1)
try:
number = int(number)
except ValueError:
self.repeat = False
if number not in Command.history:
Command.history[number] = command
self.repeat = False
if not command.endswith(argument):
self.repeat = False
self.repeat = True
</DeepExtract>
return self(*args, **kwargs)
finally:
self.repeat = False
|
def invoke(self, argument, from_tty):
"""Invoke the command with an argument string"""
try:
(args, kwargs) = (gdb.string_to_argv(argument), {})
kwargs = {k: v for (k, v) in kwargs.items() if v is not None}
except SystemExit:
return
except (TypeError, gdb.error):
pwndbg.exception.handle(self.function.__name__)
return
try:
if not from_tty:
self.repeat = False
lines = gdb.execute('show commands', from_tty=False, to_string=True)
lines = lines.splitlines()
if not lines:
self.repeat = False
last_line = lines[-1]
(number, command) = last_line.split(None, 1)
try:
number = int(number)
except ValueError:
self.repeat = False
if number not in Command.history:
Command.history[number] = command
self.repeat = False
if not command.endswith(argument):
self.repeat = False
self.repeat = True
return self(*args, **kwargs)
finally:
self.repeat = False
|
217gdb
|
positive
|
def _flattened_pairwise_distances(reference_embeddings, query_embeddings):
"""Calculates flattened tensor of pairwise distances between ref and query.
Args:
reference_embeddings: Tensor of shape [..., embedding_dim],
the embedding vectors for the reference frame
query_embeddings: Tensor of shape [n_query_images, height, width,
embedding_dim], the embedding vectors for the query frames.
Returns:
A distance tensor of shape [reference_embeddings.size / embedding_dim,
query_embeddings.size / embedding_dim]
"""
embedding_dim = query_embeddings.size()[-1]
reference_embeddings = reference_embeddings.view(-1, embedding_dim)
first_dim = -1
query_embeddings = query_embeddings.view(first_dim, embedding_dim)
<DeepExtract>
xs = torch.sum(query_embeddings * query_embeddings, 1)
ys = torch.sum(reference_embeddings * reference_embeddings, 1)
xs = xs.unsqueeze(1)
ys = ys.unsqueeze(0)
d = xs + ys - 2.0 * torch.matmul(query_embeddings, torch.t(reference_embeddings))
dists = d
</DeepExtract>
return dists
|
def _flattened_pairwise_distances(reference_embeddings, query_embeddings):
"""Calculates flattened tensor of pairwise distances between ref and query.
Args:
reference_embeddings: Tensor of shape [..., embedding_dim],
the embedding vectors for the reference frame
query_embeddings: Tensor of shape [n_query_images, height, width,
embedding_dim], the embedding vectors for the query frames.
Returns:
A distance tensor of shape [reference_embeddings.size / embedding_dim,
query_embeddings.size / embedding_dim]
"""
embedding_dim = query_embeddings.size()[-1]
reference_embeddings = reference_embeddings.view(-1, embedding_dim)
first_dim = -1
query_embeddings = query_embeddings.view(first_dim, embedding_dim)
xs = torch.sum(query_embeddings * query_embeddings, 1)
ys = torch.sum(reference_embeddings * reference_embeddings, 1)
xs = xs.unsqueeze(1)
ys = ys.unsqueeze(0)
d = xs + ys - 2.0 * torch.matmul(query_embeddings, torch.t(reference_embeddings))
dists = d
return dists
|
CVPR2020_MANet
|
positive
|
def handle_failure(directory, entry, file_path):
if not args.no_move_failed:
if directory not in fail_subdir_ok:
<DeepExtract>
fail_subdir_ok[directory] = False
if not os.access(directory, os.W_OK):
logger.warning('No permission to create a sub-directory nor remove files from %s. Disabling the move-to-faildir feature for that directory.', directory)
return False
fail_dir_path = os.path.join(directory, args.fail_subdir)
if not os.path.exists(fail_dir_path):
logger.info('Creating sub-directory %s', args.fail_subdir)
os.makedirs(fail_dir_path)
if not os.access(fail_dir_path, os.W_OK):
logger.warning('No permission to move files to %s. Disabling the move-to-faildirfeature for directory %s', fail_dir_path, directory)
return False
fail_subdir_ok[directory] = fail_dir_path
return True
</DeepExtract>
fail_dir_path = fail_subdir_ok[directory]
if fail_dir_path:
logger.info('Moving file %s to sub-directory %s', entry, args.fail_subdir)
os.rename(file_path, os.path.join(fail_dir_path, entry))
|
def handle_failure(directory, entry, file_path):
if not args.no_move_failed:
if directory not in fail_subdir_ok:
fail_subdir_ok[directory] = False
if not os.access(directory, os.W_OK):
logger.warning('No permission to create a sub-directory nor remove files from %s. Disabling the move-to-faildir feature for that directory.', directory)
return False
fail_dir_path = os.path.join(directory, args.fail_subdir)
if not os.path.exists(fail_dir_path):
logger.info('Creating sub-directory %s', args.fail_subdir)
os.makedirs(fail_dir_path)
if not os.access(fail_dir_path, os.W_OK):
logger.warning('No permission to move files to %s. Disabling the move-to-faildirfeature for directory %s', fail_dir_path, directory)
return False
fail_subdir_ok[directory] = fail_dir_path
return True
fail_dir_path = fail_subdir_ok[directory]
if fail_dir_path:
logger.info('Moving file %s to sub-directory %s', entry, args.fail_subdir)
os.rename(file_path, os.path.join(fail_dir_path, entry))
|
edi
|
positive
|
def test_vrid_create_floating_ip(self):
self.target.create(4, threshold=1, disable=0, floating_ips=['10.10.10.8'])
<DeepExtract>
rv = {'vrid': {'vrid-val': 4, 'preempt-mode': {'threshold': threshold, 'disable': disable}}}
payload = rv
</DeepExtract>
payload['vrid']['floating-ip'] = mock.ANY
self.client.http.request.assert_called_with('POST', self.url_prefix, payload, mock.ANY, axapi_args=None, max_retries=None, timeout=mock.ANY)
|
def test_vrid_create_floating_ip(self):
self.target.create(4, threshold=1, disable=0, floating_ips=['10.10.10.8'])
rv = {'vrid': {'vrid-val': 4, 'preempt-mode': {'threshold': threshold, 'disable': disable}}}
payload = rv
payload['vrid']['floating-ip'] = mock.ANY
self.client.http.request.assert_called_with('POST', self.url_prefix, payload, mock.ANY, axapi_args=None, max_retries=None, timeout=mock.ANY)
|
acos-client
|
positive
|
def test_list_zoom_factor(self):
<DeepExtract>
observation = np.ones(shape=[300, 300, 3])
fake_timestep = dm_env.TimeStep(step_type=dm_env.StepType.MID, reward=3.14, discount=0.9, observation={'pixels': observation})
</DeepExtract>
fake_env = mock.create_autospec(env_interface.AndroidEnvInterface)
fake_env.observation_spec.return_value = {'pixels': _simple_spec()}
fake_env.reset.return_value = fake_timestep
fake_env.step.return_value = fake_timestep
wrapper = image_rescale_wrapper.ImageRescaleWrapper(fake_env, zoom_factors=[0.5, 0.2])
self.assertIsNotNone(wrapper)
self.assertEqual(wrapper.observation_spec()['pixels'].shape, (150, 60, 3))
reset_timestep = wrapper.reset()
reset_image = reset_timestep.observation['pixels']
self.assertEqual(reset_image.shape, (150, 60, 3))
step_timestep = wrapper.step(action='fake_action')
step_image = step_timestep.observation['pixels']
self.assertEqual(step_image.shape, (150, 60, 3))
|
def test_list_zoom_factor(self):
observation = np.ones(shape=[300, 300, 3])
fake_timestep = dm_env.TimeStep(step_type=dm_env.StepType.MID, reward=3.14, discount=0.9, observation={'pixels': observation})
fake_env = mock.create_autospec(env_interface.AndroidEnvInterface)
fake_env.observation_spec.return_value = {'pixels': _simple_spec()}
fake_env.reset.return_value = fake_timestep
fake_env.step.return_value = fake_timestep
wrapper = image_rescale_wrapper.ImageRescaleWrapper(fake_env, zoom_factors=[0.5, 0.2])
self.assertIsNotNone(wrapper)
self.assertEqual(wrapper.observation_spec()['pixels'].shape, (150, 60, 3))
reset_timestep = wrapper.reset()
reset_image = reset_timestep.observation['pixels']
self.assertEqual(reset_image.shape, (150, 60, 3))
step_timestep = wrapper.step(action='fake_action')
step_image = step_timestep.observation['pixels']
self.assertEqual(step_image.shape, (150, 60, 3))
|
android_env
|
positive
|
@staticmethod
def parse_list(content, unquote=True):
"""A wrapper to utils.parse_list() with unquoting support
Parses a specified set of data and breaks it into a list.
Args:
content (str): The path to split up into a list. If a list is
                provided, then its individual entries are processed.
unquote (:obj:`bool`, optional): call unquote on each element
added to the returned list.
Returns:
list: A unique list containing all of the elements in the path
"""
<DeepExtract>
content = parse_list(content)
if unquote:
content = [URLBase.unquote(x) for x in filter(bool, content)]
content = content
</DeepExtract>
if unquote:
content = [URLBase.unquote(x) for x in filter(bool, content)]
return content
|
@staticmethod
def parse_list(content, unquote=True):
"""A wrapper to utils.parse_list() with unquoting support
Parses a specified set of data and breaks it into a list.
Args:
content (str): The path to split up into a list. If a list is
                provided, then its individual entries are processed.
unquote (:obj:`bool`, optional): call unquote on each element
added to the returned list.
Returns:
list: A unique list containing all of the elements in the path
"""
content = parse_list(content)
if unquote:
content = [URLBase.unquote(x) for x in filter(bool, content)]
content = content
if unquote:
content = [URLBase.unquote(x) for x in filter(bool, content)]
return content
|
apprise
|
positive
|
def check_not_cross_sectional(self):
"""
        This function checks if the dataset is longitudinal. If it is
        cross-sectional, clinica proposes to convert it into a clinica-compliant form.
author: Arnaud Marcoux
"""
import sys
from os import listdir
from os.path import abspath, basename, dirname, isdir, join
from clinica.utils.exceptions import ClinicaInconsistentDatasetError
from clinica.utils.stream import cprint
def _check_cross_subj(cross_subj: list) -> None:
if len(cross_subj) > 0:
raise ClinicaInconsistentDatasetError(cross_subj)
def convert_cross_sectional(bids_in, bids_out, cross_subjects, long_subjects):
"""
This function converts a cross-sectional-bids dataset into a
longitudinal clinica-compliant dataset
Args:
bids_in: cross sectional bids dataset you want to convert
bids_out: converted longitudinal bids dataset
cross_subjects: list of subjects in cross sectional form
(they need some adjustment)
long_subjects: list of subjects in longitudinal form (they
just need to be copied)
Returns:
nothing
"""
from os import mkdir
from os.path import exists, isfile
from shutil import copy2, copytree
def add_ses(f):
"""
Use this function to transform a cross sectional filename into
a longitudinal one.
Examples:
sub-ADNI001_scans.tsv -> sub-ADNI001_ses-M000_scans.tsv
sub-023a_ses-M012_T1w.nii.gz -> sub-023a_ses-M012_T1w.nii.gz (no
modification done if filename already has a session)
Args:
f: filename
Returns:
                filename with '_ses-M000_' added just after participant_id
"""
import re
m = re.search('(^sub-[a-zA-Z0-9]*)_(?!ses-[a-zA-Z0-9])(.*)', f)
try:
return m.group(1) + '_ses-M000_' + m.group(2)
except AttributeError:
return f
def copy2_add_ses(src, dst):
"""
copy2_add_ses calls copy2 function from shutil, but modifies
the filename of the copied files if they match the regex
template described in add_ses() function
Args:
src: path to the file that needs to be copied
dst: original destination for the copied file
Returns:
copy2 with modified filename
"""
from os.path import basename, dirname, join
from shutil import copy2
dst_modified = join(dirname(dst), add_ses(basename(src)))
return copy2(src, dst_modified)
if not exists(bids_out):
mkdir(bids_out)
for subj in cross_subjects:
to_copy = [f for f in listdir(join(bids_in, subj)) if not f.startswith('.')]
if not exists(join(bids_out, subj)):
mkdir(join(bids_out, subj))
if not exists(join(bids_out, subj, 'ses-M000')):
mkdir(join(bids_out, subj, 'ses-M000'))
for el in to_copy:
path_el = join(bids_in, subj, el)
if not exists(join(bids_out, subj, 'ses-M000', el)):
if isdir(path_el):
copytree(path_el, join(bids_out, subj, 'ses-M000', basename(path_el)), copy_function=copy2_add_ses)
elif isfile(path_el):
<DeepExtract>
import re
m = re.search('(^sub-[a-zA-Z0-9]*)_(?!ses-[a-zA-Z0-9])(.*)', el)
try:
new_filename_wo_ses = m.group(1) + '_ses-M000_' + m.group(2)
except AttributeError:
new_filename_wo_ses = el
</DeepExtract>
copy2(path_el, join(bids_out, subj, 'ses-M000', new_filename_wo_ses))
for su in long_subjects:
to_copy = [f for f in listdir(join(bids_in, su)) if not f.startswith('.')]
if not exists(join(bids_out, su)):
mkdir(join(bids_out, su))
for el in to_copy:
path_el = join(bids_in, su, el)
if not exists(join(bids_out, su, el)):
if isdir(path_el):
copytree(path_el, join(bids_out, su, basename(path_el)))
elif isfile(path_el):
copy2(path_el, join(bids_out, su))
if self.bids_directory is not None:
bids_dir = abspath(self.bids_directory)
all_subs = [f for f in listdir(bids_dir) if isdir(join(bids_dir, f)) and f.startswith('sub-')]
<DeepExtract>
from os import listdir
from os.path import isdir, join
cross_subj = []
long_subj = []
for sub in all_subs:
folder_list = [f for f in listdir(join(bids_dir, sub)) if isdir(join(bids_dir, sub, f))]
if not all([fold.startswith('ses-') for fold in folder_list]):
cross_subj.append(sub)
else:
long_subj.append(sub)
(cross_subj, long_subj) = (cross_subj, long_subj)
</DeepExtract>
try:
<DeepExtract>
if len(cross_subj) > 0:
raise ClinicaInconsistentDatasetError(cross_subj)
</DeepExtract>
except ClinicaInconsistentDatasetError as e:
cprint(e, lvl='warning')
proposed_bids = join(dirname(bids_dir), basename(bids_dir) + '_clinica_compliant')
if not click.confirm(f'Do you want to proceed with the conversion in another folder? (Your original BIDS folder will not be modified and the folder {proposed_bids} will be created.)'):
click.echo('Clinica will now exit...')
sys.exit()
else:
cprint('Converting cross-sectional dataset into longitudinal...')
<DeepExtract>
from os import mkdir
from os.path import exists, isfile
from shutil import copy2, copytree
def add_ses(f):
"""
Use this function to transform a cross sectional filename into
a longitudinal one.
Examples:
sub-ADNI001_scans.tsv -> sub-ADNI001_ses-M000_scans.tsv
sub-023a_ses-M012_T1w.nii.gz -> sub-023a_ses-M012_T1w.nii.gz (no
modification done if filename already has a session)
Args:
f: filename
Returns:
                        filename with '_ses-M000_' added just after participant_id
"""
import re
m = re.search('(^sub-[a-zA-Z0-9]*)_(?!ses-[a-zA-Z0-9])(.*)', f)
try:
return m.group(1) + '_ses-M000_' + m.group(2)
except AttributeError:
return f
def copy2_add_ses(src, dst):
"""
copy2_add_ses calls copy2 function from shutil, but modifies
the filename of the copied files if they match the regex
template described in add_ses() function
Args:
src: path to the file that needs to be copied
dst: original destination for the copied file
Returns:
copy2 with modified filename
"""
from os.path import basename, dirname, join
from shutil import copy2
dst_modified = join(dirname(dst), add_ses(basename(src)))
return copy2(src, dst_modified)
if not exists(proposed_bids):
mkdir(proposed_bids)
for subj in cross_subj:
to_copy = [f for f in listdir(join(bids_dir, subj)) if not f.startswith('.')]
if not exists(join(proposed_bids, subj)):
mkdir(join(proposed_bids, subj))
if not exists(join(proposed_bids, subj, 'ses-M000')):
mkdir(join(proposed_bids, subj, 'ses-M000'))
for el in to_copy:
path_el = join(bids_dir, subj, el)
if not exists(join(proposed_bids, subj, 'ses-M000', el)):
if isdir(path_el):
copytree(path_el, join(proposed_bids, subj, 'ses-M000', basename(path_el)), copy_function=copy2_add_ses)
elif isfile(path_el):
new_filename_wo_ses = add_ses(el)
copy2(path_el, join(proposed_bids, subj, 'ses-M000', new_filename_wo_ses))
for su in long_subj:
to_copy = [f for f in listdir(join(bids_dir, su)) if not f.startswith('.')]
if not exists(join(proposed_bids, su)):
mkdir(join(proposed_bids, su))
for el in to_copy:
path_el = join(bids_dir, su, el)
if not exists(join(proposed_bids, su, el)):
if isdir(path_el):
copytree(path_el, join(proposed_bids, su, basename(path_el)))
elif isfile(path_el):
copy2(path_el, join(proposed_bids, su))
</DeepExtract>
cprint(f'Conversion succeeded. Your clinica-compliant dataset is located here: {proposed_bids}')
|
def check_not_cross_sectional(self):
"""
        This function checks if the dataset is longitudinal. If it is
        cross-sectional, clinica proposes to convert it into a clinica-compliant form.
author: Arnaud Marcoux
"""
import sys
from os import listdir
from os.path import abspath, basename, dirname, isdir, join
from clinica.utils.exceptions import ClinicaInconsistentDatasetError
from clinica.utils.stream import cprint
def _check_cross_subj(cross_subj: list) -> None:
if len(cross_subj) > 0:
raise ClinicaInconsistentDatasetError(cross_subj)
def convert_cross_sectional(bids_in, bids_out, cross_subjects, long_subjects):
"""
This function converts a cross-sectional-bids dataset into a
longitudinal clinica-compliant dataset
Args:
bids_in: cross sectional bids dataset you want to convert
bids_out: converted longitudinal bids dataset
cross_subjects: list of subjects in cross sectional form
(they need some adjustment)
long_subjects: list of subjects in longitudinal form (they
just need to be copied)
Returns:
nothing
"""
from os import mkdir
from os.path import exists, isfile
from shutil import copy2, copytree
def add_ses(f):
"""
Use this function to transform a cross sectional filename into
a longitudinal one.
Examples:
sub-ADNI001_scans.tsv -> sub-ADNI001_ses-M000_scans.tsv
sub-023a_ses-M012_T1w.nii.gz -> sub-023a_ses-M012_T1w.nii.gz (no
modification done if filename already has a session)
Args:
f: filename
Returns:
                filename with '_ses-M000_' added just after participant_id
"""
import re
m = re.search('(^sub-[a-zA-Z0-9]*)_(?!ses-[a-zA-Z0-9])(.*)', f)
try:
return m.group(1) + '_ses-M000_' + m.group(2)
except AttributeError:
return f
def copy2_add_ses(src, dst):
"""
copy2_add_ses calls copy2 function from shutil, but modifies
the filename of the copied files if they match the regex
template described in add_ses() function
Args:
src: path to the file that needs to be copied
dst: original destination for the copied file
Returns:
copy2 with modified filename
"""
from os.path import basename, dirname, join
from shutil import copy2
dst_modified = join(dirname(dst), add_ses(basename(src)))
return copy2(src, dst_modified)
if not exists(bids_out):
mkdir(bids_out)
for subj in cross_subjects:
to_copy = [f for f in listdir(join(bids_in, subj)) if not f.startswith('.')]
if not exists(join(bids_out, subj)):
mkdir(join(bids_out, subj))
if not exists(join(bids_out, subj, 'ses-M000')):
mkdir(join(bids_out, subj, 'ses-M000'))
for el in to_copy:
path_el = join(bids_in, subj, el)
if not exists(join(bids_out, subj, 'ses-M000', el)):
if isdir(path_el):
copytree(path_el, join(bids_out, subj, 'ses-M000', basename(path_el)), copy_function=copy2_add_ses)
elif isfile(path_el):
import re
m = re.search('(^sub-[a-zA-Z0-9]*)_(?!ses-[a-zA-Z0-9])(.*)', el)
try:
new_filename_wo_ses = m.group(1) + '_ses-M000_' + m.group(2)
except AttributeError:
new_filename_wo_ses = el
copy2(path_el, join(bids_out, subj, 'ses-M000', new_filename_wo_ses))
for su in long_subjects:
to_copy = [f for f in listdir(join(bids_in, su)) if not f.startswith('.')]
if not exists(join(bids_out, su)):
mkdir(join(bids_out, su))
for el in to_copy:
path_el = join(bids_in, su, el)
if not exists(join(bids_out, su, el)):
if isdir(path_el):
copytree(path_el, join(bids_out, su, basename(path_el)))
elif isfile(path_el):
copy2(path_el, join(bids_out, su))
if self.bids_directory is not None:
bids_dir = abspath(self.bids_directory)
all_subs = [f for f in listdir(bids_dir) if isdir(join(bids_dir, f)) and f.startswith('sub-')]
from os import listdir
from os.path import isdir, join
cross_subj = []
long_subj = []
for sub in all_subs:
folder_list = [f for f in listdir(join(bids_dir, sub)) if isdir(join(bids_dir, sub, f))]
if not all([fold.startswith('ses-') for fold in folder_list]):
cross_subj.append(sub)
else:
long_subj.append(sub)
(cross_subj, long_subj) = (cross_subj, long_subj)
try:
if len(cross_subj) > 0:
raise ClinicaInconsistentDatasetError(cross_subj)
except ClinicaInconsistentDatasetError as e:
cprint(e, lvl='warning')
proposed_bids = join(dirname(bids_dir), basename(bids_dir) + '_clinica_compliant')
if not click.confirm(f'Do you want to proceed with the conversion in another folder? (Your original BIDS folder will not be modified and the folder {proposed_bids} will be created.)'):
click.echo('Clinica will now exit...')
sys.exit()
else:
cprint('Converting cross-sectional dataset into longitudinal...')
from os import mkdir
from os.path import exists, isfile
from shutil import copy2, copytree
def add_ses(f):
"""
Use this function to transform a cross sectional filename into
a longitudinal one.
Examples:
sub-ADNI001_scans.tsv -> sub-ADNI001_ses-M000_scans.tsv
sub-023a_ses-M012_T1w.nii.gz -> sub-023a_ses-M012_T1w.nii.gz (no
modification done if filename already has a session)
Args:
f: filename
Returns:
                        filename with '_ses-M000_' added just after participant_id
"""
import re
m = re.search('(^sub-[a-zA-Z0-9]*)_(?!ses-[a-zA-Z0-9])(.*)', f)
try:
return m.group(1) + '_ses-M000_' + m.group(2)
except AttributeError:
return f
def copy2_add_ses(src, dst):
"""
copy2_add_ses calls copy2 function from shutil, but modifies
the filename of the copied files if they match the regex
template described in add_ses() function
Args:
src: path to the file that needs to be copied
dst: original destination for the copied file
Returns:
copy2 with modified filename
"""
from os.path import basename, dirname, join
from shutil import copy2
dst_modified = join(dirname(dst), add_ses(basename(src)))
return copy2(src, dst_modified)
if not exists(proposed_bids):
mkdir(proposed_bids)
for subj in cross_subj:
to_copy = [f for f in listdir(join(bids_dir, subj)) if not f.startswith('.')]
if not exists(join(proposed_bids, subj)):
mkdir(join(proposed_bids, subj))
if not exists(join(proposed_bids, subj, 'ses-M000')):
mkdir(join(proposed_bids, subj, 'ses-M000'))
for el in to_copy:
path_el = join(bids_dir, subj, el)
if not exists(join(proposed_bids, subj, 'ses-M000', el)):
if isdir(path_el):
copytree(path_el, join(proposed_bids, subj, 'ses-M000', basename(path_el)), copy_function=copy2_add_ses)
elif isfile(path_el):
new_filename_wo_ses = add_ses(el)
copy2(path_el, join(proposed_bids, subj, 'ses-M000', new_filename_wo_ses))
for su in long_subj:
to_copy = [f for f in listdir(join(bids_dir, su)) if not f.startswith('.')]
if not exists(join(proposed_bids, su)):
mkdir(join(proposed_bids, su))
for el in to_copy:
path_el = join(bids_dir, su, el)
if not exists(join(proposed_bids, su, el)):
if isdir(path_el):
copytree(path_el, join(proposed_bids, su, basename(path_el)))
elif isfile(path_el):
copy2(path_el, join(proposed_bids, su))
cprint(f'Conversion succeeded. Your clinica-compliant dataset is located here: {proposed_bids}')
|
clinica
|
positive
|
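The add_ses helper in the clinica entry above uses a negative lookahead so that filenames which already carry a session label pass through untouched. A standalone check of that regex, using the two example filenames from the helper's own docstring:
import re

def add_ses(f):
    # Insert '_ses-M000_' right after the participant id unless a session label is already present.
    m = re.search('(^sub-[a-zA-Z0-9]*)_(?!ses-[a-zA-Z0-9])(.*)', f)
    return m.group(1) + '_ses-M000_' + m.group(2) if m else f

assert add_ses('sub-ADNI001_scans.tsv') == 'sub-ADNI001_ses-M000_scans.tsv'
assert add_ses('sub-023a_ses-M012_T1w.nii.gz') == 'sub-023a_ses-M012_T1w.nii.gz'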
def request(self, method, args):
try:
<DeepExtract>
self.server.send({'method': method, 'args': args}, nowait=True)
result = (yield self.server.receive())
mreturn(result)
</DeepExtract>
except MonadReturn as val:
return val
|
def request(self, method, args):
try:
self.server.send({'method': method, 'args': args}, nowait=True)
result = (yield self.server.receive())
mreturn(result)
except MonadReturn as val:
return val
|
cell
|
positive
|
def write(self, model, **kwargs):
"""General writing function responsible for calling the sub-functions.
:param model: DiTTo model
:type model: DiTTo model
:param verbose: Set verbose mode. Optional. Default=False
:type verbose: bool
:param write_taps: Write the transformer taps if they are provided. (This can cause some problems). Optional. Default=False
:type write_taps: bool
:returns: 1 for success, -1 for failure
:rtype: int
"""
if 'verbose' in kwargs and isinstance(kwargs['verbose'], bool):
self.verbose = kwargs['verbose']
else:
self.verbose = False
if 'write_wires' in kwargs and isinstance(kwargs['write_wires'], bool):
self.write_wires = True
else:
self.write_wires = False
with open(os.path.join(self.output_path, 'Model.glm'), 'w') as fp:
logger.info('Writing the Module...')
if self.verbose:
logger.debug('Writing the Module...')
fp.write('module powerflow{\n solver_method NR;\n NR_iteration_limit 50;\n};\n\n')
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Nodes...')
if self.verbose:
logger.debug('Writing the Nodes...')
<DeepExtract>
for i in model.models:
if isinstance(i, Node):
fp.write('object node {\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if i.name == sourcebus:
fp.write(' bustype SWING;\n')
if hasattr(i, 'phases') and i.phases is not None and (len(i.phases) > 0):
fp.write(' phases ')
for phase in i.phases:
fp.write(phase.default_value)
fp.write('N;\n')
if hasattr(i, 'nominal_voltage') and i.nominal_voltage is not None:
fp.write(' nominal_voltage {nv};\n'.format(nv=i.nominal_voltage))
else:
fp.write(' nominal_voltage 12470;\n')
fp.write('};\n\n')
_ = 1
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Capacitors...')
if self.verbose:
logger.debug('Writing the Capacitors...')
<DeepExtract>
for i in model.models:
if isinstance(i, Capacitor):
fp.write('object capacitor {\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'nominal_voltage') and i.nominal_voltage is not None:
fp.write(' nominal_voltage {nv};\n'.format(nv=i.nominal_voltage))
if hasattr(i, 'delay') and i.delay is not None:
fp.write(' time_dela {td};\n'.format(td=i.delay))
if hasattr(i, 'mode') and i.mode is not None:
fp.write(' control {mode};\n'.format(mode=i.mode))
if hasattr(i, 'low') and i.low is not None:
fp.write(' voltage_set_low {low};\n'.format(low=i.low))
if hasattr(i, 'high') and i.low is not None:
fp.write(' voltage_set_high {high};\n'.format(high=i.high))
if hasattr(i, 'pt_phase') and i.pt_phase is not None:
fp.write(' pt_phase {pt};\n'.format(pt=i.pt_phase))
if hasattr(i, 'connecting_element') and i.connecting_element is not None:
fp.write(' parent n{ce};\n'.format(ce=i.connecting_element))
if hasattr(i, 'phase_capacitors') and i.phase_capacitors is not None:
phases = ''
for j in i.phase_capacitors:
if hasattr(j, 'phase') and j.phase is not None:
phases = phases + j.phase
logger.debug(j.var)
if hasattr(j, 'var') and j.var is not None:
fp.write(' capacitor_{phase} {var};\n'.format(phase=j.phase, var=j.var / 1000000.0))
if hasattr(j, 'switch') and j.var is not None:
if j.switch == 1:
fp.write(' switch' + j.phase + ' OPEN;\n')
else:
fp.write(' switch' + j.phase + ' CLOSED;\n')
if phases != '':
fp.write(' phases {ps};\n'.format(ps=phases))
else:
logger.debug('Warning - No phases provided for the Capacitor. No vars will be supplied')
fp.write('};\n\n')
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Loads...')
if self.verbose:
logger.debug('Writing the Loads...')
<DeepExtract>
for i in model.models:
if isinstance(i, Load):
fp.write('object load {\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'nominal_voltage') and i.nominal_voltage is not None:
fp.write(' nominal_voltage {nv};\n'.format(nv=i.nominal_voltage))
if hasattr(i, 'connecting_element') and i.connecting_element is not None:
fp.write(' parent n{ce};\n'.format(ce=i.connecting_element))
if hasattr(i, 'phase_loads') and i.phase_loads is not None:
phases = ''
for j in i.phase_loads:
if hasattr(j, 'phase') and j.phase is not None:
phases = phases + j.phase
if hasattr(j, 'use_zip') and j.use_zip is not None:
if j.use_zip == 1:
fp.write(' current_fraction_{phase} {cf};\n'.format(phase=j.phase, cf=j.ppercentcurrent + j.qpercentcurrent))
fp.write(' current_pf_{phase} {cpf};\n'.format(phase=j.phase, cpf=j.ppercentcurrent / (j.ppercentcurrent + j.qpercentcurrent)))
fp.write(' power_fraction_{phase} {pf};\n'.format(phase=j.phase, pf=j.ppercentpower + j.qpercentpower))
fp.write(' power_pf_{phase} {ppf};\n'.format(phase=j.phase, ppf=j.ppercentpower / (j.ppercentpower + j.qpercentpower)))
fp.write(' impedance_fraction_{phase} {iff};\n'.format(phase=j.phase, iff=j.ppercentimpedance + j.qpercentimpedance))
fp.write(' impedance_pf_{phase} {ipf};\n'.format(phase=j.phase, ipf=j.ppercentimpedance / (j.ppercentimpedance + j.qpercentimpedance)))
fp.write(' base_power_{phase} {bp};\n'.format(phase=j.phase, bp=complex(j.p, j.q)))
elif hasattr(j, 'p') and j.p is not None and hasattr(j, 'q') and (j.q is not None) and hasattr(j, 'phase') and (j.phase is not None):
fp.write(' constant_power_{phase} {cp};\n'.format(phase=j.phase, cp=str(complex(j.p, j.q)).strip('()')))
fp.write('};\n\n')
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Transformer Configurations...')
if self.verbose:
logger.debug('Writing the Transformer Configurations...')
<DeepExtract>
configuration_count = 1
for i in model.models:
if isinstance(i, PowerTransformer):
dic = {}
if hasattr(i, 'install_type') and i.install_type is not None:
dic['install_type'] = i.install_type
if hasattr(i, 'noload_loss') and i.noload_loss is not None:
dic['no_load_loss'] = i.noload_loss
n_windings = 0
if hasattr(i, 'windings') and i.windings is not None and (len(i.windings) > 1):
winding1 = i.windings[0]
winding2 = i.windings[1]
logger.debug(winding1.nominal_voltage, winding2.nominal_voltage)
if len(i.windings) == 3:
dic['connect_type'] = 'SINGLE_PHASE_CENTER_TAPPED'
elif hasattr(winding1, 'connection_type') and winding1.connection_type is not None and hasattr(winding2, 'connection_type') and (winding2.connection_type is not None):
conn_type = ''
if winding1.connection_type == 'Y':
conn_type = 'WYE_'
elif winding1.connection_type == 'D':
conn_type = 'DELTA_'
else:
conn_type = 'ERR'
if winding2.connection_type == 'Y':
if winding1.connection_type == 'D':
conn_type = conn_type + 'GWYE'
else:
conn_type = conn_type + 'WYE'
elif winding2.connection_type == 'D':
conn_type = conn_type + 'DELTA'
else:
conn_type = conn_type + 'ERR'
if conn_type[:3] != 'ERR' and conn_type[-3:] != 'ERR':
dic['connect_type'] = conn_type
if hasattr(winding1, 'nominal_voltage') and winding1.nominal_voltage is not None:
if hasattr(winding1, 'voltage_type') and winding1.voltage_type == 2:
dic['secondary_voltage'] = winding1.nominal_voltage
else:
dic['primary_voltage'] = winding1.nominal_voltage
if hasattr(winding2, 'nominal_voltage') and winding2.nominal_voltage is not None:
if hasattr(winding2, 'voltage_type') and winding2.voltage_type == 0:
dic['primary_voltage'] = winding2.nominal_voltage
else:
dic['secondary_voltage'] = winding2.nominal_voltage
if hasattr(winding1, 'rated_power') and winding1.rated_power is not None:
dic['power_rating'] = winding1.rated_power / 1000.0
n_windings = len(i.windings)
else:
logger.debug('Warning - No windings included in the transformer')
if hasattr(i, 'reactances') and i.reactances is not None:
if len(i.reactances) == 1 and n_windings == 2:
dic['reactance'] = i.reactances[0]
dic['resistance'] = i.windings[0].resistance + i.windings[1].resistance
if len(i.reactances) == 3 and n_windings == 3:
resistance = i.windings[0].resistance * 2
dic['resistance'] = resistance
dic['reactance'] = i.reactance[0]
dic['impedance1'] = complex(resistance, i.reactance[1])
dic['impedance2'] = complex(resistance, i.reactance[2])
dic_set = set()
for (a, b) in dic.items():
dic_set.add((a, b))
dic_set = frozenset(dic_set)
if dic_set in self.transformer_configurations:
logger.debug(i.name)
self.transformer_configurations_name[i.name] = self.transformer_configurations[dic_set]
continue
self.transformer_configurations[dic_set] = 'transformer_config_{num}'.format(num=configuration_count)
dic['name'] = 'transformer_config_{num}'.format(num=configuration_count)
self.transformer_configurations_name[i.name] = 'transformer_config_{num}'.format(num=configuration_count)
fp.write('object transformer_configuration {\n')
for j in dic:
fp.write(' {key} {value};\n'.format(key=j, value=dic[j]))
fp.write('};\n\n')
configuration_count = configuration_count + 1
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Transformers...')
if self.verbose:
logger.debug('Writing the Transformers...')
<DeepExtract>
for i in model.models:
if isinstance(i, PowerTransformer):
is_reg = False
for j in model.models:
if isinstance(j, Regulator) and j.name == i.name:
is_reg = True
break
if is_reg:
continue
fp.write('object transformer{\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'from_element') and i.from_element is not None:
fp.write(' from n{fn};\n'.format(fn=i.from_element))
if hasattr(i, 'to_element') and i.to_element is not None:
fp.write(' to n{tn};\n'.format(tn=i.to_element))
phase_set = set()
if hasattr(i, 'windings') and i.windings is not None:
for w in i.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
phase_set.add(pw.phase)
phase_set = sorted(list(phase_set))
phases = ''
for p in phase_set:
phases = phases + p
if phases != '':
fp.write(' phases {pw};\n'.format(pw=phases))
if hasattr(i, 'name') and i.name is not None and (i.name in self.transformer_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.transformer_configurations_name[i.name]))
fp.write('};\n\n')
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Regulator Configurations...')
if self.verbose:
logger.debug('Writing the Regulator Configurations...')
<DeepExtract>
configuration_count = 1
for i in model.models:
if isinstance(i, Regulator):
dic = {}
if hasattr(i, 'delay') and i.delay is not None:
dic['time_delay'] = i.delay
if hasattr(i, 'highstep') and i.highstep is not None:
dic['raise_taps'] = i.highstep
if hasattr(i, 'lowstep') and i.lowstep is not None:
dic['lower_taps'] = i.lowstep
elif hasattr(i, 'highstep') and i.highstep is not None:
dic['lower_taps'] = i.highstep
if hasattr(i, 'pt_ratio') and i.pt_ratio is not None:
dic['power_transducer_ratio'] = i.pt_ratio
if hasattr(i, 'ct_ratio') and i.ct_ratio is not None:
dic['current_transducer_ratio'] = i.ct_ratio
if hasattr(i, 'bandwidth') and i.bandwidth is not None:
dic['band_width'] = i.bandwidth
if hasattr(i, 'bandcenter') and i.bandcenter is not None:
dic['band_center'] = i.bandcenter
if hasattr(i, 'pt_phase') and i.pt_phase is not None:
dic['pt_phase'] = i.pt_phase
dic['connect_type'] = 'WYE_WYE'
dic_set = set()
for (a, b) in dic.items():
dic_set.add((a, b))
dic_set = frozenset(dic_set)
if dic_set not in self.regulator_configurations:
self.regulator_phases[dic_set] = {}
if hasattr(i, 'connected_transformer') and i.connected_transformer is not None:
for j in model.models:
if isinstance(j, PowerTransformer) and j.name == i.connected_transformer:
if hasattr(j, 'windings') and j.windings is not None:
for w in j.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
if hasattr(pw, 'tap_position'):
self.regulator_phases[dic_set]['tap_pos_{phase}'.format(phase=pw.phase)] = pw.tap_position
if hasattr(pw, 'compensator_r') and pw.compensator_r is not None:
self.regulator_phases[dic_set]['compensator_r_setting_{phase}'.format(phase=pw.phase)] = pw.compensator_r
if hasattr(pw, 'compensator_x') and pw.compensator_x is not None:
self.regulator_phases[dic_set]['compensator_r_setting_{phase}'.format(phase=pw.phase)] = pw.compensator_r
elif hasattr(i, 'windings') and i.windings is not None:
for w in i.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
if hasattr(pw, 'tap_position'):
self.regulator_phases[dic_set]['tap_pos_{phase}'.format(phase=pw.phase)] = pw.tap_position
if hasattr(pw, 'compensator_r') and pw.compensator_r is not None:
self.regulator_phases[dic_set]['compensator_r_setting_{phase}'.format(phase=pw.phase)] = pw.compensator_r
if hasattr(pw, 'compensator_x') and pw.compensator_x is not None:
self.regulator_phases[dic_set]['compensator_r_setting_{phase}'.format(phase=pw.phase)] = pw.compensator_r
if dic_set in self.regulator_configurations:
logger.debug(i.name)
self.regulator_configurations_name[i.name] = self.regulator_configurations[dic_set]
continue
self.regulator_configurations[dic_set] = 'regulator_config_{num}'.format(num=configuration_count)
dic['name'] = 'regulator_config_{num}'.format(num=configuration_count)
self.regulator_configurations_name[i.name] = 'regulator_config_{num}'.format(num=configuration_count)
configuration_count = configuration_count + 1
for dic in self.regulator_configurations:
fp.write('object regulator_configuration {\n')
fp.write(' name {n};\n'.format(n=self.regulator_configurations[dic]))
for j in dic:
fp.write(' {key} {value};\n'.format(key=j[0], value=j[1]))
for j in self.regulator_phases[dic]:
logger.debug(j)
fp.write(' {key} {value};\n'.format(key=j, value=self.regulator_phases[dic][j]))
fp.write('};\n\n')
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Regulators...')
if self.verbose:
logger.debug('Writing the Regulators...')
<DeepExtract>
for i in model.models:
if isinstance(i, Regulator):
if hasattr(i, 'from_element') and i.from_element is not None and hasattr(i, 'to_element') and (i.to_element is not None):
if i.from_element + '_' + i.to_element in self.regulator_seen:
continue
self.regulator_seen.add(i.from_element + '_' + i.to_element)
fp.write('object regulator{\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'from_element') and i.from_element is not None:
fp.write(' from n{fn};\n'.format(fn=i.from_element))
if hasattr(i, 'to_element') and i.to_element is not None:
fp.write(' to n{tn};\n'.format(tn=i.to_element))
phases = ''
if hasattr(i, 'connected_transformer') and i.connected_transformer is not None:
for j in model.models:
if isinstance(j, PowerTransformer) and j.name == i.connected_transformer:
if hasattr(j, 'windings') and j.windings is not None:
for w in j.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
phases = phases + pw.phase
elif hasattr(i, 'windings') and i.windings is not None:
for w in i.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
phases = phases + pw.phase
if hasattr(i, 'name') and i.name is not None and (i.name in self.regulator_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.regulator_configurations_name[i.name]))
fp.write('};\n\n')
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Line Configurations...')
if self.verbose:
logger.debug('Writing the Line Configurations...')
<DeepExtract>
configuration_count = 1
if self.write_wires:
pass
else:
for i in model.models:
if isinstance(i, Line):
if hasattr(i, 'is_switch') and i.is_switch == 1 or (hasattr(i, 'is_fuse') and i.is_fuse == 1):
continue
dic = {}
phase_map = {'A': 1, 'B': 2, 'C': 3, '1': 1, '2': 2}
phases = []
if hasattr(i, 'wires') and i.wires is not None:
for w in i.wires:
if hasattr(w, 'phase') and w.phase is not None and (w.phase != 'N'):
phases.append(w.phase)
phases.sort()
if hasattr(i, 'impedance_matrix') and i.impedance_matrix is not None:
lc = i.impedance_matrix
if len(phases) != len(lc):
logger.debug('Warning - impedance matrix size different from number of phases for line {ln}'.format(ln=i.name))
logger.debug(i.name, i.from_element, i.to_element)
logger.debug(phases)
logger.debug(lc)
for j_cnt in range(len(phases)):
for k_cnt in range(len(phases)):
j_val = phases[j_cnt]
k_val = phases[k_cnt]
j = phase_map[j_val] - 1
k = phase_map[k_val] - 1
if len(lc) < 3:
j = j_cnt
k = k_cnt
impedance = str(lc[j][k]).strip('()')
pattern = re.compile('[^e]-')
if '+' not in impedance and (not len(pattern.findall(impedance)) > 0):
impedance = '0+' + impedance
dic['z{one}{two}'.format(one=phase_map[j_val], two=phase_map[k_val])] = impedance
dic_set = set()
for (a, b) in dic.items():
dic_set.add((a, b))
dic_set = frozenset(dic_set)
if dic_set in self.line_configurations:
self.line_configurations_name[i.name] = self.line_configurations[dic_set]
continue
self.line_configurations[dic_set] = 'line_config_{num}'.format(num=configuration_count)
dic['name'] = 'line_config_{num}'.format(num=configuration_count)
self.line_configurations_name[i.name] = 'line_config_{num}'.format(num=configuration_count)
fp.write('object line_configuration {\n')
for j in dic:
fp.write(' {key} {value};\n'.format(key=j, value=dic[j]))
fp.write('};\n\n')
configuration_count = configuration_count + 1
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Lines...')
if self.verbose:
logger.debug('Writing the Lines...')
<DeepExtract>
for i in model.models:
if isinstance(i, Line):
if hasattr(i, 'line_type') and i.line_type is not None and (i.line_type == 'underground'):
fp.write('object underground_line{\n')
if hasattr(i, 'length') and i.length is not None:
fp.write(' length {len};\n'.format(len=i.length * 3.28084))
if hasattr(i, 'name') and i.name is not None and (i.name in self.line_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.line_configurations_name[i.name]))
elif hasattr(i, 'is_fuse') and i.is_fuse is not None and (i.is_fuse == 1):
fp.write('object fuse{\n')
elif hasattr(i, 'is_switch') and i.is_switch is not None and (i.is_switch == 1):
fp.write('object switch{\n')
elif hasattr(i, 'line_type') and i.line_type is not None:
fp.write('object overhead_line{\n')
if hasattr(i, 'length') and i.length is not None:
fp.write(' length {len};\n'.format(len=i.length * 3.28084))
if hasattr(i, 'name') and i.name is not None and (i.name in self.line_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.line_configurations_name[i.name]))
else:
fp.write('object overhead_line{\n')
if hasattr(i, 'length') and i.length is not None:
fp.write(' length {len};\n'.format(len=i.length * 3.28084))
if hasattr(i, 'name') and i.name is not None and (i.name in self.line_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.line_configurations_name[i.name]))
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'from_element') and i.from_element is not None:
fp.write(' from n{fn};\n'.format(fn=i.from_element))
if hasattr(i, 'to_element') and i.to_element is not None:
fp.write(' to n{tn};\n'.format(tn=i.to_element))
phases = ''
if hasattr(i, 'wires') and i.wires is not None:
for w in i.wires:
if hasattr(w, 'phase') and w.phase is not None:
phases = phases + w.phase
if phases != '':
fp.write(' phases {ph};\n'.format(ph=phases))
fp.write('};\n\n')
</DeepExtract>
if self.verbose:
logger.debug('Succesful!')
|
def write(self, model, **kwargs):
"""General writing function responsible for calling the sub-functions.
:param model: DiTTo model
:type model: DiTTo model
:param verbose: Set verbose mode. Optional. Default=False
:type verbose: bool
:param write_taps: Write the transformer taps if they are provided. (This can cause some problems). Optional. Default=False
:type write_taps: bool
:returns: 1 for success, -1 for failure
:rtype: int
"""
if 'verbose' in kwargs and isinstance(kwargs['verbose'], bool):
self.verbose = kwargs['verbose']
else:
self.verbose = False
if 'write_wires' in kwargs and isinstance(kwargs['write_wires'], bool):
self.write_wires = True
else:
self.write_wires = False
with open(os.path.join(self.output_path, 'Model.glm'), 'w') as fp:
logger.info('Writing the Module...')
if self.verbose:
logger.debug('Writing the Module...')
fp.write('module powerflow{\n solver_method NR;\n NR_iteration_limit 50;\n};\n\n')
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Nodes...')
if self.verbose:
logger.debug('Writing the Nodes...')
for i in model.models:
if isinstance(i, Node):
fp.write('object node {\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if i.name == sourcebus:
fp.write(' bustype SWING;\n')
if hasattr(i, 'phases') and i.phases is not None and (len(i.phases) > 0):
fp.write(' phases ')
for phase in i.phases:
fp.write(phase.default_value)
fp.write('N;\n')
if hasattr(i, 'nominal_voltage') and i.nominal_voltage is not None:
fp.write(' nominal_voltage {nv};\n'.format(nv=i.nominal_voltage))
else:
fp.write(' nominal_voltage 12470;\n')
fp.write('};\n\n')
_ = 1
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Capacitors...')
if self.verbose:
logger.debug('Writing the Capacitors...')
for i in model.models:
if isinstance(i, Capacitor):
fp.write('object capacitor {\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'nominal_voltage') and i.nominal_voltage is not None:
fp.write(' nominal_voltage {nv};\n'.format(nv=i.nominal_voltage))
if hasattr(i, 'delay') and i.delay is not None:
fp.write(' time_dela {td};\n'.format(td=i.delay))
if hasattr(i, 'mode') and i.mode is not None:
fp.write(' control {mode};\n'.format(mode=i.mode))
if hasattr(i, 'low') and i.low is not None:
fp.write(' voltage_set_low {low};\n'.format(low=i.low))
if hasattr(i, 'high') and i.low is not None:
fp.write(' voltage_set_high {high};\n'.format(high=i.high))
if hasattr(i, 'pt_phase') and i.pt_phase is not None:
fp.write(' pt_phase {pt};\n'.format(pt=i.pt_phase))
if hasattr(i, 'connecting_element') and i.connecting_element is not None:
fp.write(' parent n{ce};\n'.format(ce=i.connecting_element))
if hasattr(i, 'phase_capacitors') and i.phase_capacitors is not None:
phases = ''
for j in i.phase_capacitors:
if hasattr(j, 'phase') and j.phase is not None:
phases = phases + j.phase
logger.debug(j.var)
if hasattr(j, 'var') and j.var is not None:
fp.write(' capacitor_{phase} {var};\n'.format(phase=j.phase, var=j.var / 1000000.0))
if hasattr(j, 'switch') and j.var is not None:
if j.switch == 1:
fp.write(' switch' + j.phase + ' OPEN;\n')
else:
fp.write(' switch' + j.phase + ' CLOSED;\n')
if phases != '':
fp.write(' phases {ps};\n'.format(ps=phases))
else:
logger.debug('Warning - No phases provided for the Capacitor. No vars will be supplied')
fp.write('};\n\n')
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Loads...')
if self.verbose:
logger.debug('Writing the Loads...')
for i in model.models:
if isinstance(i, Load):
fp.write('object load {\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'nominal_voltage') and i.nominal_voltage is not None:
fp.write(' nominal_voltage {nv};\n'.format(nv=i.nominal_voltage))
if hasattr(i, 'connecting_element') and i.connecting_element is not None:
fp.write(' parent n{ce};\n'.format(ce=i.connecting_element))
if hasattr(i, 'phase_loads') and i.phase_loads is not None:
phases = ''
for j in i.phase_loads:
if hasattr(j, 'phase') and j.phase is not None:
phases = phases + j.phase
if hasattr(j, 'use_zip') and j.use_zip is not None:
if j.use_zip == 1:
fp.write(' current_fraction_{phase} {cf};\n'.format(phase=j.phase, cf=j.ppercentcurrent + j.qpercentcurrent))
fp.write(' current_pf_{phase} {cpf};\n'.format(phase=j.phase, cpf=j.ppercentcurrent / (j.ppercentcurrent + j.qpercentcurrent)))
fp.write(' power_fraction_{phase} {pf};\n'.format(phase=j.phase, pf=j.ppercentpower + j.qpercentpower))
fp.write(' power_pf_{phase} {ppf};\n'.format(phase=j.phase, ppf=j.ppercentpower / (j.ppercentpower + j.qpercentpower)))
fp.write(' impedance_fraction_{phase} {iff};\n'.format(phase=j.phase, iff=j.ppercentimpedance + j.qpercentimpedance))
fp.write(' impedance_pf_{phase} {ipf};\n'.format(phase=j.phase, ipf=j.ppercentimpedance / (j.ppercentimpedance + j.qpercentimpedance)))
fp.write(' base_power_{phase} {bp};\n'.format(phase=j.phase, bp=complex(j.p, j.q)))
elif hasattr(j, 'p') and j.p is not None and hasattr(j, 'q') and (j.q is not None) and hasattr(j, 'phase') and (j.phase is not None):
fp.write(' constant_power_{phase} {cp};\n'.format(phase=j.phase, cp=str(complex(j.p, j.q)).strip('()')))
fp.write('};\n\n')
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Transformer Configurations...')
if self.verbose:
logger.debug('Writing the Transformer Configurations...')
configuration_count = 1
for i in model.models:
if isinstance(i, PowerTransformer):
dic = {}
if hasattr(i, 'install_type') and i.install_type is not None:
dic['install_type'] = i.install_type
if hasattr(i, 'noload_loss') and i.noload_loss is not None:
dic['no_load_loss'] = i.noload_loss
n_windings = 0
if hasattr(i, 'windings') and i.windings is not None and (len(i.windings) > 1):
winding1 = i.windings[0]
winding2 = i.windings[1]
logger.debug(winding1.nominal_voltage, winding2.nominal_voltage)
if len(i.windings) == 3:
dic['connect_type'] = 'SINGLE_PHASE_CENTER_TAPPED'
elif hasattr(winding1, 'connection_type') and winding1.connection_type is not None and hasattr(winding2, 'connection_type') and (winding2.connection_type is not None):
conn_type = ''
if winding1.connection_type == 'Y':
conn_type = 'WYE_'
elif winding1.connection_type == 'D':
conn_type = 'DELTA_'
else:
conn_type = 'ERR'
if winding2.connection_type == 'Y':
if winding1.connection_type == 'D':
conn_type = conn_type + 'GWYE'
else:
conn_type = conn_type + 'WYE'
elif winding2.connection_type == 'D':
conn_type = conn_type + 'DELTA'
else:
conn_type = conn_type + 'ERR'
if conn_type[:3] != 'ERR' and conn_type[-3:] != 'ERR':
dic['connect_type'] = conn_type
if hasattr(winding1, 'nominal_voltage') and winding1.nominal_voltage is not None:
if hasattr(winding1, 'voltage_type') and winding1.voltage_type == 2:
dic['secondary_voltage'] = winding1.nominal_voltage
else:
dic['primary_voltage'] = winding1.nominal_voltage
if hasattr(winding2, 'nominal_voltage') and winding2.nominal_voltage is not None:
if hasattr(winding2, 'voltage_type') and winding2.voltage_type == 0:
dic['primary_voltage'] = winding2.nominal_voltage
else:
dic['secondary_voltage'] = winding2.nominal_voltage
if hasattr(winding1, 'rated_power') and winding1.rated_power is not None:
dic['power_rating'] = winding1.rated_power / 1000.0
n_windings = len(i.windings)
else:
logger.debug('Warning - No windings included in the transformer')
if hasattr(i, 'reactances') and i.reactances is not None:
if len(i.reactances) == 1 and n_windings == 2:
dic['reactance'] = i.reactances[0]
dic['resistance'] = i.windings[0].resistance + i.windings[1].resistance
if len(i.reactances) == 3 and n_windings == 3:
resistance = i.windings[0].resistance * 2
dic['resistance'] = resistance
dic['reactance'] = i.reactance[0]
dic['impedance1'] = complex(resistance, i.reactance[1])
dic['impedance2'] = complex(resistance, i.reactance[2])
dic_set = set()
for (a, b) in dic.items():
dic_set.add((a, b))
dic_set = frozenset(dic_set)
if dic_set in self.transformer_configurations:
logger.debug(i.name)
self.transformer_configurations_name[i.name] = self.transformer_configurations[dic_set]
continue
self.transformer_configurations[dic_set] = 'transformer_config_{num}'.format(num=configuration_count)
dic['name'] = 'transformer_config_{num}'.format(num=configuration_count)
self.transformer_configurations_name[i.name] = 'transformer_config_{num}'.format(num=configuration_count)
fp.write('object transformer_configuration {\n')
for j in dic:
fp.write(' {key} {value};\n'.format(key=j, value=dic[j]))
fp.write('};\n\n')
configuration_count = configuration_count + 1
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Transformers...')
if self.verbose:
logger.debug('Writing the Transformers...')
for i in model.models:
if isinstance(i, PowerTransformer):
is_reg = False
for j in model.models:
if isinstance(j, Regulator) and j.name == i.name:
is_reg = True
break
if is_reg:
continue
fp.write('object transformer{\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'from_element') and i.from_element is not None:
fp.write(' from n{fn};\n'.format(fn=i.from_element))
if hasattr(i, 'to_element') and i.to_element is not None:
fp.write(' to n{tn};\n'.format(tn=i.to_element))
phase_set = set()
if hasattr(i, 'windings') and i.windings is not None:
for w in i.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
phase_set.add(pw.phase)
phase_set = sorted(list(phase_set))
phases = ''
for p in phase_set:
phases = phases + p
if phases != '':
fp.write(' phases {pw};\n'.format(pw=phases))
if hasattr(i, 'name') and i.name is not None and (i.name in self.transformer_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.transformer_configurations_name[i.name]))
fp.write('};\n\n')
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Regulator Configurations...')
if self.verbose:
logger.debug('Writing the Regulator Configurations...')
configuration_count = 1
for i in model.models:
if isinstance(i, Regulator):
dic = {}
if hasattr(i, 'delay') and i.delay is not None:
dic['time_delay'] = i.delay
if hasattr(i, 'highstep') and i.highstep is not None:
dic['raise_taps'] = i.highstep
if hasattr(i, 'lowstep') and i.lowstep is not None:
dic['lower_taps'] = i.lowstep
elif hasattr(i, 'highstep') and i.highstep is not None:
dic['lower_taps'] = i.highstep
if hasattr(i, 'pt_ratio') and i.pt_ratio is not None:
dic['power_transducer_ratio'] = i.pt_ratio
if hasattr(i, 'ct_ratio') and i.ct_ratio is not None:
dic['current_transducer_ratio'] = i.ct_ratio
if hasattr(i, 'bandwidth') and i.bandwidth is not None:
dic['band_width'] = i.bandwidth
if hasattr(i, 'bandcenter') and i.bandcenter is not None:
dic['band_center'] = i.bandcenter
if hasattr(i, 'pt_phase') and i.pt_phase is not None:
dic['pt_phase'] = i.pt_phase
dic['connect_type'] = 'WYE_WYE'
dic_set = set()
for (a, b) in dic.items():
dic_set.add((a, b))
dic_set = frozenset(dic_set)
if dic_set not in self.regulator_configurations:
self.regulator_phases[dic_set] = {}
if hasattr(i, 'connected_transformer') and i.connected_transformer is not None:
for j in model.models:
if isinstance(j, PowerTransformer) and j.name == i.connected_transformer:
if hasattr(j, 'windings') and j.windings is not None:
for w in j.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
if hasattr(pw, 'tap_position'):
self.regulator_phases[dic_set]['tap_pos_{phase}'.format(phase=pw.phase)] = pw.tap_position
if hasattr(pw, 'compensator_r') and pw.compensator_r is not None:
self.regulator_phases[dic_set]['compensator_r_setting_{phase}'.format(phase=pw.phase)] = pw.compensator_r
if hasattr(pw, 'compensator_x') and pw.compensator_x is not None:
self.regulator_phases[dic_set]['compensator_r_setting_{phase}'.format(phase=pw.phase)] = pw.compensator_r
elif hasattr(i, 'windings') and i.windings is not None:
for w in i.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
if hasattr(pw, 'tap_position'):
self.regulator_phases[dic_set]['tap_pos_{phase}'.format(phase=pw.phase)] = pw.tap_position
if hasattr(pw, 'compensator_r') and pw.compensator_r is not None:
self.regulator_phases[dic_set]['compensator_r_setting_{phase}'.format(phase=pw.phase)] = pw.compensator_r
if hasattr(pw, 'compensator_x') and pw.compensator_x is not None:
self.regulator_phases[dic_set]['compensator_r_setting_{phase}'.format(phase=pw.phase)] = pw.compensator_r
if dic_set in self.regulator_configurations:
logger.debug(i.name)
self.regulator_configurations_name[i.name] = self.regulator_configurations[dic_set]
continue
self.regulator_configurations[dic_set] = 'regulator_config_{num}'.format(num=configuration_count)
dic['name'] = 'regulator_config_{num}'.format(num=configuration_count)
self.regulator_configurations_name[i.name] = 'regulator_config_{num}'.format(num=configuration_count)
configuration_count = configuration_count + 1
for dic in self.regulator_configurations:
fp.write('object regulator_configuration {\n')
fp.write(' name {n};\n'.format(n=self.regulator_configurations[dic]))
for j in dic:
fp.write(' {key} {value};\n'.format(key=j[0], value=j[1]))
for j in self.regulator_phases[dic]:
logger.debug(j)
fp.write(' {key} {value};\n'.format(key=j, value=self.regulator_phases[dic][j]))
fp.write('};\n\n')
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Regulators...')
if self.verbose:
logger.debug('Writing the Regulators...')
for i in model.models:
if isinstance(i, Regulator):
if hasattr(i, 'from_element') and i.from_element is not None and hasattr(i, 'to_element') and (i.to_element is not None):
if i.from_element + '_' + i.to_element in self.regulator_seen:
continue
self.regulator_seen.add(i.from_element + '_' + i.to_element)
fp.write('object regulator{\n')
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'from_element') and i.from_element is not None:
fp.write(' from n{fn};\n'.format(fn=i.from_element))
if hasattr(i, 'to_element') and i.to_element is not None:
fp.write(' to n{tn};\n'.format(tn=i.to_element))
phases = ''
if hasattr(i, 'connected_transformer') and i.connected_transformer is not None:
for j in model.models:
if isinstance(j, PowerTransformer) and j.name == i.connected_transformer:
if hasattr(j, 'windings') and j.windings is not None:
for w in j.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
phases = phases + pw.phase
elif hasattr(i, 'windings') and i.windings is not None:
for w in i.windings:
if hasattr(w, 'phase_windings') and w.phase_windings is not None:
for pw in w.phase_windings:
if hasattr(pw, 'phase') and pw.phase is not None:
phases = phases + pw.phase
if hasattr(i, 'name') and i.name is not None and (i.name in self.regulator_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.regulator_configurations_name[i.name]))
fp.write('};\n\n')
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Line Configurations...')
if self.verbose:
logger.debug('Writing the Line Configurations...')
configuration_count = 1
if self.write_wires:
pass
else:
for i in model.models:
if isinstance(i, Line):
if hasattr(i, 'is_switch') and i.is_switch == 1 or (hasattr(i, 'is_fuse') and i.is_fuse == 1):
continue
dic = {}
phase_map = {'A': 1, 'B': 2, 'C': 3, '1': 1, '2': 2}
phases = []
if hasattr(i, 'wires') and i.wires is not None:
for w in i.wires:
if hasattr(w, 'phase') and w.phase is not None and (w.phase != 'N'):
phases.append(w.phase)
phases.sort()
if hasattr(i, 'impedance_matrix') and i.impedance_matrix is not None:
lc = i.impedance_matrix
if len(phases) != len(lc):
logger.debug('Warning - impedance matrix size different from number of phases for line {ln}'.format(ln=i.name))
logger.debug(i.name, i.from_element, i.to_element)
logger.debug(phases)
logger.debug(lc)
for j_cnt in range(len(phases)):
for k_cnt in range(len(phases)):
j_val = phases[j_cnt]
k_val = phases[k_cnt]
j = phase_map[j_val] - 1
k = phase_map[k_val] - 1
if len(lc) < 3:
j = j_cnt
k = k_cnt
impedance = str(lc[j][k]).strip('()')
pattern = re.compile('[^e]-')
if '+' not in impedance and (not len(pattern.findall(impedance)) > 0):
impedance = '0+' + impedance
dic['z{one}{two}'.format(one=phase_map[j_val], two=phase_map[k_val])] = impedance
dic_set = set()
for (a, b) in dic.items():
dic_set.add((a, b))
dic_set = frozenset(dic_set)
if dic_set in self.line_configurations:
self.line_configurations_name[i.name] = self.line_configurations[dic_set]
continue
self.line_configurations[dic_set] = 'line_config_{num}'.format(num=configuration_count)
dic['name'] = 'line_config_{num}'.format(num=configuration_count)
self.line_configurations_name[i.name] = 'line_config_{num}'.format(num=configuration_count)
fp.write('object line_configuration {\n')
for j in dic:
fp.write(' {key} {value};\n'.format(key=j, value=dic[j]))
fp.write('};\n\n')
configuration_count = configuration_count + 1
if self.verbose:
logger.debug('Succesful!')
logger.info('Writing the Lines...')
if self.verbose:
logger.debug('Writing the Lines...')
for i in model.models:
if isinstance(i, Line):
if hasattr(i, 'line_type') and i.line_type is not None and (i.line_type == 'underground'):
fp.write('object underground_line{\n')
if hasattr(i, 'length') and i.length is not None:
fp.write(' length {len};\n'.format(len=i.length * 3.28084))
if hasattr(i, 'name') and i.name is not None and (i.name in self.line_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.line_configurations_name[i.name]))
elif hasattr(i, 'is_fuse') and i.is_fuse is not None and (i.is_fuse == 1):
fp.write('object fuse{\n')
elif hasattr(i, 'is_switch') and i.is_switch is not None and (i.is_switch == 1):
fp.write('object switch{\n')
elif hasattr(i, 'line_type') and i.line_type is not None:
fp.write('object overhead_line{\n')
if hasattr(i, 'length') and i.length is not None:
fp.write(' length {len};\n'.format(len=i.length * 3.28084))
if hasattr(i, 'name') and i.name is not None and (i.name in self.line_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.line_configurations_name[i.name]))
else:
fp.write('object overhead_line{\n')
if hasattr(i, 'length') and i.length is not None:
fp.write(' length {len};\n'.format(len=i.length * 3.28084))
if hasattr(i, 'name') and i.name is not None and (i.name in self.line_configurations_name):
fp.write(' configuration {config};\n'.format(config=self.line_configurations_name[i.name]))
if hasattr(i, 'name') and i.name is not None:
fp.write(' name n{name};\n'.format(name=i.name))
if hasattr(i, 'from_element') and i.from_element is not None:
fp.write(' from n{fn};\n'.format(fn=i.from_element))
if hasattr(i, 'to_element') and i.to_element is not None:
fp.write(' to n{tn};\n'.format(tn=i.to_element))
phases = ''
if hasattr(i, 'wires') and i.wires is not None:
for w in i.wires:
if hasattr(w, 'phase') and w.phase is not None:
phases = phases + w.phase
if phases != '':
fp.write(' phases {ph};\n'.format(ph=phases))
fp.write('};\n\n')
if self.verbose:
logger.debug('Succesful!')
|
ditto
|
positive
|
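The ditto writer above deduplicates transformer, regulator and line configurations by freezing each attribute dict into a frozenset of (key, value) pairs and using that as a lookup key, so identical settings reuse one named configuration. A compact illustration of the pattern with made-up attribute dicts (the names below are hypothetical, not the DiTTo API):
def assign_config_names(attribute_dicts, prefix='config'):
    # Map each distinct attribute combination to a single shared configuration name.
    seen = {}  # frozenset of (key, value) pairs -> configuration name
    names = []
    for dic in attribute_dicts:
        key = frozenset(dic.items())
        if key not in seen:
            seen[key] = '{p}_{n}'.format(p=prefix, n=len(seen) + 1)
        names.append(seen[key])
    return names

dicts = [{'connect_type': 'WYE_WYE', 'power_rating': 50.0},
         {'power_rating': 50.0, 'connect_type': 'WYE_WYE'},  # same content, different key order
         {'connect_type': 'DELTA_GWYE', 'power_rating': 75.0}]
assert assign_config_names(dicts) == ['config_1', 'config_1', 'config_2']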
def __init__(self, grid_points=9, num_convs=8, roi_feat_size=14, in_channels=256, conv_kernel_size=3, point_feat_channels=64, deconv_kernel_size=4, class_agnostic=False, loss_grid=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15), conv_cfg=None, norm_cfg=dict(type='GN', num_groups=36)):
super(GridHead, self).__init__()
self.grid_points = grid_points
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.point_feat_channels = point_feat_channels
self.conv_out_channels = self.point_feat_channels * self.grid_points
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
assert self.conv_out_channels % norm_cfg['num_groups'] == 0
assert self.grid_points >= 4
self.grid_size = int(np.sqrt(self.grid_points))
if self.grid_size * self.grid_size != self.grid_points:
raise ValueError('grid_points must be a square number')
if not isinstance(self.roi_feat_size, int):
raise ValueError('Only square RoIs are supported in Grid R-CNN')
self.whole_map_size = self.roi_feat_size * 4
<DeepExtract>
half_size = self.whole_map_size // 4 * 2
sub_regions = []
for i in range(self.grid_points):
x_idx = i // self.grid_size
y_idx = i % self.grid_size
if x_idx == 0:
sub_x1 = 0
elif x_idx == self.grid_size - 1:
sub_x1 = half_size
else:
ratio = x_idx / (self.grid_size - 1) - 0.25
sub_x1 = max(int(ratio * self.whole_map_size), 0)
if y_idx == 0:
sub_y1 = 0
elif y_idx == self.grid_size - 1:
sub_y1 = half_size
else:
ratio = y_idx / (self.grid_size - 1) - 0.25
sub_y1 = max(int(ratio * self.whole_map_size), 0)
sub_regions.append((sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
self.sub_regions = sub_regions
</DeepExtract>
self.convs = []
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else self.conv_out_channels
stride = 2 if i == 0 else 1
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(ConvModule(in_channels, self.conv_out_channels, self.conv_kernel_size, stride=stride, padding=padding, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=True))
self.convs = nn.Sequential(*self.convs)
self.deconv1 = nn.ConvTranspose2d(self.conv_out_channels, self.conv_out_channels, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points)
self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
self.deconv2 = nn.ConvTranspose2d(self.conv_out_channels, grid_points, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points)
self.neighbor_points = []
grid_size = self.grid_size
for i in range(grid_size):
for j in range(grid_size):
neighbors = []
if i > 0:
neighbors.append((i - 1) * grid_size + j)
if j > 0:
neighbors.append(i * grid_size + j - 1)
if j < grid_size - 1:
neighbors.append(i * grid_size + j + 1)
if i < grid_size - 1:
neighbors.append((i + 1) * grid_size + j)
self.neighbor_points.append(tuple(neighbors))
self.num_edges = sum([len(p) for p in self.neighbor_points])
self.forder_trans = nn.ModuleList()
self.sorder_trans = nn.ModuleList()
for neighbors in self.neighbor_points:
fo_trans = nn.ModuleList()
so_trans = nn.ModuleList()
for _ in range(len(neighbors)):
fo_trans.append(nn.Sequential(nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 5, stride=1, padding=2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1)))
so_trans.append(nn.Sequential(nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 5, 1, 2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1)))
self.forder_trans.append(fo_trans)
self.sorder_trans.append(so_trans)
self.loss_grid = build_loss(loss_grid)
|
def __init__(self, grid_points=9, num_convs=8, roi_feat_size=14, in_channels=256, conv_kernel_size=3, point_feat_channels=64, deconv_kernel_size=4, class_agnostic=False, loss_grid=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15), conv_cfg=None, norm_cfg=dict(type='GN', num_groups=36)):
super(GridHead, self).__init__()
self.grid_points = grid_points
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.point_feat_channels = point_feat_channels
self.conv_out_channels = self.point_feat_channels * self.grid_points
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
assert self.conv_out_channels % norm_cfg['num_groups'] == 0
assert self.grid_points >= 4
self.grid_size = int(np.sqrt(self.grid_points))
if self.grid_size * self.grid_size != self.grid_points:
raise ValueError('grid_points must be a square number')
if not isinstance(self.roi_feat_size, int):
raise ValueError('Only square RoIs are supported in Grid R-CNN')
self.whole_map_size = self.roi_feat_size * 4
half_size = self.whole_map_size // 4 * 2
sub_regions = []
for i in range(self.grid_points):
x_idx = i // self.grid_size
y_idx = i % self.grid_size
if x_idx == 0:
sub_x1 = 0
elif x_idx == self.grid_size - 1:
sub_x1 = half_size
else:
ratio = x_idx / (self.grid_size - 1) - 0.25
sub_x1 = max(int(ratio * self.whole_map_size), 0)
if y_idx == 0:
sub_y1 = 0
elif y_idx == self.grid_size - 1:
sub_y1 = half_size
else:
ratio = y_idx / (self.grid_size - 1) - 0.25
sub_y1 = max(int(ratio * self.whole_map_size), 0)
sub_regions.append((sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
self.sub_regions = sub_regions
self.convs = []
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else self.conv_out_channels
stride = 2 if i == 0 else 1
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(ConvModule(in_channels, self.conv_out_channels, self.conv_kernel_size, stride=stride, padding=padding, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=True))
self.convs = nn.Sequential(*self.convs)
self.deconv1 = nn.ConvTranspose2d(self.conv_out_channels, self.conv_out_channels, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points)
self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
self.deconv2 = nn.ConvTranspose2d(self.conv_out_channels, grid_points, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points)
self.neighbor_points = []
grid_size = self.grid_size
for i in range(grid_size):
for j in range(grid_size):
neighbors = []
if i > 0:
neighbors.append((i - 1) * grid_size + j)
if j > 0:
neighbors.append(i * grid_size + j - 1)
if j < grid_size - 1:
neighbors.append(i * grid_size + j + 1)
if i < grid_size - 1:
neighbors.append((i + 1) * grid_size + j)
self.neighbor_points.append(tuple(neighbors))
self.num_edges = sum([len(p) for p in self.neighbor_points])
self.forder_trans = nn.ModuleList()
self.sorder_trans = nn.ModuleList()
for neighbors in self.neighbor_points:
fo_trans = nn.ModuleList()
so_trans = nn.ModuleList()
for _ in range(len(neighbors)):
fo_trans.append(nn.Sequential(nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 5, stride=1, padding=2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1)))
so_trans.append(nn.Sequential(nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 5, 1, 2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1)))
self.forder_trans.append(fo_trans)
self.sorder_trans.append(so_trans)
self.loss_grid = build_loss(loss_grid)
|
ATSS-EfficientDet-PyTorch
|
positive
|
@slack_buffer_required
@utf8_decode
def command_slash(data, current_buffer, args):
"""
/slack slash /customcommand arg1 arg2 arg3
Run a custom slack command.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
split_args = args.split(' ', 1)
command = split_args[0]
text = split_args[1] if len(split_args) > 1 else ''
<DeepExtract>
usernames = team.get_username_map()
channels = team.get_channel_map()
usergroups = team.generate_usergroup_map()
if escape_characters:
text = text.replace('\x02', '*').replace('\x1d', '_').replace('\x1f', config.map_underline_to).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def linkify_word(match):
word = match.group(0)
(prefix, name) = match.groups()
if prefix == '@':
if name in ['channel', 'everyone', 'group', 'here']:
text_linkified = '<!{}>'.format(name)
elif name in usernames:
text_linkified = '<@{}>'.format(usernames[name])
elif word in usergroups.keys():
text_linkified = '<!subteam^{}|{}>'.format(usergroups[word], word)
elif prefix == '#' and (not True):
if word in channels:
text_linkified = '<#{}|{}>'.format(channels[word], name)
text_linkified = word
linkify_regex = "(?:^|(?<=\\s))([@#])([\\w\\(\\)\\'.-]+)"
text_linkified = re.sub(linkify_regex, linkify_word, text, flags=re.UNICODE)
</DeepExtract>
s = SlackRequest(team, 'chat.command', {'command': command, 'text': text_linkified, 'channel': channel.identifier}, channel=channel, metadata={'command': command, 'command_args': text})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
|
@slack_buffer_required
@utf8_decode
def command_slash(data, current_buffer, args):
"""
/slack slash /customcommand arg1 arg2 arg3
Run a custom slack command.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
split_args = args.split(' ', 1)
command = split_args[0]
text = split_args[1] if len(split_args) > 1 else ''
usernames = team.get_username_map()
channels = team.get_channel_map()
usergroups = team.generate_usergroup_map()
if escape_characters:
text = text.replace('\x02', '*').replace('\x1d', '_').replace('\x1f', config.map_underline_to).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def linkify_word(match):
word = match.group(0)
(prefix, name) = match.groups()
if prefix == '@':
if name in ['channel', 'everyone', 'group', 'here']:
text_linkified = '<!{}>'.format(name)
elif name in usernames:
text_linkified = '<@{}>'.format(usernames[name])
elif word in usergroups.keys():
text_linkified = '<!subteam^{}|{}>'.format(usergroups[word], word)
elif prefix == '#' and (not True):
if word in channels:
text_linkified = '<#{}|{}>'.format(channels[word], name)
text_linkified = word
linkify_regex = "(?:^|(?<=\\s))([@#])([\\w\\(\\)\\'.-]+)"
text_linkified = re.sub(linkify_regex, linkify_word, text, flags=re.UNICODE)
s = SlackRequest(team, 'chat.command', {'command': command, 'text': text_linkified, 'channel': channel.identifier}, channel=channel, metadata={'command': command, 'command_args': text})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
|
dotfiles
|
positive
|
def calc_TEC(maindir, window=4096, incoh_int=100, sfactor=4, offset=0.0, timewin=[0, 0], snrmin=0.0):
"""
Estimation of phase curve using coherent and incoherent integration.
Args:
maindir (:obj:`str`): Path for data.
window (:obj:'int'): Window length in samples.
incoh_int (:obj:'int'): Number of incoherent integrations.
sfactor (:obj:'int'): Overlap factor.
offset (:obj:'int'): Overlap factor.
timewin ((:obj:'list'): Overlap factor.)
Returns:
outdict (dict[str, obj]): Output data dictionary::
{
"rTEC": Relative TEC in TECU,
"rTEC_sig":Relative TEC STD in TECU,
"S4": The S4 parameter,
"snr0":snr0,
"snr1":snr1,
"time": Time for each measurement in posix format,
}
"""
<DeepExtract>
ut0 = 25567.5
e2p = 3600.0 * 24
sitepath = os.path.expanduser(os.path.join(maindir, 'metadata/config/site'))
sitemeta = drf.DigitalMetadataReader(sitepath)
sdict = sitemeta.read_latest()
sdict1 = list(sdict.values())[0]
infopath = os.path.expanduser(os.path.join(maindir, 'metadata/info'))
infometa = drf.DigitalMetadataReader(infopath)
idict = infometa.read_latest()
idict1 = list(idict.values())[0]
passpath = os.path.expanduser(os.path.join(maindir, 'metadata/pass/'))
passmeta = drf.DigitalMetadataReader(passpath)
pdict = passmeta.read_latest()
pdict1 = list(pdict.values())[0]
rtime = (pdict1['rise_time'] - ut0) * e2p
tsave = list(pdict.keys())[0]
Dop_bw = pdict1['doppler_bandwidth']
t = sp.arange(0, (Dop_bw.shape[0] + 1) * 10, 10.0) + rtime
t = t.astype(float)
obsLoc = ephem.Observer()
obsLoc.lat = sdict1['latitude']
obsLoc.long = sdict1['longitude']
satObj = ephem.readtle(idict1['name'], idict1['tle1'][1:-1], idict1['tle2'][1:-1])
tephem = (t - rtime) * ephem.second + pdict1['rise_time']
sublat = sp.zeros_like(tephem)
sublon = sp.zeros_like(tephem)
for (i, itime) in enumerate(tephem):
obsLoc.date = itime
satObj.compute(obsLoc)
sublat[i] = sp.rad2deg(satObj.sublat)
sublon[i] = sp.rad2deg(satObj.sublong)
t[-1] = t[-1] + 600
t[-2] = t[-2] + 500
t[0] = t[0] - 240
tdop = (t[0:len(t) - 1] + t[1:len(t)]) / 2.0
tdop[0] = tdop[0] - 35.0
tdop = tdop - offset
tephem = (tdop - rtime) * ephem.second + pdict1['rise_time']
sublat = sp.zeros_like(tephem)
sublon = sp.zeros_like(tephem)
for (i, itime) in enumerate(tephem):
obsLoc.date = itime
satObj.compute(obsLoc)
sublat[i] = sp.rad2deg(satObj.sublat)
sublon[i] = sp.rad2deg(satObj.sublong)
e = {'t': t, 'tsave': tsave, 'dop1': sp.interpolate.interp1d(tdop, Dop_bw[:, 0], kind='cubic'), 'dop2': sp.interpolate.interp1d(tdop, Dop_bw[:, 1], kind='cubic'), 'sublat': sp.interpolate.interp1d(tdop, sublat, kind='cubic'), 'sublon': sp.interpolate.interp1d(tdop, sublon, kind='cubic'), 'site_latitude': float(sdict1['latitude']), 'site_longitude': float(sdict1['longitude'])}
</DeepExtract>
<DeepExtract>
bw_search0 = 500.0
bw_search1 = 1000.0
(drfObj, chandict, start_indx, end_indx) = open_file(maindir)
chans = list(chandict.keys())
sps = chandict[chans[0]]['sps']
start_indx = start_indx + timewin[0] * sps
end_indx = end_indx - timewin[1] * sps
start_vec = sp.linspace(start_indx, end_indx - window * 2, n_measure)
tvec = start_vec / sps
del_f = sps / window
fvec = sp.arange(-window / 2.0, window / 2.0) * del_f
bw_indx0 = int(bw_search0 / del_f)
bw_indx1 = int(bw_search1 / del_f)
fidx0 = sp.arange(-bw_indx0 // 2, bw_indx0 // 2, dtype=int) + window // 2
fidx1 = sp.arange(-bw_indx1 // 2, bw_indx1 // 2, dtype=int) + window // 2
res0 = sp.zeros([n_measure, window], dtype=float)
res1 = sp.zeros([n_measure, window], dtype=float)
snr0 = sp.zeros(n_measure, dtype=float)
snr1 = sp.zeros(n_measure, dtype=float)
freqm0 = sp.zeros([n_measure])
freqm1 = sp.zeros([n_measure])
wfun = sig.get_window('hann', window)
idx = sp.arange(window)
t_win = idx / sps
win_s = float(window) / sps
toff = window / sps
subchan = 0
for (i_t, c_st) in enumerate(start_vec):
t_cur = tvec[i_t]
z00 = drfObj.read_vector(c_st, window, chans[0], subchan)
z01 = drfObj.read_vector(c_st + window, window, chans[0], subchan)
z10 = drfObj.read_vector(c_st, window, chans[1], subchan)
z11 = drfObj.read_vector(c_st + window, window, chans[1], subchan)
tphase = sp.float64(t_cur + toff)
doppler0 = -e['dop1'](tphase)
doppler1 = -e['dop2'](tphase)
osc00 = wfun * sp.exp(1j * 2.0 * sp.pi * doppler0 * t_win)
osc01 = wfun * sp.exp(1j * 2.0 * sp.pi * doppler0 * (t_win + win_s))
osc10 = wfun * sp.exp(1j * 2.0 * sp.pi * doppler1 * t_win)
osc11 = wfun * sp.exp(1j * 2.0 * sp.pi * doppler1 * (t_win + win_s))
F0 = scfft.fftshift(scfft.fft(z00 * osc00.astype(z00.dtype)))
F1 = scfft.fftshift(scfft.fft(z01 * osc01.astype(z01.dtype)))
res_temp = F0 * F1.conj()
res0[i_t, :] = res_temp.real ** 2 + res_temp.imag ** 2
freqm0[i_t] = fvec[fidx0[sp.argmax(res0[i_t, fidx0])]]
nc0 = sp.median(res0[i_t, :]) / sp.log(2.0)
snr0[i_t] = res0[i_t, fidx0].max() / nc0
F0 = scfft.fftshift(scfft.fft(z10 * osc10.astype(z10.dtype)))
F1 = scfft.fftshift(scfft.fft(z11 * osc11.astype(z11.dtype)))
res_temp = F0 * F1.conj()
res1[i_t, :] = res_temp.real ** 2 + res_temp.imag ** 2
freqm1[i_t] = fvec[fidx1[sp.argmax(res1[i_t, fidx1])]]
nc1 = sp.median(res1[i_t, :]) / sp.log(2.0)
snr1[i_t] = res1[i_t, fidx1].max() / nc1
res0[i_t, :] = res0[i_t, :] / nc0
res1[i_t, :] = res1[i_t, :] / nc1
tvec[0] = tvec[0] - 100
tvec[len(tvec) - 1] = tvec[len(tvec) - 1] + 100
snrmean = 0.5 * snr0 + 0.5 * snr1
dopfit = outlier_removed_fit(0.5 * (snr0 * freqm0 * 400.0 / 150 + snr1 * freqm1) / snrmean, snrmean)
doppler_residual = sp.interpolate.interp1d(tvec, dopfit)
rescor = res0 * res1.conj()
cspec = sp.mean(rescor, axis=0)
resid = {'cspec': cspec.real, 'max_bin': sp.argmax(cspec), 'doppler_residual': doppler_residual, 'dopfit': dopfit, 'tvec': tvec, 'fvec': fvec, 'res1': res1, 'res0': res0}
</DeepExtract>
Nr = int((incoh_int + sfactor - 1) * (window / sfactor))
<DeepExtract>
mainpath = os.path.expanduser(maindir)
drfObj = drf.DigitalRFReader(mainpath)
chans = drfObj.get_channels()
chandict = {}
(start_indx, end_indx) = [0, sp.inf]
for ichan in chans:
curdict = {}
(curdict['sind'], curdict['eind']) = drfObj.get_bounds(ichan)
start_indx = sp.maximum(curdict['sind'], start_indx)
end_indx = sp.minimum(curdict['eind'], end_indx)
dmetadict = drfObj.read_metadata(start_indx, end_indx, ichan)
dmetakeys = list(dmetadict.keys())
curdict['sps'] = dmetadict[dmetakeys[0]]['samples_per_second']
curdict['fo'] = dmetadict[dmetakeys[0]]['center_frequencies'].ravel()[0]
chandict[ichan] = curdict
(drfObj, chandict, start_indx, end_indx) = (drfObj, chandict, start_indx, end_indx)
</DeepExtract>
chans = list(chandict.keys())
sps = chandict[chans[0]]['sps']
start_indx = start_indx + timewin[0] * sps
end_indx = end_indx - timewin[1] * sps
freq_ratio = chandict[chans[1]]['fo'] / chandict[chans[0]]['fo']
(om0, om1) = 2.0 * s_const.pi * sp.array([chandict[chans[0]]['fo'], chandict[chans[1]]['fo']])
start_vec = sp.arange(start_indx, end_indx - Nr, Nr, dtype=float)
tvec = start_vec / sps
soff = window / sfactor
toff = soff / sps
idx = sp.arange(window)
n_t1 = sp.arange(0, incoh_int) * soff
(IDX, N_t1) = sp.meshgrid(idx, n_t1)
Msamp = IDX + N_t1
ls_samp = float(Msamp.flatten()[-1])
wfun = sig.get_window('hann', window)
wmat = sp.tile(wfun[sp.newaxis, :], (incoh_int, 1))
phase_00 = sp.exp(1j * 0.0)
phase_10 = sp.exp(1j * 0.0)
phase0 = sp.zeros(len(start_vec), dtype=sp.complex64)
phase1 = sp.zeros(len(start_vec), dtype=sp.complex64)
phase_cs0 = sp.zeros(len(start_vec), dtype=float)
phase_cs1 = sp.zeros(len(start_vec), dtype=float)
snr0 = sp.zeros(len(start_vec))
snr1 = sp.zeros(len(start_vec))
std0 = sp.zeros(len(start_vec))
std1 = sp.zeros(len(start_vec))
fi = window // 2
subchan = 0
outspec0 = sp.zeros((len(tvec), window))
outspec1 = sp.zeros((len(tvec), window))
print('Start Beacon Processing')
for (i_t, c_st) in enumerate(start_vec):
<DeepExtract>
progress = float(i_t) / float(len(start_vec))  # hoist the inlined progress expression so it can be validated and clamped below
barLength = 100
status = ''
if isinstance(progress, int):
    progress = float(progress)
if not isinstance(progress, float):
    progress = 0
    status = 'error: progress var must be float\r\n'
if progress < 0:
    progress = 0
    status = 'Halt...\r\n'
if progress >= 1:
    progress = 1
    status = 'Done...\r\n'
block = int(round(barLength * progress))
text = '\rPercent: [{0}] {1}% {2}'.format('#' * block + '-' * (barLength - block), progress * 100, status)
sys.stdout.write(text)
sys.stdout.flush()
</DeepExtract>
t_cur = tvec[i_t]
z00 = drfObj.read_vector(c_st, Nr, chans[0], subchan)[Msamp]
z01 = drfObj.read_vector(c_st + soff, Nr, chans[0], subchan)[Msamp]
z10 = drfObj.read_vector(c_st, Nr, chans[1], subchan)[Msamp]
z11 = drfObj.read_vector(c_st + soff, Nr, chans[1], subchan)[Msamp]
tphase = sp.float64(t_cur + toff)
doppler0 = -1.0 * (150.0 / 400.0) * resid['doppler_residual'](t_cur) - e['dop1'](tphase)
doppler1 = -1.0 * resid['doppler_residual'](t_cur) - e['dop2'](tphase)
osc00 = phase_00 * wmat * sp.exp(1j * 2.0 * sp.pi * doppler0 * (Msamp / sps))
osc01 = phase_00 * wmat * sp.exp(1j * 2.0 * sp.pi * doppler0 * (Msamp / sps + float(soff) / sps))
osc10 = phase_10 * wmat * sp.exp(1j * 2.0 * sp.pi * doppler1 * (Msamp / sps))
osc11 = phase_10 * wmat * sp.exp(1j * 2.0 * sp.pi * doppler1 * (Msamp / sps + float(soff) / sps))
f00 = scfft.fftshift(scfft.fft(z00 * osc00.astype(z00.dtype), axis=-1), axes=-1)
f01 = scfft.fftshift(scfft.fft(z01 * osc01.astype(z01.dtype), axis=-1), axes=-1)
f00spec = sp.power(f00.real, 2).sum(0) + sp.power(f00.imag, 2).sum(0)
outspec0[i_t] = f00spec.real
f00_cor = f00[:, fi] * sp.conj(f01[:, fi])
phase0[i_t] = sp.cumprod(sp.power(f00_cor, 1.0 / float(incoh_int)))[-1]
phase_cs0[i_t] = sp.cumsum(sp.diff(sp.unwrap(sp.angle(f00[:, fi]))))[-1]
f10 = scfft.fftshift(scfft.fft(z10 * osc10.astype(z10.dtype), axis=-1), axes=-1)
f11 = scfft.fftshift(scfft.fft(z11 * osc11.astype(z11.dtype), axis=-1), axes=-1)
f10spec = sp.power(f10.real, 2).sum(0) + sp.power(f10.imag, 2).sum(0)
f10_cor = f10[:, fi] * sp.conj(f11[:, fi])
outspec1[i_t] = f10spec.real
phase1[i_t] = sp.cumprod(sp.power(f10_cor, 1.0 / float(incoh_int)))[-1]
phase_cs1[i_t] = sp.cumsum(sp.diff(sp.unwrap(sp.angle(f10[:, fi]))))[-1]
std0[i_t] = sp.std(sp.angle(f00_cor))
std1[i_t] = sp.std(sp.angle(f10_cor))
snr0[i_t] = f00spec.real[fi] / sp.median(f00spec.real)
snr1[i_t] = f10spec.real[fi] / sp.median(f10spec.real)
phase_00 = phase_00 * sp.exp(1j * 2.0 * sp.pi * doppler0 * ((ls_samp + 1.0) / sps))
phase_10 = phase_10 * sp.exp(1j * 2.0 * sp.pi * doppler1 * ((ls_samp + 1.0) / sps))
phasecurve = sp.cumsum(sp.angle(phase0) * freq_ratio - sp.angle(phase1))
phasecurve_amp = phase_cs0 * freq_ratio - phase_cs1
stdcurve = sp.sqrt(sp.cumsum(float(sfactor) * incoh_int * (std0 ** 2.0 + std1 ** 2.0)))
snrwin = sp.logical_and(snr0 > snrmin, snr1 > snrmin)
phasecurve = phasecurve[snrwin]
phasecurve_amp = phasecurve_amp[snrwin]
stdcurve = stdcurve[snrwin]
snr0 = snr0[snrwin]
snr1 = snr1[snrwin]
tvec = tvec[snrwin]
dt = sp.diff(tvec).mean()
Nside = int(1.0 / dt / 2.0)
lvec = sp.arange(-Nside, Nside)
(Lmat, Tmat) = sp.meshgrid(lvec, sp.arange(len(tvec)))
Sampmat = Lmat + Tmat
Sampclip = sp.clip(Sampmat, 0, len(tvec) - 1)
eps = s_const.e ** 2 / (8.0 * s_const.pi ** 2 * s_const.m_e * s_const.epsilon_0)
aconst = s_const.e ** 2 / (2 * s_const.m_e * s_const.epsilon_0 * s_const.c)
na = 9.0
nb = 24.0
f0 = 16668000.0
cTEC = 1e-16 * sp.power(om1 / om0 ** 2 - 1.0 / om1, -1) / aconst
rTEC = cTEC * phasecurve
rTEC = rTEC - rTEC.min()
rTEC_amp = cTEC * phasecurve_amp
rTEC_amp = rTEC_amp - rTEC_amp.min()
rTEC_sig = cTEC * stdcurve
S4 = sp.std(snr0[Sampclip], axis=-1) / sp.median(snr0, axis=-1)
outdict = {'rTEC': rTEC, 'rTEC_amp': rTEC_amp, 'rTEC_sig': rTEC_sig, 'S4': S4, 'snr0': snr0, 'snr1': snr1, 'time': tvec, 'resid': resid, 'phase': phasecurve, 'phase_amp': phasecurve_amp, 'phasestd': stdcurve, 'outspec0': outspec0, 'outspec1': outspec1}
return outdict
|
def calc_TEC(maindir, window=4096, incoh_int=100, sfactor=4, offset=0.0, timewin=[0, 0], snrmin=0.0):
"""
Estimation of phase curve using coherent and incoherent integration.
Args:
maindir (:obj:`str`): Path for data.
window (:obj:'int'): Window length in samples.
incoh_int (:obj:'int'): Number of incoherent integrations.
sfactor (:obj:'int'): Overlap factor.
offset (:obj:'int'): Overlap factor.
timewin ((:obj:'list'): Overlap factor.)
Returns:
outdict (dict[str, obj]): Output data dictionary::
{
"rTEC": Relative TEC in TECU,
"rTEC_sig":Relative TEC STD in TECU,
"S4": The S4 parameter,
"snr0":snr0,
"snr1":snr1,
"time": Time for each measurement in posix format,
}
"""
ut0 = 25567.5
e2p = 3600.0 * 24
sitepath = os.path.expanduser(os.path.join(maindir, 'metadata/config/site'))
sitemeta = drf.DigitalMetadataReader(sitepath)
sdict = sitemeta.read_latest()
sdict1 = list(sdict.values())[0]
infopath = os.path.expanduser(os.path.join(maindir, 'metadata/info'))
infometa = drf.DigitalMetadataReader(infopath)
idict = infometa.read_latest()
idict1 = list(idict.values())[0]
passpath = os.path.expanduser(os.path.join(maindir, 'metadata/pass/'))
passmeta = drf.DigitalMetadataReader(passpath)
pdict = passmeta.read_latest()
pdict1 = list(pdict.values())[0]
rtime = (pdict1['rise_time'] - ut0) * e2p
tsave = list(pdict.keys())[0]
Dop_bw = pdict1['doppler_bandwidth']
t = sp.arange(0, (Dop_bw.shape[0] + 1) * 10, 10.0) + rtime
t = t.astype(float)
obsLoc = ephem.Observer()
obsLoc.lat = sdict1['latitude']
obsLoc.long = sdict1['longitude']
satObj = ephem.readtle(idict1['name'], idict1['tle1'][1:-1], idict1['tle2'][1:-1])
tephem = (t - rtime) * ephem.second + pdict1['rise_time']
sublat = sp.zeros_like(tephem)
sublon = sp.zeros_like(tephem)
for (i, itime) in enumerate(tephem):
obsLoc.date = itime
satObj.compute(obsLoc)
sublat[i] = sp.rad2deg(satObj.sublat)
sublon[i] = sp.rad2deg(satObj.sublong)
t[-1] = t[-1] + 600
t[-2] = t[-2] + 500
t[0] = t[0] - 240
tdop = (t[0:len(t) - 1] + t[1:len(t)]) / 2.0
tdop[0] = tdop[0] - 35.0
tdop = tdop - offset
tephem = (tdop - rtime) * ephem.second + pdict1['rise_time']
sublat = sp.zeros_like(tephem)
sublon = sp.zeros_like(tephem)
for (i, itime) in enumerate(tephem):
obsLoc.date = itime
satObj.compute(obsLoc)
sublat[i] = sp.rad2deg(satObj.sublat)
sublon[i] = sp.rad2deg(satObj.sublong)
e = {'t': t, 'tsave': tsave, 'dop1': sp.interpolate.interp1d(tdop, Dop_bw[:, 0], kind='cubic'), 'dop2': sp.interpolate.interp1d(tdop, Dop_bw[:, 1], kind='cubic'), 'sublat': sp.interpolate.interp1d(tdop, sublat, kind='cubic'), 'sublon': sp.interpolate.interp1d(tdop, sublon, kind='cubic'), 'site_latitude': float(sdict1['latitude']), 'site_longitude': float(sdict1['longitude'])}
bw_search0 = 500.0
bw_search1 = 1000.0
(drfObj, chandict, start_indx, end_indx) = open_file(maindir)
chans = list(chandict.keys())
sps = chandict[chans[0]]['sps']
start_indx = start_indx + timewin[0] * sps
end_indx = end_indx - timewin[1] * sps
start_vec = sp.linspace(start_indx, end_indx - window * 2, n_measure)
tvec = start_vec / sps
del_f = sps / window
fvec = sp.arange(-window / 2.0, window / 2.0) * del_f
bw_indx0 = int(bw_search0 / del_f)
bw_indx1 = int(bw_search1 / del_f)
fidx0 = sp.arange(-bw_indx0 // 2, bw_indx0 // 2, dtype=int) + window // 2
fidx1 = sp.arange(-bw_indx1 // 2, bw_indx1 // 2, dtype=int) + window // 2
res0 = sp.zeros([n_measure, window], dtype=float)
res1 = sp.zeros([n_measure, window], dtype=float)
snr0 = sp.zeros(n_measure, dtype=float)
snr1 = sp.zeros(n_measure, dtype=float)
freqm0 = sp.zeros([n_measure])
freqm1 = sp.zeros([n_measure])
wfun = sig.get_window('hann', window)
idx = sp.arange(window)
t_win = idx / sps
win_s = float(window) / sps
toff = window / sps
subchan = 0
for (i_t, c_st) in enumerate(start_vec):
t_cur = tvec[i_t]
z00 = drfObj.read_vector(c_st, window, chans[0], subchan)
z01 = drfObj.read_vector(c_st + window, window, chans[0], subchan)
z10 = drfObj.read_vector(c_st, window, chans[1], subchan)
z11 = drfObj.read_vector(c_st + window, window, chans[1], subchan)
tphase = sp.float64(t_cur + toff)
doppler0 = -e['dop1'](tphase)
doppler1 = -e['dop2'](tphase)
osc00 = wfun * sp.exp(1j * 2.0 * sp.pi * doppler0 * t_win)
osc01 = wfun * sp.exp(1j * 2.0 * sp.pi * doppler0 * (t_win + win_s))
osc10 = wfun * sp.exp(1j * 2.0 * sp.pi * doppler1 * t_win)
osc11 = wfun * sp.exp(1j * 2.0 * sp.pi * doppler1 * (t_win + win_s))
F0 = scfft.fftshift(scfft.fft(z00 * osc00.astype(z00.dtype)))
F1 = scfft.fftshift(scfft.fft(z01 * osc01.astype(z01.dtype)))
res_temp = F0 * F1.conj()
res0[i_t, :] = res_temp.real ** 2 + res_temp.imag ** 2
freqm0[i_t] = fvec[fidx0[sp.argmax(res0[i_t, fidx0])]]
nc0 = sp.median(res0[i_t, :]) / sp.log(2.0)
snr0[i_t] = res0[i_t, fidx0].max() / nc0
F0 = scfft.fftshift(scfft.fft(z10 * osc10.astype(z10.dtype)))
F1 = scfft.fftshift(scfft.fft(z11 * osc11.astype(z11.dtype)))
res_temp = F0 * F1.conj()
res1[i_t, :] = res_temp.real ** 2 + res_temp.imag ** 2
freqm1[i_t] = fvec[fidx1[sp.argmax(res1[i_t, fidx1])]]
nc1 = sp.median(res1[i_t, :]) / sp.log(2.0)
snr1[i_t] = res1[i_t, fidx1].max() / nc1
res0[i_t, :] = res0[i_t, :] / nc0
res1[i_t, :] = res1[i_t, :] / nc1
tvec[0] = tvec[0] - 100
tvec[len(tvec) - 1] = tvec[len(tvec) - 1] + 100
snrmean = 0.5 * snr0 + 0.5 * snr1
dopfit = outlier_removed_fit(0.5 * (snr0 * freqm0 * 400.0 / 150 + snr1 * freqm1) / snrmean, snrmean)
doppler_residual = sp.interpolate.interp1d(tvec, dopfit)
rescor = res0 * res1.conj()
cspec = sp.mean(rescor, axis=0)
resid = {'cspec': cspec.real, 'max_bin': sp.argmax(cspec), 'doppler_residual': doppler_residual, 'dopfit': dopfit, 'tvec': tvec, 'fvec': fvec, 'res1': res1, 'res0': res0}
Nr = int((incoh_int + sfactor - 1) * (window / sfactor))
mainpath = os.path.expanduser(maindir)
drfObj = drf.DigitalRFReader(mainpath)
chans = drfObj.get_channels()
chandict = {}
(start_indx, end_indx) = [0, sp.inf]
for ichan in chans:
curdict = {}
(curdict['sind'], curdict['eind']) = drfObj.get_bounds(ichan)
start_indx = sp.maximum(curdict['sind'], start_indx)
end_indx = sp.minimum(curdict['eind'], end_indx)
dmetadict = drfObj.read_metadata(start_indx, end_indx, ichan)
dmetakeys = list(dmetadict.keys())
curdict['sps'] = dmetadict[dmetakeys[0]]['samples_per_second']
curdict['fo'] = dmetadict[dmetakeys[0]]['center_frequencies'].ravel()[0]
chandict[ichan] = curdict
(drfObj, chandict, start_indx, end_indx) = (drfObj, chandict, start_indx, end_indx)
chans = list(chandict.keys())
sps = chandict[chans[0]]['sps']
start_indx = start_indx + timewin[0] * sps
end_indx = end_indx - timewin[1] * sps
freq_ratio = chandict[chans[1]]['fo'] / chandict[chans[0]]['fo']
(om0, om1) = 2.0 * s_const.pi * sp.array([chandict[chans[0]]['fo'], chandict[chans[1]]['fo']])
start_vec = sp.arange(start_indx, end_indx - Nr, Nr, dtype=float)
tvec = start_vec / sps
soff = window / sfactor
toff = soff / sps
idx = sp.arange(window)
n_t1 = sp.arange(0, incoh_int) * soff
(IDX, N_t1) = sp.meshgrid(idx, n_t1)
Msamp = IDX + N_t1
ls_samp = float(Msamp.flatten()[-1])
wfun = sig.get_window('hann', window)
wmat = sp.tile(wfun[sp.newaxis, :], (incoh_int, 1))
phase_00 = sp.exp(1j * 0.0)
phase_10 = sp.exp(1j * 0.0)
phase0 = sp.zeros(len(start_vec), dtype=sp.complex64)
phase1 = sp.zeros(len(start_vec), dtype=sp.complex64)
phase_cs0 = sp.zeros(len(start_vec), dtype=float)
phase_cs1 = sp.zeros(len(start_vec), dtype=float)
snr0 = sp.zeros(len(start_vec))
snr1 = sp.zeros(len(start_vec))
std0 = sp.zeros(len(start_vec))
std1 = sp.zeros(len(start_vec))
fi = window // 2
subchan = 0
outspec0 = sp.zeros((len(tvec), window))
outspec1 = sp.zeros((len(tvec), window))
print('Start Beacon Processing')
for (i_t, c_st) in enumerate(start_vec):
progress = float(i_t) / float(len(start_vec))  # hoist the inlined progress expression so it can be validated and clamped below
barLength = 100
status = ''
if isinstance(progress, int):
    progress = float(progress)
if not isinstance(progress, float):
    progress = 0
    status = 'error: progress var must be float\r\n'
if progress < 0:
    progress = 0
    status = 'Halt...\r\n'
if progress >= 1:
    progress = 1
    status = 'Done...\r\n'
block = int(round(barLength * progress))
text = '\rPercent: [{0}] {1}% {2}'.format('#' * block + '-' * (barLength - block), progress * 100, status)
sys.stdout.write(text)
sys.stdout.flush()
t_cur = tvec[i_t]
z00 = drfObj.read_vector(c_st, Nr, chans[0], subchan)[Msamp]
z01 = drfObj.read_vector(c_st + soff, Nr, chans[0], subchan)[Msamp]
z10 = drfObj.read_vector(c_st, Nr, chans[1], subchan)[Msamp]
z11 = drfObj.read_vector(c_st + soff, Nr, chans[1], subchan)[Msamp]
tphase = sp.float64(t_cur + toff)
doppler0 = -1.0 * (150.0 / 400.0) * resid['doppler_residual'](t_cur) - e['dop1'](tphase)
doppler1 = -1.0 * resid['doppler_residual'](t_cur) - e['dop2'](tphase)
osc00 = phase_00 * wmat * sp.exp(1j * 2.0 * sp.pi * doppler0 * (Msamp / sps))
osc01 = phase_00 * wmat * sp.exp(1j * 2.0 * sp.pi * doppler0 * (Msamp / sps + float(soff) / sps))
osc10 = phase_10 * wmat * sp.exp(1j * 2.0 * sp.pi * doppler1 * (Msamp / sps))
osc11 = phase_10 * wmat * sp.exp(1j * 2.0 * sp.pi * doppler1 * (Msamp / sps + float(soff) / sps))
f00 = scfft.fftshift(scfft.fft(z00 * osc00.astype(z00.dtype), axis=-1), axes=-1)
f01 = scfft.fftshift(scfft.fft(z01 * osc01.astype(z01.dtype), axis=-1), axes=-1)
f00spec = sp.power(f00.real, 2).sum(0) + sp.power(f00.imag, 2).sum(0)
outspec0[i_t] = f00spec.real
f00_cor = f00[:, fi] * sp.conj(f01[:, fi])
phase0[i_t] = sp.cumprod(sp.power(f00_cor, 1.0 / float(incoh_int)))[-1]
phase_cs0[i_t] = sp.cumsum(sp.diff(sp.unwrap(sp.angle(f00[:, fi]))))[-1]
f10 = scfft.fftshift(scfft.fft(z10 * osc10.astype(z10.dtype), axis=-1), axes=-1)
f11 = scfft.fftshift(scfft.fft(z11 * osc11.astype(z11.dtype), axis=-1), axes=-1)
f10spec = sp.power(f10.real, 2).sum(0) + sp.power(f10.imag, 2).sum(0)
f10_cor = f10[:, fi] * sp.conj(f11[:, fi])
outspec1[i_t] = f10spec.real
phase1[i_t] = sp.cumprod(sp.power(f10_cor, 1.0 / float(incoh_int)))[-1]
phase_cs1[i_t] = sp.cumsum(sp.diff(sp.unwrap(sp.angle(f10[:, fi]))))[-1]
std0[i_t] = sp.std(sp.angle(f00_cor))
std1[i_t] = sp.std(sp.angle(f10_cor))
snr0[i_t] = f00spec.real[fi] / sp.median(f00spec.real)
snr1[i_t] = f10spec.real[fi] / sp.median(f10spec.real)
phase_00 = phase_00 * sp.exp(1j * 2.0 * sp.pi * doppler0 * ((ls_samp + 1.0) / sps))
phase_10 = phase_10 * sp.exp(1j * 2.0 * sp.pi * doppler1 * ((ls_samp + 1.0) / sps))
phasecurve = sp.cumsum(sp.angle(phase0) * freq_ratio - sp.angle(phase1))
phasecurve_amp = phase_cs0 * freq_ratio - phase_cs1
stdcurve = sp.sqrt(sp.cumsum(float(sfactor) * incoh_int * (std0 ** 2.0 + std1 ** 2.0)))
snrwin = sp.logical_and(snr0 > snrmin, snr1 > snrmin)
phasecurve = phasecurve[snrwin]
phasecurve_amp = phasecurve_amp[snrwin]
stdcurve = stdcurve[snrwin]
snr0 = snr0[snrwin]
snr1 = snr1[snrwin]
tvec = tvec[snrwin]
dt = sp.diff(tvec).mean()
Nside = int(1.0 / dt / 2.0)
lvec = sp.arange(-Nside, Nside)
(Lmat, Tmat) = sp.meshgrid(lvec, sp.arange(len(tvec)))
Sampmat = Lmat + Tmat
Sampclip = sp.clip(Sampmat, 0, len(tvec) - 1)
eps = s_const.e ** 2 / (8.0 * s_const.pi ** 2 * s_const.m_e * s_const.epsilon_0)
aconst = s_const.e ** 2 / (2 * s_const.m_e * s_const.epsilon_0 * s_const.c)
na = 9.0
nb = 24.0
f0 = 16668000.0
cTEC = 1e-16 * sp.power(om1 / om0 ** 2 - 1.0 / om1, -1) / aconst
rTEC = cTEC * phasecurve
rTEC = rTEC - rTEC.min()
rTEC_amp = cTEC * phasecurve_amp
rTEC_amp = rTEC_amp - rTEC_amp.min()
rTEC_sig = cTEC * stdcurve
S4 = sp.std(snr0[Sampclip], axis=-1) / sp.median(snr0, axis=-1)
outdict = {'rTEC': rTEC, 'rTEC_amp': rTEC_amp, 'rTEC_sig': rTEC_sig, 'S4': S4, 'snr0': snr0, 'snr1': snr1, 'time': tvec, 'resid': resid, 'phase': phasecurve, 'phase_amp': phasecurve_amp, 'phasestd': stdcurve, 'outspec0': outspec0, 'outspec1': outspec1}
return outdict
|
digital_rf
|
positive
|
def _element_link(target_collection, eventindex, index, key):
<DeepExtract>
load_column = awkward.materialized(target_collection[target_collection.fields[0]])
target_offsets = _get_target_offsets(load_column.layout.offsets, eventindex)
global_index = target_offsets + index
</DeepExtract>
global_index = awkward.where(key != 0, global_index, -1)
return target_collection._apply_global_index(global_index)
|
def _element_link(target_collection, eventindex, index, key):
load_column = awkward.materialized(target_collection[target_collection.fields[0]])
target_offsets = _get_target_offsets(load_column.layout.offsets, eventindex)
global_index = target_offsets + index
global_index = awkward.where(key != 0, global_index, -1)
return target_collection._apply_global_index(global_index)
|
coffea
|
positive
|
@override_settings(USE_TZ=False)
def test_get_absolute_url_no_timezone(self):
<DeepExtract>
params = {'title': 'My entry', 'content': 'My content', 'slug': 'my-entry', 'publication_date': datetime(2013, 1, 1, 12, 0)}
e = Entry.objects.create(**params)
self.assertTrue('/2013/01/01/my-entry/' in e.get_absolute_url())
</DeepExtract>
<DeepExtract>
params = {'title': 'My entry', 'content': 'My content', 'slug': 'my-entry', 'publication_date': datetime(2013, 1, 1, 23, 0)}
e = Entry.objects.create(**params)
self.assertTrue('/2013/01/01/my-entry/' in e.get_absolute_url())
</DeepExtract>
|
@override_settings(USE_TZ=False)
def test_get_absolute_url_no_timezone(self):
params = {'title': 'My entry', 'content': 'My content', 'slug': 'my-entry', 'publication_date': datetime(2013, 1, 1, 12, 0)}
e = Entry.objects.create(**params)
self.assertTrue('/2013/01/01/my-entry/' in e.get_absolute_url())
params = {'title': 'My entry', 'content': 'My content', 'slug': 'my-entry', 'publication_date': datetime(2013, 1, 1, 23, 0)}
e = Entry.objects.create(**params)
self.assertTrue('/2013/01/01/my-entry/' in e.get_absolute_url())
|
django-blog-zinnia
|
positive
|
def _setbool(self, value):
if value in (1, 'True'):
<DeepExtract>
self._datastore = ByteStore(bytearray(b'\x80')[:], 1, 0)
assert self._assertsanity()
</DeepExtract>
elif value in (0, 'False'):
<DeepExtract>
self._datastore = ByteStore(bytearray(b'\x00')[:], 1, 0)
assert self._assertsanity()
</DeepExtract>
else:
raise CreationError('Cannot initialise boolean with {0}.', value)
|
def _setbool(self, value):
if value in (1, 'True'):
self._datastore = ByteStore(bytearray(b'\x80')[:], 1, 0)
assert self._assertsanity()
elif value in (0, 'False'):
self._datastore = ByteStore(bytearray(b'\x00')[:], 1, 0)
assert self._assertsanity()
else:
raise CreationError('Cannot initialise boolean with {0}.', value)
|
Arduino-Telescope-Control
|
positive
|
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS, commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
<DeepExtract>
out = ''
if isinstance('airflow/confirm.html', basestring):
out += pygment_html_render('airflow/confirm.html', lexer)
elif isinstance('airflow/confirm.html', (tuple, list)):
for (i, s) in enumerate('airflow/confirm.html'):
out += '<div>List item #{}</div>'.format(i)
out += '<div>' + pygment_html_render(s, lexer) + '</div>'
elif isinstance('airflow/confirm.html', dict):
for (k, v) in 'airflow/confirm.html'.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += '<div>' + pygment_html_render(v, lexer) + '</div>'
response = out
</DeepExtract>
return response
|
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS, commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
out = ''
if isinstance('airflow/confirm.html', basestring):
out += pygment_html_render('airflow/confirm.html', lexer)
elif isinstance('airflow/confirm.html', (tuple, list)):
for (i, s) in enumerate('airflow/confirm.html'):
out += '<div>List item #{}</div>'.format(i)
out += '<div>' + pygment_html_render(s, lexer) + '</div>'
elif isinstance('airflow/confirm.html', dict):
for (k, v) in 'airflow/confirm.html'.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += '<div>' + pygment_html_render(v, lexer) + '</div>'
response = out
return response
|
docker-airflow
|
positive
|
def save(self, obj):
"""Save this object"""
assert obj.__class__.__name__ in self.routes
url = self.routes[obj.__class__.__name__]
body = xmlize.dumps(obj)
if obj.id:
<DeepExtract>
tries = 0
if body and (not headers.has_key('Content-Length')):
headers['Content-Length'] = len(body)
while tries < self.max_tries:
tries += 1
self.connect()
if self.auth_header:
headers['Authorization'] = self.auth_header
self.conn.request('PUT', '/%s/%s' % (url, obj.id), body, headers)
resp = self.conn.getresponse()
if resp.status == 401:
self.close()
self.get_basic_auth()
continue
elif resp.status >= 500 or resp.status == 408:
log.info('Got %s: Retrying in %s second(s)' % (resp.status, tries ** 2))
time.sleep(tries ** 2)
continue
else:
return resp
if resp.status == 400:
log.exception(resp.read())
return resp
</DeepExtract>
else:
<DeepExtract>
tries = 0
if body and (not headers.has_key('Content-Length')):
headers['Content-Length'] = len(body)
while tries < self.max_tries:
tries += 1
self.connect()
if self.auth_header:
headers['Authorization'] = self.auth_header
self.conn.request('POST', '/%s' % url, body, headers)
resp = self.conn.getresponse()
if resp.status == 401:
self.close()
self.get_basic_auth()
continue
elif resp.status >= 500 or resp.status == 408:
log.info('Got %s: Retrying in %s second(s)' % (resp.status, tries ** 2))
time.sleep(tries ** 2)
continue
else:
return resp
if resp.status == 400:
log.exception(resp.read())
return resp
</DeepExtract>
|
def save(self, obj):
"""Save this object"""
assert obj.__class__.__name__ in self.routes
url = self.routes[obj.__class__.__name__]
body = xmlize.dumps(obj)
if obj.id:
tries = 0
if body and (not headers.has_key('Content-Length')):
headers['Content-Length'] = len(body)
while tries < self.max_tries:
tries += 1
self.connect()
if self.auth_header:
headers['Authorization'] = self.auth_header
self.conn.request('PUT', '/%s/%s' % (url, obj.id), body, headers)
resp = self.conn.getresponse()
if resp.status == 401:
self.close()
self.get_basic_auth()
continue
elif resp.status >= 500 or resp.status == 408:
log.info('Got %s: Retrying in %s second(s)' % (resp.status, tries ** 2))
time.sleep(tries ** 2)
continue
else:
return resp
if resp.status == 400:
log.exception(resp.read())
return resp
else:
tries = 0
if body and (not headers.has_key('Content-Length')):
headers['Content-Length'] = len(body)
while tries < self.max_tries:
tries += 1
self.connect()
if self.auth_header:
headers['Authorization'] = self.auth_header
self.conn.request('POST', '/%s' % url, body, headers)
resp = self.conn.getresponse()
if resp.status == 401:
self.close()
self.get_basic_auth()
continue
elif resp.status >= 500 or resp.status == 408:
log.info('Got %s: Retrying in %s second(s)' % (resp.status, tries ** 2))
time.sleep(tries ** 2)
continue
else:
return resp
if resp.status == 400:
log.exception(resp.read())
return resp
|
botoweb
|
positive
|
def parse_commit_log(name, content, releases, get_head_fn):
"""
Parses the given commit log
:param name: str, package name
:param content: list, directory paths
:param releases: list, releases
:param get_head_fn: function
:return: dict, changelog
"""
log = ''
raw_log = ''
for (path, _) in content:
log += '\n'.join(changelog(repository=GitRepos(path), tag_filter_regexp='v?\\d+\\.\\d+(\\.\\d+)?'))
raw_log += '\n' + subprocess.check_output(['git', '-C', path, '--no-pager', 'log', '--decorate']).decode('utf-8')
shutil.rmtree(path)
<DeepExtract>
changelog = {}
releases = frozenset(releases)
head = False
url_regex = re.compile('(https?://[^#]+)#')
for line in log.splitlines():
new_head = get_head_fn(name=name, line=line, releases=releases)
if new_head:
head = new_head
changelog[head] = ''
continue
if not head:
continue
line = line.replace('@', '')
line = url_regex.sub('\\1::HASHTAG::', line)
line = line.replace('#', '')
line = line.replace('::HASHTAG::', '#')
changelog[head] += line + '\n'
log = changelog
</DeepExtract>
return (log, raw_log)
|
def parse_commit_log(name, content, releases, get_head_fn):
"""
Parses the given commit log
:param name: str, package name
:param content: list, directory paths
:param releases: list, releases
:param get_head_fn: function
:return: dict, changelog
"""
log = ''
raw_log = ''
for (path, _) in content:
log += '\n'.join(changelog(repository=GitRepos(path), tag_filter_regexp='v?\\d+\\.\\d+(\\.\\d+)?'))
raw_log += '\n' + subprocess.check_output(['git', '-C', path, '--no-pager', 'log', '--decorate']).decode('utf-8')
shutil.rmtree(path)
changelog = {}
releases = frozenset(releases)
head = False
url_regex = re.compile('(https?://[^#]+)#')
for line in log.splitlines():
new_head = get_head_fn(name=name, line=line, releases=releases)
if new_head:
head = new_head
changelog[head] = ''
continue
if not head:
continue
line = line.replace('@', '')
line = url_regex.sub('\\1::HASHTAG::', line)
line = line.replace('#', '')
line = line.replace('::HASHTAG::', '#')
changelog[head] += line + '\n'
log = changelog
return (log, raw_log)
|
changelogs
|
positive
|
def create_ts(self, t_start: float=0, t_end: float=0, centered: bool=True) -> tf.constant:
"""
Compute time samples.
Parameters
----------
t_start: float
Starting time for this device.
t_end: float
End time for this device.
centered: boolean
Sample in the middle of an interval, otherwise at the beginning.
"""
<DeepExtract>
res = self.resolution
self.slice_num = int(np.abs(t_start - t_end) * res)
</DeepExtract>
dt = 1 / self.resolution
if centered:
offset = dt / 2
num = self.slice_num
else:
offset = 0
num = self.slice_num + 1
t_start = tf.constant(t_start + offset, dtype=tf.float64)
t_end = tf.constant(t_end - offset, dtype=tf.float64)
ts = tf.linspace(t_start, t_end, num)
return ts
|
def create_ts(self, t_start: float=0, t_end: float=0, centered: bool=True) -> tf.constant:
"""
Compute time samples.
Parameters
----------
t_start: float
Starting time for this device.
t_end: float
End time for this device.
centered: boolean
Sample in the middle of an interval, otherwise at the beginning.
"""
res = self.resolution
self.slice_num = int(np.abs(t_start - t_end) * res)
dt = 1 / self.resolution
if centered:
offset = dt / 2
num = self.slice_num
else:
offset = 0
num = self.slice_num + 1
t_start = tf.constant(t_start + offset, dtype=tf.float64)
t_end = tf.constant(t_end - offset, dtype=tf.float64)
ts = tf.linspace(t_start, t_end, num)
return ts
|
c3
|
positive
|
def model_fcn8(features):
<DeepExtract>
W = get_variable('W' + str(0), [feature_dim, hidden_layer_dim])
B = get_variable('B' + str(0), [hidden_layer_dim], is_bias=True)
HL0 = tf.nn.sigmoid(tf.nn.xw_plus_b(features, W, B))
</DeepExtract>
<DeepExtract>
W = get_variable('W' + str(1), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(1), [hidden_layer_dim], is_bias=True)
HL1 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL0, W, B))
</DeepExtract>
<DeepExtract>
W = get_variable('W' + str(2), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(2), [hidden_layer_dim], is_bias=True)
HL2 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL1, W, B))
</DeepExtract>
<DeepExtract>
W = get_variable('W' + str(3), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(3), [hidden_layer_dim], is_bias=True)
HL3 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL2, W, B))
</DeepExtract>
<DeepExtract>
W = get_variable('W' + str(4), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(4), [hidden_layer_dim], is_bias=True)
HL4 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL3, W, B))
</DeepExtract>
<DeepExtract>
W = get_variable('W' + str(5), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(5), [hidden_layer_dim], is_bias=True)
HL5 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL4, W, B))
</DeepExtract>
<DeepExtract>
if is_bias:
outputLayerW = tf.get_variable('W8', [hidden_layer_dim, label_dim], initializer=tf.constant_initializer(0.1))
outputLayerW = tf.get_variable('W8', [hidden_layer_dim, label_dim], initializer=tf.truncated_normal_initializer(stddev=0.1))
</DeepExtract>
<DeepExtract>
if is_bias:
outputLayerB = tf.get_variable('B8', [label_dim], initializer=tf.constant_initializer(0.1))
outputLayerB = tf.get_variable('B8', [label_dim], initializer=tf.truncated_normal_initializer(stddev=0.1))
</DeepExtract>
outputLayer = tf.nn.xw_plus_b(HL5, outputLayerW, outputLayerB)
return outputLayer
|
def model_fcn8(features):
W = get_variable('W' + str(0), [feature_dim, hidden_layer_dim])
B = get_variable('B' + str(0), [hidden_layer_dim], is_bias=True)
HL0 = tf.nn.sigmoid(tf.nn.xw_plus_b(features, W, B))
W = get_variable('W' + str(1), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(1), [hidden_layer_dim], is_bias=True)
HL1 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL0, W, B))
W = get_variable('W' + str(2), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(2), [hidden_layer_dim], is_bias=True)
HL2 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL1, W, B))
W = get_variable('W' + str(3), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(3), [hidden_layer_dim], is_bias=True)
HL3 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL2, W, B))
W = get_variable('W' + str(4), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(4), [hidden_layer_dim], is_bias=True)
HL4 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL3, W, B))
W = get_variable('W' + str(5), [hidden_layer_dim, hidden_layer_dim])
B = get_variable('B' + str(5), [hidden_layer_dim], is_bias=True)
HL5 = tf.nn.sigmoid(tf.nn.xw_plus_b(HL4, W, B))
if is_bias:
outputLayerW = tf.get_variable('W8', [hidden_layer_dim, label_dim], initializer=tf.constant_initializer(0.1))
outputLayerW = tf.get_variable('W8', [hidden_layer_dim, label_dim], initializer=tf.truncated_normal_initializer(stddev=0.1))
if is_bias:
outputLayerB = tf.get_variable('B8', [label_dim], initializer=tf.constant_initializer(0.1))
outputLayerB = tf.get_variable('B8', [label_dim], initializer=tf.truncated_normal_initializer(stddev=0.1))
outputLayer = tf.nn.xw_plus_b(HL5, outputLayerW, outputLayerB)
return outputLayer
|
dlbench
|
positive
|
def create_acm_cert(self):
"""
Method to set the ACM Certificate definition
"""
if self.properties:
props = import_record_properties(self.properties, CfnAcmCertificate)
elif self.parameters:
<DeepExtract>
tag_filter = re.compile('(^\\*.)')
validations = [DomainValidationOption(DomainName=domain_name, HostedZoneId=self.parameters['HostedZoneId']) for domain_name in self.parameters['DomainNames']]
props = {'DomainValidationOptions': validations, 'DomainName': self.parameters['DomainNames'][0], 'ValidationMethod': 'DNS', 'Tags': Tags(Name=tag_filter.sub('wildcard.', self.parameters['DomainNames'][0]), ZoneId=self.parameters['HostedZoneId']), 'SubjectAlternativeNames': self.parameters['DomainNames'][1:]}
props = props
</DeepExtract>
else:
raise ValueError('Failed to determine how to create the ACM certificate', self.logical_name)
self.cfn_resource = CfnAcmCertificate(f'{self.logical_name}AcmCert', **props)
<DeepExtract>
self.output_properties = {CERT_ARN: (f'{self.logical_name}', self.cfn_resource, Ref, None)}
</DeepExtract>
self.generate_outputs()
|
def create_acm_cert(self):
"""
Method to set the ACM Certificate definition
"""
if self.properties:
props = import_record_properties(self.properties, CfnAcmCertificate)
elif self.parameters:
tag_filter = re.compile('(^\\*.)')
validations = [DomainValidationOption(DomainName=domain_name, HostedZoneId=self.parameters['HostedZoneId']) for domain_name in self.parameters['DomainNames']]
props = {'DomainValidationOptions': validations, 'DomainName': self.parameters['DomainNames'][0], 'ValidationMethod': 'DNS', 'Tags': Tags(Name=tag_filter.sub('wildcard.', self.parameters['DomainNames'][0]), ZoneId=self.parameters['HostedZoneId']), 'SubjectAlternativeNames': self.parameters['DomainNames'][1:]}
props = props
else:
raise ValueError('Failed to determine how to create the ACM certificate', self.logical_name)
self.cfn_resource = CfnAcmCertificate(f'{self.logical_name}AcmCert', **props)
self.output_properties = {CERT_ARN: (f'{self.logical_name}', self.cfn_resource, Ref, None)}
self.generate_outputs()
|
ecs_composex
|
positive
|
def do_test(dim0, dim1, dim2, ntr, nsubj, max_blk_edge, rad):
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
mask = np.random.choice([True, False], (dim0, dim1, dim2))
data = [np.empty((dim0, dim1, dim2, ntr), dtype=np.object) if i % size == rank else None for i in range(0, nsubj)]
<DeepExtract>
comm = MPI.COMM_WORLD
rank = comm.rank
(dim0, dim1, dim2) = mask.shape
for subj in data:
if subj is not None:
for tr in range(subj.shape[3]):
for d1 in range(dim0):
for d2 in range(dim1):
for d3 in range(dim2):
subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])
sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
sl.distribute(data, mask)
sl.broadcast(MaskRadBcast(mask, rad))
global_outputs = sl.run_searchlight(voxel_test_sfn)
if rank == 0:
for d0 in range(rad, global_outputs.shape[0] - rad):
for d1 in range(rad, global_outputs.shape[1] - rad):
for d2 in range(rad, global_outputs.shape[2] - rad):
if mask[d0, d1, d2]:
assert np.array_equal(np.array(global_outputs[d0, d1, d2]), np.array([d0, d1, d2]))
</DeepExtract>
<DeepExtract>
comm = MPI.COMM_WORLD
rank = comm.rank
(dim0, dim1, dim2) = mask.shape
for subj in data:
if subj is not None:
for tr in range(subj.shape[3]):
for d1 in range(dim0):
for d2 in range(dim1):
for d3 in range(dim2):
subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])
sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
sl.distribute(data, mask)
sl.broadcast(mask)
global_outputs = sl.run_block_function(block_test_sfn)
if rank == 0:
for d0 in range(rad, global_outputs.shape[0] - rad):
for d1 in range(rad, global_outputs.shape[1] - rad):
for d2 in range(rad, global_outputs.shape[2] - rad):
if mask[d0, d1, d2]:
assert np.array_equal(np.array(global_outputs[d0, d1, d2]), np.array([d0, d1, d2, 0]))
</DeepExtract>
|
def do_test(dim0, dim1, dim2, ntr, nsubj, max_blk_edge, rad):
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
mask = np.random.choice([True, False], (dim0, dim1, dim2))
data = [np.empty((dim0, dim1, dim2, ntr), dtype=np.object) if i % size == rank else None for i in range(0, nsubj)]
comm = MPI.COMM_WORLD
rank = comm.rank
(dim0, dim1, dim2) = mask.shape
for subj in data:
if subj is not None:
for tr in range(subj.shape[3]):
for d1 in range(dim0):
for d2 in range(dim1):
for d3 in range(dim2):
subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])
sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
sl.distribute(data, mask)
sl.broadcast(MaskRadBcast(mask, rad))
global_outputs = sl.run_searchlight(voxel_test_sfn)
if rank == 0:
for d0 in range(rad, global_outputs.shape[0] - rad):
for d1 in range(rad, global_outputs.shape[1] - rad):
for d2 in range(rad, global_outputs.shape[2] - rad):
if mask[d0, d1, d2]:
assert np.array_equal(np.array(global_outputs[d0, d1, d2]), np.array([d0, d1, d2]))
comm = MPI.COMM_WORLD
rank = comm.rank
(dim0, dim1, dim2) = mask.shape
for subj in data:
if subj is not None:
for tr in range(subj.shape[3]):
for d1 in range(dim0):
for d2 in range(dim1):
for d3 in range(dim2):
subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])
sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
sl.distribute(data, mask)
sl.broadcast(mask)
global_outputs = sl.run_block_function(block_test_sfn)
if rank == 0:
for d0 in range(rad, global_outputs.shape[0] - rad):
for d1 in range(rad, global_outputs.shape[1] - rad):
for d2 in range(rad, global_outputs.shape[2] - rad):
if mask[d0, d1, d2]:
assert np.array_equal(np.array(global_outputs[d0, d1, d2]), np.array([d0, d1, d2, 0]))
|
brainiak
|
positive
|
def HitAnimation(hit_result):
if hit_result == 'miss':
<DeepExtract>
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not 'main_gun_miss' in SOUNDS:
return
if SOUNDS['main_gun_miss'] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS['main_gun_miss'], 0)
</DeepExtract>
return
if hit_result == 'smoke_hit':
<DeepExtract>
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not 'smoke_hit' in SOUNDS:
return
if SOUNDS['smoke_hit'] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS['smoke_hit'], 0)
</DeepExtract>
return
<DeepExtract>
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not hit_result in SOUNDS:
return
if SOUNDS[hit_result] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS[hit_result], 0)
</DeepExtract>
if not campaign.animations:
return
if hit_result == 'he_hit':
animations = HE_HIT_ANIMATION
else:
animations = AP_HIT_ANIMATION
x = MAP_CON_X + battle.target.x
y = MAP_CON_Y + battle.target.y
col = libtcod.console_get_char_background(map_con, battle.target.x, battle.target.y)
libtcod.console_set_char_background(0, x, y, col)
for (char, color, pause) in animations:
libtcod.console_set_char(0, x, y, char)
libtcod.console_set_char_foreground(0, x, y, color)
libtcod.console_flush()
<DeepExtract>
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
libtcod.sys_sleep_milli(pause)
</DeepExtract>
libtcod.console_flush()
|
def HitAnimation(hit_result):
if hit_result == 'miss':
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not 'main_gun_miss' in SOUNDS:
return
if SOUNDS['main_gun_miss'] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS['main_gun_miss'], 0)
return
if hit_result == 'smoke_hit':
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not 'smoke_hit' in SOUNDS:
return
if SOUNDS['smoke_hit'] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS['smoke_hit'], 0)
return
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not hit_result in SOUNDS:
return
if SOUNDS[hit_result] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS[hit_result], 0)
if not campaign.animations:
return
if hit_result == 'he_hit':
animations = HE_HIT_ANIMATION
else:
animations = AP_HIT_ANIMATION
x = MAP_CON_X + battle.target.x
y = MAP_CON_Y + battle.target.y
col = libtcod.console_get_char_background(map_con, battle.target.x, battle.target.y)
libtcod.console_set_char_background(0, x, y, col)
for (char, color, pause) in animations:
libtcod.console_set_char(0, x, y, char)
libtcod.console_set_char_foreground(0, x, y, color)
libtcod.console_flush()
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
libtcod.sys_sleep_milli(pause)
libtcod.console_flush()
|
armcom
|
positive
|
def decode(self, rawdata):
"""Passed a CoAP message body after the token as rawdata, fill self
    with the options starting at the beginning of rawdata, and return the
rest of the message (the body)."""
option_number = OptionNumber(0)
while rawdata:
if rawdata[0] == 255:
return rawdata[1:]
dllen = rawdata[0]
delta = (dllen & 240) >> 4
length = dllen & 15
rawdata = rawdata[1:]
<DeepExtract>
if delta >= 0 and delta < 13:
(delta, rawdata) = (delta, rawdata)
elif delta == 13:
if len(rawdata) < 1:
raise UnparsableMessage('Option ended prematurely')
(delta, rawdata) = (rawdata[0] + 13, rawdata[1:])
elif delta == 14:
if len(rawdata) < 2:
raise UnparsableMessage('Option ended prematurely')
(delta, rawdata) = (int.from_bytes(rawdata[:2], 'big') + 269, rawdata[2:])
else:
raise UnparsableMessage('Option contained partial payload marker.')
</DeepExtract>
<DeepExtract>
if length >= 0 and length < 13:
(length, rawdata) = (length, rawdata)
elif length == 13:
if len(rawdata) < 1:
raise UnparsableMessage('Option ended prematurely')
(length, rawdata) = (rawdata[0] + 13, rawdata[1:])
elif length == 14:
if len(rawdata) < 2:
raise UnparsableMessage('Option ended prematurely')
(length, rawdata) = (int.from_bytes(rawdata[:2], 'big') + 269, rawdata[2:])
else:
raise UnparsableMessage('Option contained partial payload marker.')
</DeepExtract>
option_number += delta
if len(rawdata) < length:
raise UnparsableMessage('Option announced but absent')
option = option_number.create_option(decode=rawdata[:length])
<DeepExtract>
self._options.setdefault(option.number, []).append(option)
</DeepExtract>
rawdata = rawdata[length:]
return b''
|
def decode(self, rawdata):
"""Passed a CoAP message body after the token as rawdata, fill self
    with the options starting at the beginning of rawdata, and return the
rest of the message (the body)."""
option_number = OptionNumber(0)
while rawdata:
if rawdata[0] == 255:
return rawdata[1:]
dllen = rawdata[0]
delta = (dllen & 240) >> 4
length = dllen & 15
rawdata = rawdata[1:]
if delta >= 0 and delta < 13:
(delta, rawdata) = (delta, rawdata)
elif delta == 13:
if len(rawdata) < 1:
raise UnparsableMessage('Option ended prematurely')
(delta, rawdata) = (rawdata[0] + 13, rawdata[1:])
elif delta == 14:
if len(rawdata) < 2:
raise UnparsableMessage('Option ended prematurely')
(delta, rawdata) = (int.from_bytes(rawdata[:2], 'big') + 269, rawdata[2:])
else:
raise UnparsableMessage('Option contained partial payload marker.')
if length >= 0 and length < 13:
(length, rawdata) = (length, rawdata)
elif length == 13:
if len(rawdata) < 1:
raise UnparsableMessage('Option ended prematurely')
(length, rawdata) = (rawdata[0] + 13, rawdata[1:])
elif length == 14:
if len(rawdata) < 2:
raise UnparsableMessage('Option ended prematurely')
(length, rawdata) = (int.from_bytes(rawdata[:2], 'big') + 269, rawdata[2:])
else:
raise UnparsableMessage('Option contained partial payload marker.')
option_number += delta
if len(rawdata) < length:
raise UnparsableMessage('Option announced but absent')
option = option_number.create_option(decode=rawdata[:length])
self._options.setdefault(option.number, []).append(option)
rawdata = rawdata[length:]
return b''
|
aiocoap
|
positive
|
def _stop_flush_thread(self):
if not self._flush_thread:
log.warning('No statsd flush thread to stop')
return
try:
<DeepExtract>
with self._buffer_lock:
if self._buffer:
self._send_to_server('\n'.join(self._buffer))
self._reset_buffer()
</DeepExtract>
finally:
pass
self._flush_thread_stop.set()
self._flush_thread.join()
self._flush_thread = None
self._flush_thread_stop.clear()
|
def _stop_flush_thread(self):
if not self._flush_thread:
log.warning('No statsd flush thread to stop')
return
try:
with self._buffer_lock:
if self._buffer:
self._send_to_server('\n'.join(self._buffer))
self._reset_buffer()
finally:
pass
self._flush_thread_stop.set()
self._flush_thread.join()
self._flush_thread = None
self._flush_thread_stop.clear()
|
datadogpy
|
positive
|
def _LoadOsqueryPackToState(self, path: str) -> None:
"""Loads osquery from an osquery pack file and creates Osquery containers.
Args:
path: the path to the JSON file.
"""
with open(path, mode='r') as fd:
global_platform = []
query_pack = json.load(fd)
if 'platform' in query_pack:
<DeepExtract>
if not query_pack.get('platform'):
global_platform = []
unique_platforms = set()
for platform in query_pack.get('platform').split(','):
platform = platform.strip()
if platform in ('all', 'any'):
unique_platforms.update(_ALL_PLATFORMS)
elif platform == 'posix':
unique_platforms.update(['darwin', 'freebsd', 'linux'])
elif platform in _ALL_PLATFORMS:
unique_platforms.add(platform)
else:
self.logger.warning(f'Unexpected value {platform} in platform value.')
global_platform = list(unique_platforms)
</DeepExtract>
for (num, (name, entry)) in enumerate(query_pack.get('queries', {}).items()):
query = entry['query']
if not self._ValidateOsquery(query):
self.logger.warning(f'Entry {num} in query pack{path} does not appear to be valid.')
continue
if 'platform' in entry:
<DeepExtract>
if not entry.get('platform'):
platform = []
unique_platforms = set()
for platform in entry.get('platform').split(','):
platform = platform.strip()
if platform in ('all', 'any'):
unique_platforms.update(_ALL_PLATFORMS)
elif platform == 'posix':
unique_platforms.update(['darwin', 'freebsd', 'linux'])
elif platform in _ALL_PLATFORMS:
unique_platforms.add(platform)
else:
self.logger.warning(f'Unexpected value {platform} in platform value.')
platform = list(unique_platforms)
</DeepExtract>
else:
platform = global_platform
self.osqueries.append(containers.OsqueryQuery(query=query, name=name, description=entry.get('description', ''), platforms=platform))
|
def _LoadOsqueryPackToState(self, path: str) -> None:
"""Loads osquery from an osquery pack file and creates Osquery containers.
Args:
path: the path to the JSON file.
"""
with open(path, mode='r') as fd:
global_platform = []
query_pack = json.load(fd)
if 'platform' in query_pack:
if not query_pack.get('platform'):
global_platform = []
unique_platforms = set()
for platform in query_pack.get('platform').split(','):
platform = platform.strip()
if platform in ('all', 'any'):
unique_platforms.update(_ALL_PLATFORMS)
elif platform == 'posix':
unique_platforms.update(['darwin', 'freebsd', 'linux'])
elif platform in _ALL_PLATFORMS:
unique_platforms.add(platform)
else:
self.logger.warning(f'Unexpected value {platform} in platform value.')
global_platform = list(unique_platforms)
for (num, (name, entry)) in enumerate(query_pack.get('queries', {}).items()):
query = entry['query']
if not self._ValidateOsquery(query):
self.logger.warning(f'Entry {num} in query pack{path} does not appear to be valid.')
continue
if 'platform' in entry:
if not entry.get('platform'):
platform = []
unique_platforms = set()
for platform in entry.get('platform').split(','):
platform = platform.strip()
if platform in ('all', 'any'):
unique_platforms.update(_ALL_PLATFORMS)
elif platform == 'posix':
unique_platforms.update(['darwin', 'freebsd', 'linux'])
elif platform in _ALL_PLATFORMS:
unique_platforms.add(platform)
else:
self.logger.warning(f'Unexpected value {platform} in platform value.')
platform = list(unique_platforms)
else:
platform = global_platform
self.osqueries.append(containers.OsqueryQuery(query=query, name=name, description=entry.get('description', ''), platforms=platform))
|
dftimewolf
|
positive
|
def init_widget(self):
widget = self.widget
widget.ref = atomref(self)
focus_registry.register(widget, self)
d = self.declaration
self._extra_features = d.extra_features
<DeepExtract>
features = self._features = self.declaration.features
if not features:
return
if features & Feature.FocusTraversal:
self.hook_focus_traversal()
if features & Feature.FocusEvents:
self.hook_focus_events()
if features & Feature.DragEnabled:
self.hook_drag()
if features & Feature.DropEnabled:
self.hook_drop()
features = self._extra_features
if features & GraphicFeature.WheelEvent:
self.hook_wheel()
if features & GraphicFeature.DrawEvent:
self.hook_draw()
</DeepExtract>
if d.selectable:
<DeepExtract>
self.widget.setFlag(QGraphicsItem.ItemIsSelectable, d.selectable)
</DeepExtract>
if d.movable:
<DeepExtract>
self.widget.setFlag(QGraphicsItem.ItemIsMovable, d.movable)
</DeepExtract>
if d.tool_tip:
<DeepExtract>
self.widget.setToolTip(d.tool_tip)
</DeepExtract>
if d.status_tip:
<DeepExtract>
self.widget.setToolTip(d.status_tip)
</DeepExtract>
if not d.enabled:
<DeepExtract>
self.widget.setEnabled(d.enabled)
</DeepExtract>
if not d.visible:
<DeepExtract>
self.widget.setVisible(d.visible)
</DeepExtract>
if d.opacity != 1:
<DeepExtract>
self.widget.setOpacity(d.opacity)
</DeepExtract>
if d.rotation:
<DeepExtract>
self.widget.setRotation(d.rotation)
</DeepExtract>
if d.scale != 1:
<DeepExtract>
self.widget.setScale(d.scale)
</DeepExtract>
<DeepExtract>
if self._guards & 1:
return
pos = self.declaration.position
w = self.widget
w.setPos(pos.x, pos.y)
w.setZValue(pos.z)
</DeepExtract>
<DeepExtract>
widget = self.widget
widget.itemChange = self.itemChange
</DeepExtract>
|
def init_widget(self):
widget = self.widget
widget.ref = atomref(self)
focus_registry.register(widget, self)
d = self.declaration
self._extra_features = d.extra_features
features = self._features = self.declaration.features
if not features:
return
if features & Feature.FocusTraversal:
self.hook_focus_traversal()
if features & Feature.FocusEvents:
self.hook_focus_events()
if features & Feature.DragEnabled:
self.hook_drag()
if features & Feature.DropEnabled:
self.hook_drop()
features = self._extra_features
if features & GraphicFeature.WheelEvent:
self.hook_wheel()
if features & GraphicFeature.DrawEvent:
self.hook_draw()
if d.selectable:
self.widget.setFlag(QGraphicsItem.ItemIsSelectable, d.selectable)
if d.movable:
self.widget.setFlag(QGraphicsItem.ItemIsMovable, d.movable)
if d.tool_tip:
self.widget.setToolTip(d.tool_tip)
if d.status_tip:
self.widget.setToolTip(d.status_tip)
if not d.enabled:
self.widget.setEnabled(d.enabled)
if not d.visible:
self.widget.setVisible(d.visible)
if d.opacity != 1:
self.widget.setOpacity(d.opacity)
if d.rotation:
self.widget.setRotation(d.rotation)
if d.scale != 1:
self.widget.setScale(d.scale)
if self._guards & 1:
return
pos = self.declaration.position
w = self.widget
w.setPos(pos.x, pos.y)
w.setZValue(pos.z)
widget = self.widget
widget.itemChange = self.itemChange
|
enamlx
|
positive
|
def from_sim(sim):
""" Create an age histogram from an already run sim """
if self.days is not None:
errormsg = 'If a simulation is being analyzed post-run, no day can be supplied: only the last day of the simulation is available'
raise ValueError(errormsg)
<DeepExtract>
self.initialized = True
self.finalized = False
return
</DeepExtract>
<DeepExtract>
raise NotImplementedError
</DeepExtract>
return
|
def from_sim(sim):
""" Create an age histogram from an already run sim """
if self.days is not None:
errormsg = 'If a simulation is being analyzed post-run, no day can be supplied: only the last day of the simulation is available'
raise ValueError(errormsg)
self.initialized = True
self.finalized = False
return
raise NotImplementedError
return
|
covasim
|
positive
|
def __init__(self, logger, cfg_name=None, pool=None):
self.logger = logger
self.config_name = cfg_name
if self.config_name is None:
self.config_name = settings.config.gateway_conf
if pool is None:
pool = settings.config.pool
self.pool = pool
self.ceph = None
self.error = False
self.reset = False
self.error_msg = ''
self.txn_list = []
self.config_locked = False
self.ceph = CephCluster()
if self.ceph.error:
self.error = True
self.error_msg = self.ceph.error_msg
return
if self.init_config():
<DeepExtract>
self.config = self._get_ceph_config()
</DeepExtract>
<DeepExtract>
update_hostname = self.needs_hostname_update()
if self.config['version'] >= Config.seed_config['version'] and (not update_hostname):
return
if self.config['version'] <= 2:
self.add_item('groups', element_name=None, initial_value={})
self.update_item('version', element_name=None, element_value=3)
if self.config['version'] == 3:
iqn = self.config['gateways'].get('iqn', None)
gateways = {}
portals = {}
self.add_item('targets', None, {})
self.add_item('discovery_auth', None, {'chap': '', 'chap_mutual': ''})
if iqn:
for (host, gateway_v3) in self.config['gateways'].items():
if isinstance(gateway_v3, dict):
portal = gateway_v3
portal.pop('iqn')
active_luns = portal.pop('active_luns')
updated = portal.pop('updated', None)
created = portal.pop('created', None)
gateway = {'active_luns': active_luns}
if created:
gateway['created'] = created
if updated:
gateway['updated'] = updated
gateways[host] = gateway
portals[host] = portal
for (_, client) in self.config['clients'].items():
client.pop('created', None)
client.pop('updated', None)
client['auth']['chap_mutual'] = ''
for (_, group) in self.config['groups'].items():
group.pop('created', None)
group.pop('updated', None)
target = {'disks': list(self.config['disks'].keys()), 'clients': self.config['clients'], 'portals': portals, 'groups': self.config['groups'], 'controls': self.config.get('controls', {}), 'ip_list': self.config['gateways']['ip_list']}
self.add_item('targets', iqn, target)
self.update_item('targets', iqn, target)
self.update_item('gateways', None, gateways)
if 'controls' in self.config:
self.del_item('controls', None)
self.del_item('clients', None)
self.del_item('groups', None)
self.update_item('version', None, 4)
if self.config['version'] == 4:
for (disk_id, disk) in self.config['disks'].items():
disk['backstore'] = USER_RBD
self.update_item('disks', disk_id, disk)
self.update_item('version', None, 5)
if self.config['version'] == 5:
for (target_iqn, target) in self.config['targets'].items():
target['acl_enabled'] = True
self.update_item('targets', target_iqn, target)
self.update_item('version', None, 6)
if self.config['version'] == 6:
new_disks = {}
old_disks = []
for (disk_id, disk) in self.config['disks'].items():
disk['backstore_object_name'] = disk_id
new_disk_id = disk_id.replace('.', '/')
new_disks[new_disk_id] = disk
old_disks.append(disk_id)
for old_disk_id in old_disks:
self.del_item('disks', old_disk_id)
for (new_disk_id, new_disk) in new_disks.items():
self.add_item('disks', new_disk_id, new_disk)
for (iqn, target) in self.config['targets'].items():
new_disk_ids = []
for disk_id in target['disks']:
new_disk_id = disk_id.replace('.', '/')
new_disk_ids.append(new_disk_id)
target['disks'] = new_disk_ids
for (_, client) in target['clients'].items():
new_luns = {}
for (lun_id, lun) in client['luns'].items():
new_lun_id = lun_id.replace('.', '/')
new_luns[new_lun_id] = lun
client['luns'] = new_luns
for (_, group) in target['groups'].items():
new_group_disks = {}
for (group_disk_id, group_disk) in group['disks'].items():
new_group_disk_id = group_disk_id.replace('.', '/')
new_group_disks[new_group_disk_id] = group_disk
group['disks'] = new_group_disks
self.update_item('targets', iqn, target)
self.update_item('version', None, 7)
if self.config['version'] == 7:
if '/' in self.config['discovery_auth']['chap']:
(duser, dpassword) = self.config['discovery_auth']['chap'].split('/', 1)
else:
duser = ''
dpassword = ''
self.config['discovery_auth']['username'] = duser
self.config['discovery_auth']['password'] = dpassword
self.config['discovery_auth']['password_encryption_enabled'] = False
self.config['discovery_auth'].pop('chap', None)
if '/' in self.config['discovery_auth']['chap_mutual']:
(dmuser, dmpassword) = self.config['discovery_auth']['chap_mutual'].split('/', 1)
else:
dmuser = ''
dmpassword = ''
self.config['discovery_auth']['mutual_username'] = dmuser
self.config['discovery_auth']['mutual_password'] = dmpassword
self.config['discovery_auth']['mutual_password_encryption_enabled'] = False
self.config['discovery_auth'].pop('chap_mutual', None)
self.update_item('discovery_auth', None, self.config['discovery_auth'])
for (target_iqn, target) in self.config['targets'].items():
for (_, client) in target['clients'].items():
if '/' in client['auth']['chap']:
(user, password) = client['auth']['chap'].split('/', 1)
else:
user = ''
password = ''
client['auth']['username'] = user
client['auth']['password'] = password
client['auth']['password_encryption_enabled'] = len(password) > 16 and encryption_available()
client['auth'].pop('chap', None)
if '/' in client['auth']['chap_mutual']:
(muser, mpassword) = client['auth']['chap_mutual'].split('/', 1)
else:
muser = ''
mpassword = ''
client['auth']['mutual_username'] = muser
client['auth']['mutual_password'] = mpassword
client['auth']['mutual_password_encryption_enabled'] = len(mpassword) > 16 and encryption_available()
client['auth'].pop('chap_mutual', None)
self.update_item('targets', target_iqn, target)
self.update_item('version', None, 8)
if self.config['version'] == 8:
for (target_iqn, target) in self.config['targets'].items():
for (_, portal) in target['portals'].items():
portal['portal_ip_addresses'] = [portal['portal_ip_address']]
portal.pop('portal_ip_address')
self.update_item('targets', target_iqn, target)
self.update_item('version', None, 9)
if self.config['version'] == 9 or update_hostname:
gateways_upgraded = self.config.get('gateways_upgraded')
if not gateways_upgraded:
gateways_upgraded = []
self.add_item('gateways_upgraded', None, gateways_upgraded)
this_shortname = socket.gethostname().split('.')[0]
this_fqdn = socket.getfqdn()
if this_fqdn not in gateways_upgraded:
gateways_config = self.config['gateways']
gateway_config = gateways_config.get(this_shortname)
if gateway_config:
gateways_config.pop(this_shortname)
gateways_config[this_fqdn] = gateway_config
self.update_item('gateways', None, gateways_config)
for (target_iqn, target) in self.config['targets'].items():
portals_config = target['portals']
portal_config = portals_config.get(this_shortname)
if portal_config:
portals_config.pop(this_shortname)
portals_config[this_fqdn] = portal_config
self.update_item('targets', target_iqn, target)
for (disk_id, disk) in self.config['disks'].items():
if disk.get('allocating_host') == this_shortname:
disk['allocating_host'] = this_fqdn
if disk.get('owner') == this_shortname:
disk['owner'] = this_fqdn
self.update_item('disks', disk_id, disk)
gateways_upgraded.append(this_fqdn)
self.update_item('gateways_upgraded', None, gateways_upgraded)
if any((gateway_name not in gateways_upgraded for gateway_name in self.config['gateways'].keys())):
self.logger.debug('gateways upgraded to 10: {}'.format(gateways_upgraded))
else:
self.del_item('gateways_upgraded', None)
if self.config['version'] == 9:
self.update_item('version', None, 10)
if self.config['version'] == 10:
for (target_iqn, target) in self.config['targets'].items():
target['auth'] = {'username': '', 'password': '', 'password_encryption_enabled': False, 'mutual_username': '', 'mutual_password': '', 'mutual_password_encryption_enabled': False}
disks = {}
for (disk_index, disk) in enumerate(sorted(target['disks'])):
disks[disk] = {'lun_id': disk_index}
target['disks'] = disks
self.update_item('targets', target_iqn, target)
self.update_item('version', None, 11)
self.commit('retain')
</DeepExtract>
self.changed = False
|
def __init__(self, logger, cfg_name=None, pool=None):
self.logger = logger
self.config_name = cfg_name
if self.config_name is None:
self.config_name = settings.config.gateway_conf
if pool is None:
pool = settings.config.pool
self.pool = pool
self.ceph = None
self.error = False
self.reset = False
self.error_msg = ''
self.txn_list = []
self.config_locked = False
self.ceph = CephCluster()
if self.ceph.error:
self.error = True
self.error_msg = self.ceph.error_msg
return
if self.init_config():
self.config = self._get_ceph_config()
update_hostname = self.needs_hostname_update()
if self.config['version'] >= Config.seed_config['version'] and (not update_hostname):
return
if self.config['version'] <= 2:
self.add_item('groups', element_name=None, initial_value={})
self.update_item('version', element_name=None, element_value=3)
if self.config['version'] == 3:
iqn = self.config['gateways'].get('iqn', None)
gateways = {}
portals = {}
self.add_item('targets', None, {})
self.add_item('discovery_auth', None, {'chap': '', 'chap_mutual': ''})
if iqn:
for (host, gateway_v3) in self.config['gateways'].items():
if isinstance(gateway_v3, dict):
portal = gateway_v3
portal.pop('iqn')
active_luns = portal.pop('active_luns')
updated = portal.pop('updated', None)
created = portal.pop('created', None)
gateway = {'active_luns': active_luns}
if created:
gateway['created'] = created
if updated:
gateway['updated'] = updated
gateways[host] = gateway
portals[host] = portal
for (_, client) in self.config['clients'].items():
client.pop('created', None)
client.pop('updated', None)
client['auth']['chap_mutual'] = ''
for (_, group) in self.config['groups'].items():
group.pop('created', None)
group.pop('updated', None)
target = {'disks': list(self.config['disks'].keys()), 'clients': self.config['clients'], 'portals': portals, 'groups': self.config['groups'], 'controls': self.config.get('controls', {}), 'ip_list': self.config['gateways']['ip_list']}
self.add_item('targets', iqn, target)
self.update_item('targets', iqn, target)
self.update_item('gateways', None, gateways)
if 'controls' in self.config:
self.del_item('controls', None)
self.del_item('clients', None)
self.del_item('groups', None)
self.update_item('version', None, 4)
if self.config['version'] == 4:
for (disk_id, disk) in self.config['disks'].items():
disk['backstore'] = USER_RBD
self.update_item('disks', disk_id, disk)
self.update_item('version', None, 5)
if self.config['version'] == 5:
for (target_iqn, target) in self.config['targets'].items():
target['acl_enabled'] = True
self.update_item('targets', target_iqn, target)
self.update_item('version', None, 6)
if self.config['version'] == 6:
new_disks = {}
old_disks = []
for (disk_id, disk) in self.config['disks'].items():
disk['backstore_object_name'] = disk_id
new_disk_id = disk_id.replace('.', '/')
new_disks[new_disk_id] = disk
old_disks.append(disk_id)
for old_disk_id in old_disks:
self.del_item('disks', old_disk_id)
for (new_disk_id, new_disk) in new_disks.items():
self.add_item('disks', new_disk_id, new_disk)
for (iqn, target) in self.config['targets'].items():
new_disk_ids = []
for disk_id in target['disks']:
new_disk_id = disk_id.replace('.', '/')
new_disk_ids.append(new_disk_id)
target['disks'] = new_disk_ids
for (_, client) in target['clients'].items():
new_luns = {}
for (lun_id, lun) in client['luns'].items():
new_lun_id = lun_id.replace('.', '/')
new_luns[new_lun_id] = lun
client['luns'] = new_luns
for (_, group) in target['groups'].items():
new_group_disks = {}
for (group_disk_id, group_disk) in group['disks'].items():
new_group_disk_id = group_disk_id.replace('.', '/')
new_group_disks[new_group_disk_id] = group_disk
group['disks'] = new_group_disks
self.update_item('targets', iqn, target)
self.update_item('version', None, 7)
if self.config['version'] == 7:
if '/' in self.config['discovery_auth']['chap']:
(duser, dpassword) = self.config['discovery_auth']['chap'].split('/', 1)
else:
duser = ''
dpassword = ''
self.config['discovery_auth']['username'] = duser
self.config['discovery_auth']['password'] = dpassword
self.config['discovery_auth']['password_encryption_enabled'] = False
self.config['discovery_auth'].pop('chap', None)
if '/' in self.config['discovery_auth']['chap_mutual']:
(dmuser, dmpassword) = self.config['discovery_auth']['chap_mutual'].split('/', 1)
else:
dmuser = ''
dmpassword = ''
self.config['discovery_auth']['mutual_username'] = dmuser
self.config['discovery_auth']['mutual_password'] = dmpassword
self.config['discovery_auth']['mutual_password_encryption_enabled'] = False
self.config['discovery_auth'].pop('chap_mutual', None)
self.update_item('discovery_auth', None, self.config['discovery_auth'])
for (target_iqn, target) in self.config['targets'].items():
for (_, client) in target['clients'].items():
if '/' in client['auth']['chap']:
(user, password) = client['auth']['chap'].split('/', 1)
else:
user = ''
password = ''
client['auth']['username'] = user
client['auth']['password'] = password
client['auth']['password_encryption_enabled'] = len(password) > 16 and encryption_available()
client['auth'].pop('chap', None)
if '/' in client['auth']['chap_mutual']:
(muser, mpassword) = client['auth']['chap_mutual'].split('/', 1)
else:
muser = ''
mpassword = ''
client['auth']['mutual_username'] = muser
client['auth']['mutual_password'] = mpassword
client['auth']['mutual_password_encryption_enabled'] = len(mpassword) > 16 and encryption_available()
client['auth'].pop('chap_mutual', None)
self.update_item('targets', target_iqn, target)
self.update_item('version', None, 8)
if self.config['version'] == 8:
for (target_iqn, target) in self.config['targets'].items():
for (_, portal) in target['portals'].items():
portal['portal_ip_addresses'] = [portal['portal_ip_address']]
portal.pop('portal_ip_address')
self.update_item('targets', target_iqn, target)
self.update_item('version', None, 9)
if self.config['version'] == 9 or update_hostname:
gateways_upgraded = self.config.get('gateways_upgraded')
if not gateways_upgraded:
gateways_upgraded = []
self.add_item('gateways_upgraded', None, gateways_upgraded)
this_shortname = socket.gethostname().split('.')[0]
this_fqdn = socket.getfqdn()
if this_fqdn not in gateways_upgraded:
gateways_config = self.config['gateways']
gateway_config = gateways_config.get(this_shortname)
if gateway_config:
gateways_config.pop(this_shortname)
gateways_config[this_fqdn] = gateway_config
self.update_item('gateways', None, gateways_config)
for (target_iqn, target) in self.config['targets'].items():
portals_config = target['portals']
portal_config = portals_config.get(this_shortname)
if portal_config:
portals_config.pop(this_shortname)
portals_config[this_fqdn] = portal_config
self.update_item('targets', target_iqn, target)
for (disk_id, disk) in self.config['disks'].items():
if disk.get('allocating_host') == this_shortname:
disk['allocating_host'] = this_fqdn
if disk.get('owner') == this_shortname:
disk['owner'] = this_fqdn
self.update_item('disks', disk_id, disk)
gateways_upgraded.append(this_fqdn)
self.update_item('gateways_upgraded', None, gateways_upgraded)
if any((gateway_name not in gateways_upgraded for gateway_name in self.config['gateways'].keys())):
self.logger.debug('gateways upgraded to 10: {}'.format(gateways_upgraded))
else:
self.del_item('gateways_upgraded', None)
if self.config['version'] == 9:
self.update_item('version', None, 10)
if self.config['version'] == 10:
for (target_iqn, target) in self.config['targets'].items():
target['auth'] = {'username': '', 'password': '', 'password_encryption_enabled': False, 'mutual_username': '', 'mutual_password': '', 'mutual_password_encryption_enabled': False}
disks = {}
for (disk_index, disk) in enumerate(sorted(target['disks'])):
disks[disk] = {'lun_id': disk_index}
target['disks'] = disks
self.update_item('targets', target_iqn, target)
self.update_item('version', None, 11)
self.commit('retain')
self.changed = False
|
ceph-iscsi
|
positive
|
def make_wires_adjacent(scan, diagram, inputs):
if not inputs:
return (scan, diagram, len(scan))
offset = scan.index(inputs[0])
for (i, _) in enumerate(inputs[1:]):
(source, target) = (scan.index(inputs[i + 1]), offset + i + 1)
<DeepExtract>
if target < source:
swaps = Id(target) @ Diagram.swap(source - target, 1) @ Id(len(scan) - source - 1)
scan = scan[:target] + (scan[source],) + scan[target:source] + scan[source + 1:]
elif target > source:
swaps = Id(source) @ Diagram.swap(1, target - source) @ Id(len(scan) - target - 1)
scan = scan[:source] + scan[source + 1:target] + (scan[source],) + scan[target:]
else:
swaps = Id(len(scan))
(scan, swaps) = (scan, swaps)
</DeepExtract>
diagram = diagram >> swaps
return (scan, diagram, offset)
|
def make_wires_adjacent(scan, diagram, inputs):
if not inputs:
return (scan, diagram, len(scan))
offset = scan.index(inputs[0])
for (i, _) in enumerate(inputs[1:]):
(source, target) = (scan.index(inputs[i + 1]), offset + i + 1)
if target < source:
swaps = Id(target) @ Diagram.swap(source - target, 1) @ Id(len(scan) - source - 1)
scan = scan[:target] + (scan[source],) + scan[target:source] + scan[source + 1:]
elif target > source:
swaps = Id(source) @ Diagram.swap(1, target - source) @ Id(len(scan) - target - 1)
scan = scan[:source] + scan[source + 1:target] + (scan[source],) + scan[target:]
else:
swaps = Id(len(scan))
(scan, swaps) = (scan, swaps)
diagram = diagram >> swaps
return (scan, diagram, offset)
|
discopy
|
positive
|
def __init__(self):
self.headers = default_headers()
self.auth = None
self.proxies = {}
self.hooks = default_hooks()
self.params = {}
self.stream = False
self.verify = True
self.cert = None
self.max_redirects = DEFAULT_REDIRECT_LIMIT
self.trust_env = True
self.cookies = cookiejar_from_dict({})
self.adapters = OrderedDict()
<DeepExtract>
self.adapters['https://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('https://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
</DeepExtract>
<DeepExtract>
self.adapters['http://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('http://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
</DeepExtract>
self.redirect_cache = {}
|
def __init__(self):
self.headers = default_headers()
self.auth = None
self.proxies = {}
self.hooks = default_hooks()
self.params = {}
self.stream = False
self.verify = True
self.cert = None
self.max_redirects = DEFAULT_REDIRECT_LIMIT
self.trust_env = True
self.cookies = cookiejar_from_dict({})
self.adapters = OrderedDict()
self.adapters['https://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('https://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
self.adapters['http://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('http://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
self.redirect_cache = {}
|
crunchy-xml-decoder
|
positive
|
def get_region(team_name):
nonlocal team_regions
if team_regions is False:
return ''
if not team_regions:
page = REQ.get(self.info['use_icpc.kimden.online'])
matches = re.finditer('<label[^>]*for="(?P<selector>[^"]*)"[^"]*onclick="setRegion[^"]*"[^>]*>(?P<name>[^>]*)</', page)
regions = {}
for match in matches:
selector = match.group('selector').replace('selector', '').replace('--', '-')
regions[selector] = match.group('name')
pprint(regions)
matches = re.finditer('\n <tr[^>]*class="(?P<class>[^"]*)"[^>]*>\\s*<td[^>]*>[^<]*</td>\\s*<td[^>]*title="(?P<name>[^"]*)">[^<]*</td>\n ', page, re.VERBOSE)
for match in matches:
classes = match.group('class').split()
name = match.group('name')
name = html.unescape(name)
<DeepExtract>
name = re.sub('^\\*\\s*', '', name)
name = re.sub(':', '', name)
name = re.sub('\\s+', '', name)
name = name
</DeepExtract>
for c in classes:
if c in regions:
team_regions[name] = regions[c]
break
if not team_regions:
team_regions = False
<DeepExtract>
team_name = re.sub('^\\*\\s*', '', team_name)
team_name = re.sub(':', '', team_name)
team_name = re.sub('\\s+', '', team_name)
team_name = team_name
</DeepExtract>
return team_regions.get(team_name, '')
|
def get_region(team_name):
nonlocal team_regions
if team_regions is False:
return ''
if not team_regions:
page = REQ.get(self.info['use_icpc.kimden.online'])
matches = re.finditer('<label[^>]*for="(?P<selector>[^"]*)"[^"]*onclick="setRegion[^"]*"[^>]*>(?P<name>[^>]*)</', page)
regions = {}
for match in matches:
selector = match.group('selector').replace('selector', '').replace('--', '-')
regions[selector] = match.group('name')
pprint(regions)
matches = re.finditer('\n <tr[^>]*class="(?P<class>[^"]*)"[^>]*>\\s*<td[^>]*>[^<]*</td>\\s*<td[^>]*title="(?P<name>[^"]*)">[^<]*</td>\n ', page, re.VERBOSE)
for match in matches:
classes = match.group('class').split()
name = match.group('name')
name = html.unescape(name)
name = re.sub('^\\*\\s*', '', name)
name = re.sub(':', '', name)
name = re.sub('\\s+', '', name)
name = name
for c in classes:
if c in regions:
team_regions[name] = regions[c]
break
if not team_regions:
team_regions = False
team_name = re.sub('^\\*\\s*', '', team_name)
team_name = re.sub(':', '', team_name)
team_name = re.sub('\\s+', '', team_name)
team_name = team_name
return team_regions.get(team_name, '')
|
clist
|
positive
|
def test_menu_prologue_ascii_no_text_with_both_borders(self):
<DeepExtract>
msg = '{title:{fill}^{width}}'.format(title='simulate screen edges', fill='-', width=width - 2)
print('{edge}{msg}{edge}'.format(edge='|', msg=msg))
</DeepExtract>
pro = MenuTextSection(MenuStyle(), show_top_border=True, show_bottom_border=True)
for line in pro.generate():
print(line)
|
def test_menu_prologue_ascii_no_text_with_both_borders(self):
msg = '{title:{fill}^{width}}'.format(title='simulate screen edges', fill='-', width=width - 2)
print('{edge}{msg}{edge}'.format(edge='|', msg=msg))
pro = MenuTextSection(MenuStyle(), show_top_border=True, show_bottom_border=True)
for line in pro.generate():
print(line)
|
console-menu
|
positive
|
def test_date_with_spaces_is_not_parsed(self):
datestring = '2013 25 12'
<DeepExtract>
self.parser = _no_spaces_parser
</DeepExtract>
<DeepExtract>
self.settings = settings
</DeepExtract>
<DeepExtract>
try:
self.result = self.parser.parse(datestring, self.settings)
except Exception as error:
self.error = error
</DeepExtract>
self.then_error_was_raised(ValueError, ['Unable to parse date from: %s' % datestring])
|
def test_date_with_spaces_is_not_parsed(self):
datestring = '2013 25 12'
self.parser = _no_spaces_parser
self.settings = settings
try:
self.result = self.parser.parse(datestring, self.settings)
except Exception as error:
self.error = error
self.then_error_was_raised(ValueError, ['Unable to parse date from: %s' % datestring])
|
dateparser
|
positive
|
def addTokenizedMessages(self):
"""Creates a parsed version of the message table
Returns
-------
tableName : str
Name of tokenized message table: corptable_tok.
"""
tableName = '%s_tok' % self.corptable
tokenizer = Tokenizer(use_unicode=self.use_unicode)
<DeepExtract>
(columnNames, messageIndex, messageIdIndex) = self._createTable(tableName, modify='LONGTEXT')
cfRows = self._findAllGroups()
groupsWritten = 0
for groups in dlac.chunks(cfRows, self.groupsAtTime):
rows = self._getMsgsForGroups(groups, columnNames, messageIndex)
messages = [r[messageIndex] for r in rows]
if messages:
parses = [json.dumps(tokenizer.tokenize(m)) for m in messages]
self._writeMsgsForGroups(rows, parses, messageIndex, tableName, columnNames)
groupsWritten += self.groupsAtTime
if groupsWritten % 100 == 0:
dlac.warn(" %.1fk %ss' messages tagged and written" % (groupsWritten / float(1000), self.correl_field))
else:
dlac.warn(' Warning: No messages for:' + str(groups))
mm.enableTableKeys(self.corpdb, self.dbCursor, tableName, charset=self.encoding, use_unicode=self.use_unicode, mysql_config_file=self.mysql_config_file)
return tableName
</DeepExtract>
return tableName
|
def addTokenizedMessages(self):
"""Creates a parsed version of the message table
Returns
-------
tableName : str
Name of tokenized message table: corptable_tok.
"""
tableName = '%s_tok' % self.corptable
tokenizer = Tokenizer(use_unicode=self.use_unicode)
(columnNames, messageIndex, messageIdIndex) = self._createTable(tableName, modify='LONGTEXT')
cfRows = self._findAllGroups()
groupsWritten = 0
for groups in dlac.chunks(cfRows, self.groupsAtTime):
rows = self._getMsgsForGroups(groups, columnNames, messageIndex)
messages = [r[messageIndex] for r in rows]
if messages:
parses = [json.dumps(tokenizer.tokenize(m)) for m in messages]
self._writeMsgsForGroups(rows, parses, messageIndex, tableName, columnNames)
groupsWritten += self.groupsAtTime
if groupsWritten % 100 == 0:
dlac.warn(" %.1fk %ss' messages tagged and written" % (groupsWritten / float(1000), self.correl_field))
else:
dlac.warn(' Warning: No messages for:' + str(groups))
mm.enableTableKeys(self.corpdb, self.dbCursor, tableName, charset=self.encoding, use_unicode=self.use_unicode, mysql_config_file=self.mysql_config_file)
return tableName
return tableName
|
dlatk
|
positive
|
def test_doctest(self):
"""Test unit_tested index with package that uses doctest."""
def setup(project_dir):
main_filename = os.path.join(project_dir, 'main.py')
<DeepExtract>
fd = file(main_filename, 'w')
fd.write(main_contents)
fd.close()
</DeepExtract>
def asserts(cheesecake):
assert cheesecake.functions == ['main.function_without_docstring']
assert cheesecake.classes == ['main.SomeClass']
<DeepExtract>
package_name = 'index_test'
project_dir = os.path.join(self.sandbox_dir, package_name)
os.mkdir(project_dir)
if setup:
setup(project_dir)
logger.setconsumer('console', logger.STDOUT)
console_log = logger.MultipleProducer('cheesecake console')
pkg_name = package_name
class CheesecakeMockup(Cheesecake):
def __init__(self):
pass
sandbox = self.sandbox_dir
package_name = pkg_name
log = console_log
cheesecake = CheesecakeMockup()
cheesecake.walk_pkg()
if asserts:
asserts(cheesecake)
index = IndexUnitTested()
index.compute_with(cheesecake)
print('Index: %d/%d -- %s' % (index.value, index.max_value, index.details))
assert index.value == index.max_value
</DeepExtract>
|
def test_doctest(self):
"""Test unit_tested index with package that uses doctest."""
def setup(project_dir):
main_filename = os.path.join(project_dir, 'main.py')
fd = file(main_filename, 'w')
fd.write(main_contents)
fd.close()
def asserts(cheesecake):
assert cheesecake.functions == ['main.function_without_docstring']
assert cheesecake.classes == ['main.SomeClass']
package_name = 'index_test'
project_dir = os.path.join(self.sandbox_dir, package_name)
os.mkdir(project_dir)
if setup:
setup(project_dir)
logger.setconsumer('console', logger.STDOUT)
console_log = logger.MultipleProducer('cheesecake console')
pkg_name = package_name
class CheesecakeMockup(Cheesecake):
def __init__(self):
pass
sandbox = self.sandbox_dir
package_name = pkg_name
log = console_log
cheesecake = CheesecakeMockup()
cheesecake.walk_pkg()
if asserts:
asserts(cheesecake)
index = IndexUnitTested()
index.compute_with(cheesecake)
print('Index: %d/%d -- %s' % (index.value, index.max_value, index.details))
assert index.value == index.max_value
|
cheesecake
|
positive
|
def log_pdf(self, mask: th.Tensor, obs: ComplexTensor) -> th.Tensor:
"""
Compute log-pdf of the cacgmm distributions
Args:
mask (Tensor): N x F x T
obs (ComplexTensor): N x F x C x T
Return:
log_pdf (Tensor)
"""
(_, _, C, _) = obs.shape
<DeepExtract>
(_, _, C, _) = obs.shape
mask = mask.unsqueeze(-2)
nominator = obs * mask @ obs.conj_transpose(-1, -2)
denominator = th.clamp(mask.sum(-1, keepdims=True), min=self.eps)
Bk = C * nominator / denominator
Bk = (Bk + Bk.conj_transpose(-1, -2)) / 2
Bk = Bk
</DeepExtract>
I = th.eye(C, device=Bk.device, dtype=Bk.dtype)
Bk = Bk + I * self.eps
<DeepExtract>
m = th.cat([Bk.real, -Bk.imag], -1)
n = th.cat([Bk.imag, Bk.real], -1)
Rk = th.cat([m, n], -2)
(ev, _) = th.linalg.eigh(Rk, UPLO='U')
det = th.cumprod(ev[..., ::2], dim=-1)
det = th.clamp(det[..., -1], min=self.eps)
Dk = det
</DeepExtract>
Bk_inv = Bk.inverse()
K = (obs.conj() * (Bk_inv @ obs)).sum(-2)
K = th.clamp(K.real, min=self.eps)
log_pdf = -C * th.log(K) - th.log(Dk[..., None])
return log_pdf
|
def log_pdf(self, mask: th.Tensor, obs: ComplexTensor) -> th.Tensor:
"""
Compute log-pdf of the cacgmm distributions
Args:
mask (Tensor): N x F x T
obs (ComplexTensor): N x F x C x T
Return:
log_pdf (Tensor)
"""
(_, _, C, _) = obs.shape
(_, _, C, _) = obs.shape
mask = mask.unsqueeze(-2)
nominator = obs * mask @ obs.conj_transpose(-1, -2)
denominator = th.clamp(mask.sum(-1, keepdims=True), min=self.eps)
Bk = C * nominator / denominator
Bk = (Bk + Bk.conj_transpose(-1, -2)) / 2
Bk = Bk
I = th.eye(C, device=Bk.device, dtype=Bk.dtype)
Bk = Bk + I * self.eps
m = th.cat([Bk.real, -Bk.imag], -1)
n = th.cat([Bk.imag, Bk.real], -1)
Rk = th.cat([m, n], -2)
(ev, _) = th.linalg.eigh(Rk, UPLO='U')
det = th.cumprod(ev[..., ::2], dim=-1)
det = th.clamp(det[..., -1], min=self.eps)
Dk = det
Bk_inv = Bk.inverse()
K = (obs.conj() * (Bk_inv @ obs)).sum(-2)
K = th.clamp(K.real, min=self.eps)
log_pdf = -C * th.log(K) - th.log(Dk[..., None])
return log_pdf
|
aps
|
positive
|
def expand_nodelist(system, node_str):
hostname = socket.gethostname()
if 'theta' in hostname:
<DeepExtract>
node_ids = []
ranges = node_str.split(',')
lo = None
hi = None
for node_range in ranges:
(lo, *hi) = node_range.split('-')
lo = int(lo)
if hi:
hi = int(hi[0])
node_ids.extend(list(range(lo, hi + 1)))
else:
node_ids.append(lo)
node_list = [f'nid{node_id:05d}' for node_id in node_ids]
</DeepExtract>
else:
node_list = [node_str]
return node_list
|
def expand_nodelist(system, node_str):
hostname = socket.gethostname()
if 'theta' in hostname:
node_ids = []
ranges = node_str.split(',')
lo = None
hi = None
for node_range in ranges:
(lo, *hi) = node_range.split('-')
lo = int(lo)
if hi:
hi = int(hi[0])
node_ids.extend(list(range(lo, hi + 1)))
else:
node_ids.append(lo)
node_list = [f'nid{node_id:05d}' for node_id in node_ids]
else:
node_list = [node_str]
return node_list
|
deephyper
|
positive
|
def test_first_derivative_gradient_richardson(example_function_gradient_fixtures):
f = example_function_gradient_fixtures['func']
fprime = example_function_gradient_fixtures['func_prime']
<DeepExtract>
(x1, x2, x3) = (np.ones(3)[0], np.ones(3)[1], np.ones(3)[2])
grad = np.array([np.cos(x1), -np.sin(x2), x3 - x3])
true_fprime = grad
</DeepExtract>
scipy_fprime = approx_derivative(f, np.ones(3))
our_fprime = first_derivative(f, np.ones(3), n_steps=3, method='central', n_cores=1)
aaae(scipy_fprime, our_fprime['derivative'])
aaae(true_fprime, our_fprime['derivative'])
|
def test_first_derivative_gradient_richardson(example_function_gradient_fixtures):
f = example_function_gradient_fixtures['func']
fprime = example_function_gradient_fixtures['func_prime']
(x1, x2, x3) = (np.ones(3)[0], np.ones(3)[1], np.ones(3)[2])
grad = np.array([np.cos(x1), -np.sin(x2), x3 - x3])
true_fprime = grad
scipy_fprime = approx_derivative(f, np.ones(3))
our_fprime = first_derivative(f, np.ones(3), n_steps=3, method='central', n_cores=1)
aaae(scipy_fprime, our_fprime['derivative'])
aaae(true_fprime, our_fprime['derivative'])
|
estimagic
|
positive
|
def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
total_flops = model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
<DeepExtract>
if is_supported_instance(self):
accumulated_flops_cost = self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
accumulated_flops_cost = sum
</DeepExtract>
return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} MACs'.format(accumulated_flops_cost / total_flops), self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model, file=ost)
model.apply(del_extra_repr)
|
def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
total_flops = model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
if is_supported_instance(self):
accumulated_flops_cost = self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
accumulated_flops_cost = sum
return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} MACs'.format(accumulated_flops_cost / total_flops), self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model, file=ost)
model.apply(del_extra_repr)
|
DNL-Object-Detection
|
positive
|
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
<DeepExtract>
(att_feats, att_masks) = self.clip_att(att_feats, att_masks)
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
p_att_feats = self.ctx2att(att_feats)
(p_fc_feats, p_att_feats, pp_att_feats, p_att_masks) = (fc_feats, att_feats, p_att_feats, att_masks)
</DeepExtract>
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size * sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size * sample_n, self.seq_length, self.vocab_size + 1)
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
<DeepExtract>
weight = self.logit.weight if hasattr(self.logit, 'weight') else self.logit[0].weight
state = (weight.new_zeros(self.num_layers, beam_size, self.rnn_size), weight.new_zeros(self.num_layers, beam_size, self.rnn_size))
</DeepExtract>
(tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks) = utils.repeat_tensors(beam_size, [p_fc_feats[k:k + 1], p_att_feats[k:k + 1], pp_att_feats[k:k + 1], p_att_masks[k:k + 1] if att_masks is not None else None])
for t in range(1):
if t == 0:
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
<DeepExtract>
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
(output, state) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, show_gate_labels, task)
else:
(output, state) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
(logprobs, state) = (logprobs, state)
elif task == 'both':
(output, state, output_trace) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
(logprobs, state) = (logprobs, state, self.model.generator_trace(output_trace))
</DeepExtract>
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k * sample_n + _n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k * sample_n + _n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq']
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
return (seq, seqLogprobs)
|
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
(att_feats, att_masks) = self.clip_att(att_feats, att_masks)
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
p_att_feats = self.ctx2att(att_feats)
(p_fc_feats, p_att_feats, pp_att_feats, p_att_masks) = (fc_feats, att_feats, p_att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size * sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size * sample_n, self.seq_length, self.vocab_size + 1)
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
weight = self.logit.weight if hasattr(self.logit, 'weight') else self.logit[0].weight
state = (weight.new_zeros(self.num_layers, beam_size, self.rnn_size), weight.new_zeros(self.num_layers, beam_size, self.rnn_size))
(tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks) = utils.repeat_tensors(beam_size, [p_fc_feats[k:k + 1], p_att_feats[k:k + 1], pp_att_feats[k:k + 1], p_att_masks[k:k + 1] if att_masks is not None else None])
for t in range(1):
if t == 0:
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
(output, state) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, show_gate_labels, task)
else:
(output, state) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
(logprobs, state) = (logprobs, state)
elif task == 'both':
(output, state, output_trace) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
(logprobs, state) = (logprobs, state, self.model.generator_trace(output_trace))
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k * sample_n + _n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k * sample_n + _n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq']
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
return (seq, seqLogprobs)
|
connect-caption-and-trace
|
positive
|
def __eq__(self, other):
"""Compare two MapLocations for deep equality.
:type self: MapLocation
:type other: MapLocation
:rtype: bool
"""
assert type(other) is MapLocation, 'incorrect type of arg other: should be MapLocation, is {}'.format(type(other))
result = _lib.bc_MapLocation_eq(self._ptr, other._ptr)
<DeepExtract>
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
</DeepExtract>
result = bool(result)
return result
|
def __eq__(self, other):
"""Compare two MapLocations for deep equality.
:type self: MapLocation
:type other: MapLocation
:rtype: bool
"""
assert type(other) is MapLocation, 'incorrect type of arg other: should be MapLocation, is {}'.format(type(other))
result = _lib.bc_MapLocation_eq(self._ptr, other._ptr)
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
result = bool(result)
return result
|
bc18-scaffold
|
positive
|