| before (stringlengths 0–955k) | after (stringlengths 0–877k) | repo (stringlengths 1–74) | type (stringclasses 1 value) |
|---|---|---|---|
def compute_context_vector(self, prev_state, inputs, precomputed_values=None, mask=None):
"""
Compute the context vector with soft attention.
"""
precomputed_values = precomputed_values if precomputed_values else self.precompute(inputs)
<DeepExtract>
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
if UaH.ndim == 2:
preact = WaSp[:, None, :] + UaH[None, :, :]
else:
preact = WaSp[:, None, :] + UaH
act = T.activate(preact, 'tanh')
align_scores = T.dot(act, self.Va)
if mask:
mask = (1 - mask) * -99.0
if align_scores.ndim == 3:
align_scores += mask[None, :]
else:
align_scores += mask
align_weights = T.nnet.softmax(align_scores)
align_weights = align_weights
</DeepExtract>
context_vector = T.sum(align_weights[:, :, None] * inputs, axis=1)
return context_vector
|
def compute_context_vector(self, prev_state, inputs, precomputed_values=None, mask=None):
"""
Compute the context vector with soft attention.
"""
precomputed_values = precomputed_values if precomputed_values else self.precompute(inputs)
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
if UaH.ndim == 2:
preact = WaSp[:, None, :] + UaH[None, :, :]
else:
preact = WaSp[:, None, :] + UaH
act = T.activate(preact, 'tanh')
align_scores = T.dot(act, self.Va)
if mask:
mask = (1 - mask) * -99.0
if align_scores.ndim == 3:
align_scores += mask[None, :]
else:
align_scores += mask
align_weights = T.nnet.softmax(align_scores)
align_weights = align_weights
context_vector = T.sum(align_weights[:, :, None] * inputs, axis=1)
return context_vector
|
deepy
|
positive
|
def tabfile2doefile(tabfile, doefile):
"""tabfile2doefile"""
<DeepExtract>
data = mylib1.readfileasmac(tabfile)
alist = data.split('\r')
blist = alist[1].split('\t')
clist = []
for num in range(0, len(alist)):
ilist = alist[num].split('\t')
clist = clist + [ilist]
cclist = clist[:-1]
alist = cclist
</DeepExtract>
<DeepExtract>
theequal = ''
astr = ''
lenj = len(alist)
leni = len(alist[0])
for i in range(0, leni - 1):
for j in range(0, lenj):
if j == 0:
astr = astr + alist[j][i + 1] + theequal + alist[j][0] + RET
else:
astr = astr + alist[j][0] + theequal + alist[j][i + 1] + RET
astr = astr + RET
astr = astr
</DeepExtract>
mylib1.write_str2file(doefile, astr)
|
def tabfile2doefile(tabfile, doefile):
"""tabfile2doefile"""
data = mylib1.readfileasmac(tabfile)
alist = data.split('\r')
blist = alist[1].split('\t')
clist = []
for num in range(0, len(alist)):
ilist = alist[num].split('\t')
clist = clist + [ilist]
cclist = clist[:-1]
alist = cclist
theequal = ''
astr = ''
lenj = len(alist)
leni = len(alist[0])
for i in range(0, leni - 1):
for j in range(0, lenj):
if j == 0:
astr = astr + alist[j][i + 1] + theequal + alist[j][0] + RET
else:
astr = astr + alist[j][0] + theequal + alist[j][i + 1] + RET
astr = astr + RET
astr = astr
mylib1.write_str2file(doefile, astr)
|
eppy
|
positive
|
def replaceWithChildren(self):
myParent = self.parent
myIndex = self.parent.index(self)
<DeepExtract>
if self.parent:
try:
del self.parent.contents[self.parent.index(self)]
except ValueError:
pass
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
return self
</DeepExtract>
reversedChildren = list(self.contents)
reversedChildren.reverse()
for child in reversedChildren:
myParent.insert(myIndex, child)
|
def replaceWithChildren(self):
myParent = self.parent
myIndex = self.parent.index(self)
if self.parent:
try:
del self.parent.contents[self.parent.index(self)]
except ValueError:
pass
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
return self
reversedChildren = list(self.contents)
reversedChildren.reverse()
for child in reversedChildren:
myParent.insert(myIndex, child)
|
commix
|
positive
|
def bpcExport(self):
name = unicode(self.parent.current_result.station.name.value).title().replace("'S", "'s")
system = unicode(self.parent.result_table.item(0, 9).text())
time = strftime('%Y-%m-%dT%H.%M.%S')
dir = self.parent.settings['export_dir'] + os.sep + system + '.' + name + '.' + time + '.bpc'
if self.parent.settings['native_dialog']:
file = QFileDialog.getSaveFileName(None, 'Save', dir, "Slopey's Best Price Calculator CSV-File (*.bpc)", "Slopey's Best Price Calculator CSV-File (*.bpc)")
else:
file = QFileDialog.getSaveFileName(None, 'Save', dir, "Slopey's Best Price Calculator CSV-File (*.bpc)", "Slopey's Best Price Calculator CSV-File (*.bpc)", QFileDialog.DontUseNativeDialog)
if not file:
return
<DeepExtract>
language = unicode(self.parent.settings['ocr_language'])
file = codecs.open(self.parent.settings.app_path + '' + os.sep + 'commodities.json', 'r')
self.comm_list = json.loads(file.read())
if language == 'big' or language == 'eng':
result = self.tableToList(False)
else:
self.comm_list = {v[language]: k for (k, v) in self.comm_list.iteritems()}
levels = {u'deu': {u'NIEDRIG': u'LOW', u'MITTEL': u'MED', u'HOCH': u'HIGH', u'': u''}, u'fra': {u'FAIBLE': u'LOW', u'MOYEN': u'MED', u'ÉLEVÉ': u'HIGH', u'': u''}}
translated = [self.tableToList(False)[0]]
for line in self.tableToList(False)[1:]:
if line[2].upper() in self.comm_list:
commodity = self.comm_list[line[2].upper()].title()
else:
commodity = line[2]
translated.append([line[0], line[1], commodity, line[3], line[4], line[5], levels[language][line[6].upper()].title(), line[7], levels[language][line[8].upper()].title(), line[9]])
result = translated
</DeepExtract>
all_rows = self.parent.result_table.rowCount()
'\n for row in xrange(0, all_rows):\n if int(self.parent.result_table.item(row,10).text()) < 1185:\n QMessageBox.warning(self.parent,"Screenshots too small", "The market table in some of your screenshots is under 1190 pixel wide. This is too little for reliable OCR result. There were too many faulty contributions in the past caused by such screenshots. Export aborted. ")\n return\n '
id = self.parent.settings['userID']
bpc_format = [['userID', 'System', 'Station', 'Commodity', 'Sell', 'Buy', 'Demand', '', 'Supply', '', 'Date']]
allowedtime = datetime.utcnow() - timedelta(hours=2)
for row in result[1:]:
if len(row[0]) == 0:
QMessageBox.warning(self.parent, 'No System Name', 'There are rows missing system name! Could not export to BPC format.')
return
timescreenshot = datetime.strptime(row[9], '%Y-%m-%dT%H:%M:%S+00:00')
if allowedtime > timescreenshot:
QMessageBox.warning(self.parent, 'Too old for BPC', 'You have been using at least one screenshot which is too old. BPC format only allows screenshots younger than 2 hours. Export aborted.')
return
bpc_format.append([unicode(id)] + row)
<DeepExtract>
for row in bpc_format:
if len(row[0]) == 0:
QMessageBox.warning(self.parent, 'No System Name', 'There are rows missing system name! \nThe exported CSV file might be incompatible with some tools.')
break
towrite = ''
for row in bpc_format:
for cell in row:
towrite += unicode(cell) + ';'
towrite += '\n'
file = unicode(file).encode(sys.getfilesystemencoding())
csv_file = codecs.open(file, 'w', sys.getfilesystemencoding())
csv_file.write(towrite)
csv_file.close()
</DeepExtract>
self.parent.statusbar.showMessage('To support the community consider exporting your data to EDDN', 4000)
|
def bpcExport(self):
name = unicode(self.parent.current_result.station.name.value).title().replace("'S", "'s")
system = unicode(self.parent.result_table.item(0, 9).text())
time = strftime('%Y-%m-%dT%H.%M.%S')
dir = self.parent.settings['export_dir'] + os.sep + system + '.' + name + '.' + time + '.bpc'
if self.parent.settings['native_dialog']:
file = QFileDialog.getSaveFileName(None, 'Save', dir, "Slopey's Best Price Calculator CSV-File (*.bpc)", "Slopey's Best Price Calculator CSV-File (*.bpc)")
else:
file = QFileDialog.getSaveFileName(None, 'Save', dir, "Slopey's Best Price Calculator CSV-File (*.bpc)", "Slopey's Best Price Calculator CSV-File (*.bpc)", QFileDialog.DontUseNativeDialog)
if not file:
return
language = unicode(self.parent.settings['ocr_language'])
file = codecs.open(self.parent.settings.app_path + '' + os.sep + 'commodities.json', 'r')
self.comm_list = json.loads(file.read())
if language == 'big' or language == 'eng':
result = self.tableToList(False)
else:
self.comm_list = {v[language]: k for (k, v) in self.comm_list.iteritems()}
levels = {u'deu': {u'NIEDRIG': u'LOW', u'MITTEL': u'MED', u'HOCH': u'HIGH', u'': u''}, u'fra': {u'FAIBLE': u'LOW', u'MOYEN': u'MED', u'ÉLEVÉ': u'HIGH', u'': u''}}
translated = [self.tableToList(False)[0]]
for line in self.tableToList(False)[1:]:
if line[2].upper() in self.comm_list:
commodity = self.comm_list[line[2].upper()].title()
else:
commodity = line[2]
translated.append([line[0], line[1], commodity, line[3], line[4], line[5], levels[language][line[6].upper()].title(), line[7], levels[language][line[8].upper()].title(), line[9]])
result = translated
all_rows = self.parent.result_table.rowCount()
'\n for row in xrange(0, all_rows):\n if int(self.parent.result_table.item(row,10).text()) < 1185:\n QMessageBox.warning(self.parent,"Screenshots too small", "The market table in some of your screenshots is under 1190 pixel wide. This is too little for reliable OCR result. There were too many faulty contributions in the past caused by such screenshots. Export aborted. ")\n return\n '
id = self.parent.settings['userID']
bpc_format = [['userID', 'System', 'Station', 'Commodity', 'Sell', 'Buy', 'Demand', '', 'Supply', '', 'Date']]
allowedtime = datetime.utcnow() - timedelta(hours=2)
for row in result[1:]:
if len(row[0]) == 0:
QMessageBox.warning(self.parent, 'No System Name', 'There are rows missing system name! Could not export to BPC format.')
return
timescreenshot = datetime.strptime(row[9], '%Y-%m-%dT%H:%M:%S+00:00')
if allowedtime > timescreenshot:
QMessageBox.warning(self.parent, 'Too old for BPC', 'You have been using at least one screenshot which is too old. BPC format only allows screenshots younger than 2 hours. Export aborted.')
return
bpc_format.append([unicode(id)] + row)
for row in bpc_format:
if len(row[0]) == 0:
QMessageBox.warning(self.parent, 'No System Name', 'There are rows missing system name! \nThe exported CSV file might be incompatible with some tools.')
break
towrite = ''
for row in bpc_format:
for cell in row:
towrite += unicode(cell) + ';'
towrite += '\n'
file = unicode(file).encode(sys.getfilesystemencoding())
csv_file = codecs.open(file, 'w', sys.getfilesystemencoding())
csv_file.write(towrite)
csv_file.close()
self.parent.statusbar.showMessage('To support the community consider exporting your data to EDDN', 4000)
|
EliteOCR
|
positive
|
def test_delete_not_found_token(self):
owner = get_user_model().objects.create_user(username='owner', password='owner', email='owner@owner.com')
<DeepExtract>
app = App.objects.create(id='news', owner=owner)
app.co_maintainers.set(co_maintainers)
app.save()
return AppRelease.objects.create(version=version, app=app)
</DeepExtract>
self._login_token()
response = self.api_client.delete(self.delete_url_nightly)
self.assertEqual(404, response.status_code)
|
def test_delete_not_found_token(self):
owner = get_user_model().objects.create_user(username='owner', password='owner', email='owner@owner.com')
app = App.objects.create(id='news', owner=owner)
app.co_maintainers.set(co_maintainers)
app.save()
return AppRelease.objects.create(version=version, app=app)
self._login_token()
response = self.api_client.delete(self.delete_url_nightly)
self.assertEqual(404, response.status_code)
|
appstore
|
positive
|
@retrying.retry(stop_max_attempt_number=3, wait_fixed=1000, retry_on_result=lambda result: not result)
def create_task_text_file(marathon_task_name: str, filename: str, lines: List[str]) -> bool:
output_cmd = 'bash -c "cat > {output_file} << EOL\n{content}\nEOL\nwc -l {output_file}"'.format(output_file=filename, content='\n'.join(lines))
<DeepExtract>
(rc, stdout, stderr) = _task_exec(marathon_task_name, output_cmd, print_output=print_output)
</DeepExtract>
if rc:
log.warning('Error creating file %s. rc=%s stdout=%s stderr=%s', filename, rc, stdout, stderr)
return False
written_lines = 0
try:
written_lines = int(stdout.split(' ')[0])
except Exception as e:
log.warning(e)
expected_lines = len('\n'.join(lines).split('\n'))
if written_lines != expected_lines:
log.warning('Number of written lines do not match. stdout=%s expected=%s written=%s', stdout, expected_lines, written_lines)
return False
return True
|
@retrying.retry(stop_max_attempt_number=3, wait_fixed=1000, retry_on_result=lambda result: not result)
def create_task_text_file(marathon_task_name: str, filename: str, lines: List[str]) -> bool:
output_cmd = 'bash -c "cat > {output_file} << EOL\n{content}\nEOL\nwc -l {output_file}"'.format(output_file=filename, content='\n'.join(lines))
(rc, stdout, stderr) = _task_exec(marathon_task_name, output_cmd, print_output=print_output)
if rc:
log.warning('Error creating file %s. rc=%s stdout=%s stderr=%s', filename, rc, stdout, stderr)
return False
written_lines = 0
try:
written_lines = int(stdout.split(' ')[0])
except Exception as e:
log.warning(e)
expected_lines = len('\n'.join(lines).split('\n'))
if written_lines != expected_lines:
log.warning('Number of written lines do not match. stdout=%s expected=%s written=%s', stdout, expected_lines, written_lines)
return False
return True
|
dcos-kafka-service
|
positive
|
def test_key_att(self):
<DeepExtract>
intel_machine.re_init()
intel_machine.base = 'dec'
intel_machine.flavor = 'att'
test_code = self.read_test_code(TEST_DIR_NAME + 'key_test.asm')
assemble(test_code, intel_machine)
</DeepExtract>
self.assertEqual(intel_machine.registers['EAX'], 71)
self.assertEqual(intel_machine.registers['EBX'], 71)
self.assertEqual(intel_machine.registers['ECX'], 1)
self.assertEqual(intel_machine.registers['ESP'], 512)
self.assertEqual(intel_machine.memory['9'], 83)
|
def test_key_att(self):
intel_machine.re_init()
intel_machine.base = 'dec'
intel_machine.flavor = 'att'
test_code = self.read_test_code(TEST_DIR_NAME + 'key_test.asm')
assemble(test_code, intel_machine)
self.assertEqual(intel_machine.registers['EAX'], 71)
self.assertEqual(intel_machine.registers['EBX'], 71)
self.assertEqual(intel_machine.registers['ECX'], 1)
self.assertEqual(intel_machine.registers['ESP'], 512)
self.assertEqual(intel_machine.memory['9'], 83)
|
Emu86
|
positive
|
def test_backoff_func(self):
"""Retry back-off defined via callable."""
def backoff(retry):
return 2 ** retry
with mock.patch('time.sleep') as sleep:
@retried(retries=3, backoff=backoff)
def f():
raise ValueError
with self.assertRaises(ValueError):
<DeepExtract>
return next(counter)
</DeepExtract>
calls = [mock.call(backoff(1)), mock.call(backoff(2)), mock.call(backoff(3))]
sleep.assert_has_calls(calls)
|
def test_backoff_func(self):
"""Retry back-off defined via callable."""
def backoff(retry):
return 2 ** retry
with mock.patch('time.sleep') as sleep:
@retried(retries=3, backoff=backoff)
def f():
raise ValueError
with self.assertRaises(ValueError):
return next(counter)
calls = [mock.call(backoff(1)), mock.call(backoff(2)), mock.call(backoff(3))]
sleep.assert_has_calls(calls)
|
dwave-cloud-client
|
positive
|
def ga_shape_target(approx_list, inside_flag_list, square_list, gt_bboxes_list, img_metas, approxs_per_octave, cfg, gt_bboxes_ignore_list=None, sampling=True, unmap_outputs=True):
"""Compute guided anchoring targets.
Args:
approx_list (list[list]): Multi level approxs of each image.
inside_flag_list (list[list]): Multi level inside flags of each image.
square_list (list[list]): Multi level squares of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
approxs_per_octave (int): number of approxs per octave
cfg (dict): RPN train configs.
gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
sampling (bool): sampling or not.
unmap_outputs (bool): unmap outputs or not.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(approx_list) == len(inside_flag_list) == len(square_list) == num_imgs
num_level_squares = [squares.size(0) for squares in square_list[0]]
inside_flag_flat_list = []
approx_flat_list = []
square_flat_list = []
for i in range(num_imgs):
assert len(square_list[i]) == len(inside_flag_list[i])
inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
approx_flat_list.append(torch.cat(approx_list[i]))
square_flat_list.append(torch.cat(square_list[i]))
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
(all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(ga_shape_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, gt_bboxes_list, gt_bboxes_ignore_list, img_metas, approxs_per_octave=approxs_per_octave, cfg=cfg, sampling=sampling, unmap_outputs=unmap_outputs)
if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
return None
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
<DeepExtract>
all_bbox_anchors = torch.stack(all_bbox_anchors, 0)
level_targets = []
start = 0
for n in num_level_squares:
end = start + n
level_targets.append(all_bbox_anchors[:, start:end].squeeze(0))
start = end
bbox_anchors_list = level_targets
</DeepExtract>
<DeepExtract>
all_bbox_gts = torch.stack(all_bbox_gts, 0)
level_targets = []
start = 0
for n in num_level_squares:
end = start + n
level_targets.append(all_bbox_gts[:, start:end].squeeze(0))
start = end
bbox_gts_list = level_targets
</DeepExtract>
<DeepExtract>
all_bbox_weights = torch.stack(all_bbox_weights, 0)
level_targets = []
start = 0
for n in num_level_squares:
end = start + n
level_targets.append(all_bbox_weights[:, start:end].squeeze(0))
start = end
bbox_weights_list = level_targets
</DeepExtract>
return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos, num_total_neg)
|
def ga_shape_target(approx_list, inside_flag_list, square_list, gt_bboxes_list, img_metas, approxs_per_octave, cfg, gt_bboxes_ignore_list=None, sampling=True, unmap_outputs=True):
"""Compute guided anchoring targets.
Args:
approx_list (list[list]): Multi level approxs of each image.
inside_flag_list (list[list]): Multi level inside flags of each image.
square_list (list[list]): Multi level squares of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
approxs_per_octave (int): number of approxs per octave
cfg (dict): RPN train configs.
gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
sampling (bool): sampling or not.
unmap_outputs (bool): unmap outputs or not.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(approx_list) == len(inside_flag_list) == len(square_list) == num_imgs
num_level_squares = [squares.size(0) for squares in square_list[0]]
inside_flag_flat_list = []
approx_flat_list = []
square_flat_list = []
for i in range(num_imgs):
assert len(square_list[i]) == len(inside_flag_list[i])
inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
approx_flat_list.append(torch.cat(approx_list[i]))
square_flat_list.append(torch.cat(square_list[i]))
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
(all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(ga_shape_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, gt_bboxes_list, gt_bboxes_ignore_list, img_metas, approxs_per_octave=approxs_per_octave, cfg=cfg, sampling=sampling, unmap_outputs=unmap_outputs)
if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
return None
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
all_bbox_anchors = torch.stack(all_bbox_anchors, 0)
level_targets = []
start = 0
for n in num_level_squares:
end = start + n
level_targets.append(all_bbox_anchors[:, start:end].squeeze(0))
start = end
bbox_anchors_list = level_targets
all_bbox_gts = torch.stack(all_bbox_gts, 0)
level_targets = []
start = 0
for n in num_level_squares:
end = start + n
level_targets.append(all_bbox_gts[:, start:end].squeeze(0))
start = end
bbox_gts_list = level_targets
all_bbox_weights = torch.stack(all_bbox_weights, 0)
level_targets = []
start = 0
for n in num_level_squares:
end = start + n
level_targets.append(all_bbox_weights[:, start:end].squeeze(0))
start = end
bbox_weights_list = level_targets
return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos, num_total_neg)
|
Dense-RepPoints
|
positive
|
def update_partner_vat_continue(self):
<DeepExtract>
if not self.partner_id:
raise UserError(_('You must select a vendor to update.'))
self.partner_id.write(self._prepare_partner_update())
</DeepExtract>
return self.import_invoice()
|
def update_partner_vat_continue(self):
if not self.partner_id:
raise UserError(_('You must select a vendor to update.'))
self.partner_id.write(self._prepare_partner_update())
return self.import_invoice()
|
edi
|
positive
|
def mock_is_dir_check(dirname):
<DeepExtract>
dirs = ['/tmp', '~/tmp', '~/abc']
</DeepExtract>
return True if dirname in dirs else False
|
def mock_is_dir_check(dirname):
dirs = ['/tmp', '~/tmp', '~/abc']
return True if dirname in dirs else False
|
cloud-validation-framework
|
positive
|
@optional_auth
@arg.project
@arg.json
def cloud__list(self) -> None:
"""List cloud types"""
<DeepExtract>
if getattr(self.args, 'project', None) and self.args.project:
project = self.args.project
default_project = self.config.get('default_project', '')
if False and (not default_project):
raise argx.UserError('Specify project: use --project in the command line or the default_project item in the config file.')
project = default_project
</DeepExtract>
if project and (not self.client.auth_token):
raise argx.UserError('authentication is required to list clouds for a specific project')
self.print_response(self.client.get_clouds(project=project), json=self.args.json)
|
@optional_auth
@arg.project
@arg.json
def cloud__list(self) -> None:
"""List cloud types"""
if getattr(self.args, 'project', None) and self.args.project:
project = self.args.project
default_project = self.config.get('default_project', '')
if False and (not default_project):
raise argx.UserError('Specify project: use --project in the command line or the default_project item in the config file.')
project = default_project
if project and (not self.client.auth_token):
raise argx.UserError('authentication is required to list clouds for a specific project')
self.print_response(self.client.get_clouds(project=project), json=self.args.json)
|
aiven-client
|
positive
|
def set(self):
<DeepExtract>
reg = self.versionCode_ref['write']
result = self.set_Float(reg, self.versionCode)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.versionCode = self.versionCode
return result
</DeepExtract>
<DeepExtract>
reg = self.infoConfig_ref['write']
result = self.set_String(reg, self.infoConfig, self.infoConfig_ref['size'])
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.infoConfig = self.infoConfig
return result
</DeepExtract>
<DeepExtract>
reg = self.skeletonPosition_ref['write']
result = self.set_String(reg, self.skeletonPosition, self.skeletonPosition_ref['size'])
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.skeletonPosition = self.skeletonPosition
return result
</DeepExtract>
<DeepExtract>
reg = self.mode_ref['write']
result = True
if self.mode >= self.mode_ref['min'] and self.mode <= self.mode_ref['max']:
result = self.set_Byte(reg, self.mode)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.mode = self.mode
return result
</DeepExtract>
<DeepExtract>
reg = self.limitBW_ref['write']
if self.limitBW < self.limitBW_ref['min']:
self.limitBW = self.limitBW_ref['min']
if self.limitBW > self.limitBW_ref['max']:
self.limitBW = self.limitBW_ref['max']
result = self.set_Int(reg, self.limitBW)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.limitBW = self.limitBW
return result
</DeepExtract>
<DeepExtract>
reg = self.limitFW_ref['write']
if self.limitFW < self.limitFW_ref['min']:
self.limitFW = self.limitFW_ref['min']
if self.limitFW > self.limitFW_ref['max']:
self.limitFW = self.limitFW_ref['max']
result = self.set_Int(reg, self.limitFW)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.limitFW = self.limitFW
return result
</DeepExtract>
<DeepExtract>
reg = ''
if self.sensibility < self.sensibility_ref['min']:
self.sensibility = self.sensibility_ref['min']
if self.sensibility > self.sensibility_ref['max']:
self.sensibility = self.sensibility_ref['max']
result = self.set_Byte(self.sensibility_ref['write'], self.sensibility)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.sensibility = self.sensibility
return result
</DeepExtract>
<DeepExtract>
reg = self.offset_ref['write']
if self.offset < self.offset_ref['min']:
self.offset = self.offset_ref['min']
if self.offset > self.offset_ref['max']:
self.offset = self.offset_ref['max']
result = self.set_Byte(reg, self.offset)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.offset = self.offset
return result
</DeepExtract>
<DeepExtract>
reg = self.deadBand_ref['write']
if self.deadBand < self.deadBand_ref['min']:
self.deadBand = self.deadBand_ref['min']
if self.deadBand > self.deadBand_ref['max']:
self.deadBand = self.deadBand_ref['max']
result = self.set_Int(reg, self.deadBand)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.deadBand = self.deadBand
return result
</DeepExtract>
<DeepExtract>
reg = self.kpPunch_ref['write']
if self.kpPunch < self.kpPunch_ref['min']:
self.kpPunch = self.kpPunch_ref['min']
if self.kpPunch > self.kpPunch_ref['max']:
self.kpPunch = self.kpPunch_ref['max']
result = self.set_Float(reg, self.kpPunch)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.kpPunch = self.kpPunch
return result
</DeepExtract>
<DeepExtract>
reg = self.kdDumping_ref['write']
if self.kdDumping < self.kdDumping_ref['min']:
self.kdDumping = self.kdDumping_ref['min']
if self.kdDumping > self.kdDumping_ref['max']:
self.kdDumping = self.kdDumping_ref['max']
result = self.set_Float(reg, self.kdDumping)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.kdDumping = self.kdDumping
return result
</DeepExtract>
<DeepExtract>
reg = self.kiStretch_ref['write']
if self.kiStretch < self.kiStretch_ref['min']:
self.kiStretch = self.kiStretch_ref['min']
if self.kiStretch > self.kiStretch_ref['max']:
self.kiStretch = self.kiStretch_ref['max']
result = self.set_Float(reg, self.kiStretch)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.kiStretch = self.kiStretch
return result
</DeepExtract>
<DeepExtract>
self.currentMaxSet = int(self.currentMaxSet * 10)
reg = self.currentMaxSet_ref['write']
if self.currentMaxSet < self.currentMaxSet_ref['min']:
self.currentMaxSet = self.currentMaxSet_ref['min']
if self.currentMaxSet > self.currentMaxSet_ref['max']:
self.currentMaxSet = self.currentMaxSet_ref['max']
result = self.set_Byte(reg, self.currentMaxSet)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.currentMaxSet = self.currentMaxSet
return result
</DeepExtract>
<DeepExtract>
reg = ''
if self.protectionGoSet < self.protectionGoSet_ref['min']:
self.protectionGoSet = self.protectionGoSet_ref['min']
if self.protectionGoSet > self.protectionGoSet_ref['max']:
self.protectionGoSet = self.protectionGoSet_ref['max']
result = self.set_Byte(self.protectionGoSet_ref['write'], self.protectionGoSet)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.protectionGoSet = self.protectionGoSet
return result
</DeepExtract>
<DeepExtract>
reg = ''
if self.temperatureMaxSet < self.temperatureMaxSet_ref['min']:
self.temperatureMaxSet = self.temperatureMaxSet_ref['min']
if self.temperatureMaxSet > self.temperatureMaxSet_ref['max']:
self.temperatureMaxSet = self.temperatureMaxSet_ref['max']
result = self.set_Byte(self.temperatureMaxSet_ref['write'], self.temperatureMaxSet)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.temperatureMaxSet = self.temperatureMaxSet
return result
</DeepExtract>
<DeepExtract>
reg = ''
if self.positionCurrent < self.limitBW_ref['min']:
self.positionCurrent = self.limitBW_ref['min']
if self.positionCurrent > self.limitFW_ref['max']:
self.positionCurrent = self.limitFW_ref['max']
result = self.set_Int(self.positionCurrent_ref['write'], self.positionCurrent)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.positionCurrent = self.positionCurrent
return result
</DeepExtract>
<DeepExtract>
reg = ''
if self.speedCurrent < self.speedCurrent_ref['min']:
self.speedCurrent = self.speedCurrent_ref['min']
if self.speedCurrent > self.speedCurrent_ref['max']:
self.speedCurrent = self.speedCurrent_ref['max']
result = self.set_Byte(self.speedCurrent_ref['write'], self.speedCurrent)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.speedCurrent = self.speedCurrent
return result
</DeepExtract>
|
def set(self):
reg = self.versionCode_ref['write']
result = self.set_Float(reg, self.versionCode)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.versionCode = self.versionCode
return result
reg = self.infoConfig_ref['write']
result = self.set_String(reg, self.infoConfig, self.infoConfig_ref['size'])
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.infoConfig = self.infoConfig
return result
reg = self.skeletonPosition_ref['write']
result = self.set_String(reg, self.skeletonPosition, self.skeletonPosition_ref['size'])
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.skeletonPosition = self.skeletonPosition
return result
reg = self.mode_ref['write']
result = True
if self.mode >= self.mode_ref['min'] and self.mode <= self.mode_ref['max']:
result = self.set_Byte(reg, self.mode)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.mode = self.mode
return result
reg = self.limitBW_ref['write']
if self.limitBW < self.limitBW_ref['min']:
self.limitBW = self.limitBW_ref['min']
if self.limitBW > self.limitBW_ref['max']:
self.limitBW = self.limitBW_ref['max']
result = self.set_Int(reg, self.limitBW)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.limitBW = self.limitBW
return result
reg = self.limitFW_ref['write']
if self.limitFW < self.limitFW_ref['min']:
self.limitFW = self.limitFW_ref['min']
if self.limitFW > self.limitFW_ref['max']:
self.limitFW = self.limitFW_ref['max']
result = self.set_Int(reg, self.limitFW)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.limitFW = self.limitFW
return result
reg = ''
if self.sensibility < self.sensibility_ref['min']:
self.sensibility = self.sensibility_ref['min']
if self.sensibility > self.sensibility_ref['max']:
self.sensibility = self.sensibility_ref['max']
result = self.set_Byte(self.sensibility_ref['write'], self.sensibility)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.sensibility = self.sensibility
return result
reg = self.offset_ref['write']
if self.offset < self.offset_ref['min']:
self.offset = self.offset_ref['min']
if self.offset > self.offset_ref['max']:
self.offset = self.offset_ref['max']
result = self.set_Byte(reg, self.offset)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.offset = self.offset
return result
reg = self.deadBand_ref['write']
if self.deadBand < self.deadBand_ref['min']:
self.deadBand = self.deadBand_ref['min']
if self.deadBand > self.deadBand_ref['max']:
self.deadBand = self.deadBand_ref['max']
result = self.set_Int(reg, self.deadBand)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.deadBand = self.deadBand
return result
reg = self.kpPunch_ref['write']
if self.kpPunch < self.kpPunch_ref['min']:
self.kpPunch = self.kpPunch_ref['min']
if self.kpPunch > self.kpPunch_ref['max']:
self.kpPunch = self.kpPunch_ref['max']
result = self.set_Float(reg, self.kpPunch)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.kpPunch = self.kpPunch
return result
reg = self.kdDumping_ref['write']
if self.kdDumping < self.kdDumping_ref['min']:
self.kdDumping = self.kdDumping_ref['min']
if self.kdDumping > self.kdDumping_ref['max']:
self.kdDumping = self.kdDumping_ref['max']
result = self.set_Float(reg, self.kdDumping)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.kdDumping = self.kdDumping
return result
reg = self.kiStretch_ref['write']
if self.kiStretch < self.kiStretch_ref['min']:
self.kiStretch = self.kiStretch_ref['min']
if self.kiStretch > self.kiStretch_ref['max']:
self.kiStretch = self.kiStretch_ref['max']
result = self.set_Float(reg, self.kiStretch)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.kiStretch = self.kiStretch
return result
self.currentMaxSet = int(self.currentMaxSet * 10)
reg = self.currentMaxSet_ref['write']
if self.currentMaxSet < self.currentMaxSet_ref['min']:
self.currentMaxSet = self.currentMaxSet_ref['min']
if self.currentMaxSet > self.currentMaxSet_ref['max']:
self.currentMaxSet = self.currentMaxSet_ref['max']
result = self.set_Byte(reg, self.currentMaxSet)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.currentMaxSet = self.currentMaxSet
return result
reg = ''
if self.protectionGoSet < self.protectionGoSet_ref['min']:
self.protectionGoSet = self.protectionGoSet_ref['min']
if self.protectionGoSet > self.protectionGoSet_ref['max']:
self.protectionGoSet = self.protectionGoSet_ref['max']
result = self.set_Byte(self.protectionGoSet_ref['write'], self.protectionGoSet)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.protectionGoSet = self.protectionGoSet
return result
reg = ''
if self.temperatureMaxSet < self.temperatureMaxSet_ref['min']:
self.temperatureMaxSet = self.temperatureMaxSet_ref['min']
if self.temperatureMaxSet > self.temperatureMaxSet_ref['max']:
self.temperatureMaxSet = self.temperatureMaxSet_ref['max']
result = self.set_Byte(self.temperatureMaxSet_ref['write'], self.temperatureMaxSet)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.temperatureMaxSet = self.temperatureMaxSet
return result
reg = ''
if self.positionCurrent < self.limitBW_ref['min']:
self.positionCurrent = self.limitBW_ref['min']
if self.positionCurrent > self.limitFW_ref['max']:
self.positionCurrent = self.limitFW_ref['max']
result = self.set_Int(self.positionCurrent_ref['write'], self.positionCurrent)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.positionCurrent = self.positionCurrent
return result
reg = ''
if self.speedCurrent < self.speedCurrent_ref['min']:
self.speedCurrent = self.speedCurrent_ref['min']
if self.speedCurrent > self.speedCurrent_ref['max']:
self.speedCurrent = self.speedCurrent_ref['max']
result = self.set_Byte(self.speedCurrent_ref['write'], self.speedCurrent)
if result & self.debug:
print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
else:
self.speedCurrent = self.speedCurrent
return result
|
choreograph-git
|
positive
|
def exit(self, status=0, message=None):
if message:
<DeepExtract>
if message:
if _sys.stderr is None:
_sys.stderr = _sys.stderr
_sys.stderr.write(message)
</DeepExtract>
_sys.exit(status)
|
def exit(self, status=0, message=None):
if message:
if message:
if _sys.stderr is None:
_sys.stderr = _sys.stderr
_sys.stderr.write(message)
_sys.exit(status)
|
coq-tools
|
positive
|
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
<DeepExtract>
if rank > len(spectrum):
raise ValueError('The tested rank cannot exceed the rank of the dataset')
pu = -rank * log(2.0)
for i in range(rank):
pu += gammaln((n_features - i) / 2.0) - log(np.pi) * (n_features - i) / 2.0
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.0
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.0
m = n_features * rank - rank * (rank + 1.0) / 2.0
pp = log(2.0 * np.pi) * (m + rank + 1.0) / 2.0
pa = 0.0
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0
ll[rank] = ll
</DeepExtract>
return ll.argmax()
|
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
if rank > len(spectrum):
raise ValueError('The tested rank cannot exceed the rank of the dataset')
pu = -rank * log(2.0)
for i in range(rank):
pu += gammaln((n_features - i) / 2.0) - log(np.pi) * (n_features - i) / 2.0
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.0
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.0
m = n_features * rank - rank * (rank + 1.0) / 2.0
pp = log(2.0 * np.pi) * (m + rank + 1.0) / 2.0
pa = 0.0
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0
ll[rank] = ll
return ll.argmax()
|
dlatk
|
positive
|
def add_assume_role_principals(self, principals):
for principal in principals:
if isinstance(principal, dict):
<DeepExtract>
if principal and (not isinstance(principal, dict)):
principal = dict(AWS=principal)
if self.contains(principal=principal, action='sts:AssumeRole', effect=effect, resource=resource):
return
statement = dict(Action=[], Effect=effect)
if principal:
statement['Principal'] = principal
self.policy['Statement'].append(statement)
if 'sts:AssumeRole':
for 'sts:AssumeRole' in 'sts:AssumeRole' if isinstance('sts:AssumeRole', list) else ['sts:AssumeRole']:
self.add_action('sts:AssumeRole')
if resource:
for resource in resource if isinstance(resource, list) else [resource]:
self.add_resource(resource)
</DeepExtract>
elif hasattr(principal, 'arn'):
<DeepExtract>
if {'AWS': principal.arn} and (not isinstance({'AWS': principal.arn}, dict)):
{'AWS': principal.arn} = dict(AWS={'AWS': principal.arn})
if self.contains(principal={'AWS': principal.arn}, action='sts:AssumeRole', effect=effect, resource=resource):
return
statement = dict(Action=[], Effect=effect)
if {'AWS': principal.arn}:
statement['Principal'] = {'AWS': principal.arn}
self.policy['Statement'].append(statement)
if 'sts:AssumeRole':
for 'sts:AssumeRole' in 'sts:AssumeRole' if isinstance('sts:AssumeRole', list) else ['sts:AssumeRole']:
self.add_action('sts:AssumeRole')
if resource:
for resource in resource if isinstance(resource, list) else [resource]:
self.add_resource(resource)
</DeepExtract>
else:
<DeepExtract>
if {'Service': principal + '.amazonaws.com'} and (not isinstance({'Service': principal + '.amazonaws.com'}, dict)):
{'Service': principal + '.amazonaws.com'} = dict(AWS={'Service': principal + '.amazonaws.com'})
if self.contains(principal={'Service': principal + '.amazonaws.com'}, action='sts:AssumeRole', effect=effect, resource=resource):
return
statement = dict(Action=[], Effect=effect)
if {'Service': principal + '.amazonaws.com'}:
statement['Principal'] = {'Service': principal + '.amazonaws.com'}
self.policy['Statement'].append(statement)
if 'sts:AssumeRole':
for 'sts:AssumeRole' in 'sts:AssumeRole' if isinstance('sts:AssumeRole', list) else ['sts:AssumeRole']:
self.add_action('sts:AssumeRole')
if resource:
for resource in resource if isinstance(resource, list) else [resource]:
self.add_resource(resource)
</DeepExtract>
|
def add_assume_role_principals(self, principals):
for principal in principals:
if isinstance(principal, dict):
if principal and (not isinstance(principal, dict)):
principal = dict(AWS=principal)
if self.contains(principal=principal, action='sts:AssumeRole', effect=effect, resource=resource):
return
statement = dict(Action=[], Effect=effect)
if principal:
statement['Principal'] = principal
self.policy['Statement'].append(statement)
if 'sts:AssumeRole':
for 'sts:AssumeRole' in 'sts:AssumeRole' if isinstance('sts:AssumeRole', list) else ['sts:AssumeRole']:
self.add_action('sts:AssumeRole')
if resource:
for resource in resource if isinstance(resource, list) else [resource]:
self.add_resource(resource)
elif hasattr(principal, 'arn'):
if {'AWS': principal.arn} and (not isinstance({'AWS': principal.arn}, dict)):
{'AWS': principal.arn} = dict(AWS={'AWS': principal.arn})
if self.contains(principal={'AWS': principal.arn}, action='sts:AssumeRole', effect=effect, resource=resource):
return
statement = dict(Action=[], Effect=effect)
if {'AWS': principal.arn}:
statement['Principal'] = {'AWS': principal.arn}
self.policy['Statement'].append(statement)
if 'sts:AssumeRole':
for 'sts:AssumeRole' in 'sts:AssumeRole' if isinstance('sts:AssumeRole', list) else ['sts:AssumeRole']:
self.add_action('sts:AssumeRole')
if resource:
for resource in resource if isinstance(resource, list) else [resource]:
self.add_resource(resource)
else:
if {'Service': principal + '.amazonaws.com'} and (not isinstance({'Service': principal + '.amazonaws.com'}, dict)):
{'Service': principal + '.amazonaws.com'} = dict(AWS={'Service': principal + '.amazonaws.com'})
if self.contains(principal={'Service': principal + '.amazonaws.com'}, action='sts:AssumeRole', effect=effect, resource=resource):
return
statement = dict(Action=[], Effect=effect)
if {'Service': principal + '.amazonaws.com'}:
statement['Principal'] = {'Service': principal + '.amazonaws.com'}
self.policy['Statement'].append(statement)
if 'sts:AssumeRole':
for 'sts:AssumeRole' in 'sts:AssumeRole' if isinstance('sts:AssumeRole', list) else ['sts:AssumeRole']:
self.add_action('sts:AssumeRole')
if resource:
for resource in resource if isinstance(resource, list) else [resource]:
self.add_resource(resource)
|
aegea
|
positive
|
def test_merge_requirements_devtools(self):
<DeepExtract>
self.recipe = TestingRecipe(self.buildout, name, options)
</DeepExtract>
self.recipe.version_detected = '10.0-1'
self.recipe.merge_requirements()
from .. import devtools
self.assertTrue(set(devtools.requirements).issubset(self.recipe.requirements))
|
def test_merge_requirements_devtools(self):
self.recipe = TestingRecipe(self.buildout, name, options)
self.recipe.version_detected = '10.0-1'
self.recipe.merge_requirements()
from .. import devtools
self.assertTrue(set(devtools.requirements).issubset(self.recipe.requirements))
|
anybox.recipe.odoo
|
positive
|
def __init__(self, rule, args=None):
super(NewTermsRule, self).__init__(rule, args)
self.seen_values = {}
if 'fields' not in self.rules:
if 'query_key' not in self.rules:
raise EAException('fields or query_key must be specified')
self.fields = self.rules['query_key']
else:
self.fields = self.rules['fields']
if not self.fields:
raise EAException('fields must not be an empty list')
if type(self.fields) != list:
self.fields = [self.fields]
if self.rules.get('use_terms_query') and (len(self.fields) != 1 or (len(self.fields) == 1 and type(self.fields[0]) == list)):
raise EAException('use_terms_query can only be used with a single non-composite field')
if self.rules.get('use_terms_query'):
if [self.rules['query_key']] != self.fields:
raise EAException('If use_terms_query is specified, you cannot specify different query_key and fields')
if not self.rules.get('query_key').endswith('.keyword') and (not self.rules.get('query_key').endswith('.raw')):
if self.rules.get('use_keyword_postfix', True):
elastalert_logger.warn('Warning: If query_key is a non-keyword field, you must set use_keyword_postfix to false, or add .keyword/.raw to your query_key.')
try:
<DeepExtract>
self.es = elasticsearch_client(self.rules)
window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))
field_name = {'field': '', 'size': 2147483647}
query_template = {'aggs': {'values': {'terms': field_name}}}
if args and hasattr(args, 'start') and args.start:
end = ts_to_dt(args.start)
elif 'start_date' in self.rules:
end = ts_to_dt(self.rules['start_date'])
else:
end = ts_now()
start = end - window_size
step = datetime.timedelta(**self.rules.get('window_step_size', {'days': 1}))
for field in self.fields:
tmp_start = start
tmp_end = min(start + step, end)
time_filter = {self.rules['timestamp_field']: {'lt': self.rules['dt_to_ts'](tmp_end), 'gte': self.rules['dt_to_ts'](tmp_start)}}
query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
query = {'aggs': {'filtered': query_template}, 'size': 0}
if 'filter' in self.rules:
for item in self.rules['filter']:
query_template['filter']['bool']['must'].append(item)
if type(field) == list:
self.seen_values.setdefault(tuple(field), [])
level = query_template['aggs']
for (i, sub_field) in enumerate(field):
if self.rules.get('use_keyword_postfix', True):
level['values']['terms']['field'] = add_keyword_postfix(sub_field)
else:
level['values']['terms']['field'] = sub_field
if i < len(field) - 1:
level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}}
level = level['values']['aggs']
else:
self.seen_values.setdefault(field, [])
if self.rules.get('use_keyword_postfix', True):
field_name['field'] = add_keyword_postfix(field)
else:
field_name['field'] = field
while tmp_start < end:
if self.rules.get('use_strftime_index'):
index = format_index(self.rules['index'], tmp_start, tmp_end)
else:
index = self.rules['index']
res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s')
if 'aggregations' in res:
buckets = res['aggregations']['filtered']['values']['buckets']
if type(field) == list:
for bucket in buckets:
self.seen_values[tuple(field)] += self.flatten_aggregation_hierarchy(bucket)
else:
keys = [bucket['key'] for bucket in buckets]
self.seen_values[field] += keys
elif type(field) == list:
self.seen_values.setdefault(tuple(field), [])
else:
self.seen_values.setdefault(field, [])
if tmp_start == tmp_end:
break
tmp_start = tmp_end
tmp_end = min(tmp_start + step, end)
time_filter[self.rules['timestamp_field']] = {'lt': self.rules['dt_to_ts'](tmp_end), 'gte': self.rules['dt_to_ts'](tmp_start)}
for (key, values) in self.seen_values.items():
if not values:
if type(key) == tuple:
elastalert_logger.warning('No results were found from all sub-aggregations. This can either indicate that there is no baseline data OR that a non-primitive field was used in a composite key.')
else:
elastalert_logger.info('Found no values for %s' % field)
continue
self.seen_values[key] = list(set(values))
elastalert_logger.info('Found %s unique values for %s' % (len(set(values)), key))
</DeepExtract>
except Exception as e:
raise EAException('Error searching for existing terms: %s' % repr(e)).with_traceback(sys.exc_info()[2])
|
def __init__(self, rule, args=None):
super(NewTermsRule, self).__init__(rule, args)
self.seen_values = {}
if 'fields' not in self.rules:
if 'query_key' not in self.rules:
raise EAException('fields or query_key must be specified')
self.fields = self.rules['query_key']
else:
self.fields = self.rules['fields']
if not self.fields:
raise EAException('fields must not be an empty list')
if type(self.fields) != list:
self.fields = [self.fields]
if self.rules.get('use_terms_query') and (len(self.fields) != 1 or (len(self.fields) == 1 and type(self.fields[0]) == list)):
raise EAException('use_terms_query can only be used with a single non-composite field')
if self.rules.get('use_terms_query'):
if [self.rules['query_key']] != self.fields:
raise EAException('If use_terms_query is specified, you cannot specify different query_key and fields')
if not self.rules.get('query_key').endswith('.keyword') and (not self.rules.get('query_key').endswith('.raw')):
if self.rules.get('use_keyword_postfix', True):
elastalert_logger.warn('Warning: If query_key is a non-keyword field, you must set use_keyword_postfix to false, or add .keyword/.raw to your query_key.')
try:
self.es = elasticsearch_client(self.rules)
window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))
field_name = {'field': '', 'size': 2147483647}
query_template = {'aggs': {'values': {'terms': field_name}}}
if args and hasattr(args, 'start') and args.start:
end = ts_to_dt(args.start)
elif 'start_date' in self.rules:
end = ts_to_dt(self.rules['start_date'])
else:
end = ts_now()
start = end - window_size
step = datetime.timedelta(**self.rules.get('window_step_size', {'days': 1}))
for field in self.fields:
tmp_start = start
tmp_end = min(start + step, end)
time_filter = {self.rules['timestamp_field']: {'lt': self.rules['dt_to_ts'](tmp_end), 'gte': self.rules['dt_to_ts'](tmp_start)}}
query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
query = {'aggs': {'filtered': query_template}, 'size': 0}
if 'filter' in self.rules:
for item in self.rules['filter']:
query_template['filter']['bool']['must'].append(item)
if type(field) == list:
self.seen_values.setdefault(tuple(field), [])
level = query_template['aggs']
for (i, sub_field) in enumerate(field):
if self.rules.get('use_keyword_postfix', True):
level['values']['terms']['field'] = add_keyword_postfix(sub_field)
else:
level['values']['terms']['field'] = sub_field
if i < len(field) - 1:
level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}}
level = level['values']['aggs']
else:
self.seen_values.setdefault(field, [])
if self.rules.get('use_keyword_postfix', True):
field_name['field'] = add_keyword_postfix(field)
else:
field_name['field'] = field
while tmp_start < end:
if self.rules.get('use_strftime_index'):
index = format_index(self.rules['index'], tmp_start, tmp_end)
else:
index = self.rules['index']
res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s')
if 'aggregations' in res:
buckets = res['aggregations']['filtered']['values']['buckets']
if type(field) == list:
for bucket in buckets:
self.seen_values[tuple(field)] += self.flatten_aggregation_hierarchy(bucket)
else:
keys = [bucket['key'] for bucket in buckets]
self.seen_values[field] += keys
elif type(field) == list:
self.seen_values.setdefault(tuple(field), [])
else:
self.seen_values.setdefault(field, [])
if tmp_start == tmp_end:
break
tmp_start = tmp_end
tmp_end = min(tmp_start + step, end)
time_filter[self.rules['timestamp_field']] = {'lt': self.rules['dt_to_ts'](tmp_end), 'gte': self.rules['dt_to_ts'](tmp_start)}
for (key, values) in self.seen_values.items():
if not values:
if type(key) == tuple:
elastalert_logger.warning('No results were found from all sub-aggregations. This can either indicate that there is no baseline data OR that a non-primitive field was used in a composite key.')
else:
elastalert_logger.info('Found no values for %s' % field)
continue
self.seen_values[key] = list(set(values))
elastalert_logger.info('Found %s unique values for %s' % (len(set(values)), key))
except Exception as e:
raise EAException('Error searching for existing terms: %s' % repr(e)).with_traceback(sys.exc_info()[2])
|
elastalert2
|
positive
|
def system_data(lines, type_map=None, type_idx_zero=True, unwrap=False):
<DeepExtract>
marks = []
for (idx, ii) in enumerate(lines):
if 'ITEM: TIMESTEP' in ii:
marks.append(idx)
if len(marks) == 0:
array_lines = None
elif len(marks) == 1:
array_lines = [lines]
else:
block_size = marks[1] - marks[0]
ret = []
for ii in marks:
ret.append(lines[ii:ii + block_size])
array_lines = ret
array_lines = None
</DeepExtract>
lines = array_lines[0]
system = {}
<DeepExtract>
atype = get_atype(lines)
natoms_vec = []
natomtypes = get_natomtypes(lines)
for ii in range(natomtypes):
natoms_vec.append(sum(atype == ii + 1))
assert sum(natoms_vec) == get_natoms(lines)
system['atom_numbs'] = natoms_vec
</DeepExtract>
system['atom_names'] = []
if type_map == None:
for ii in range(len(system['atom_numbs'])):
system['atom_names'].append('TYPE_%d' % ii)
else:
assert len(type_map) >= len(system['atom_numbs'])
for ii in range(len(system['atom_numbs'])):
system['atom_names'].append(type_map[ii])
<DeepExtract>
(blk, h) = _get_block(lines, 'BOX BOUNDS')
bounds = np.zeros([3, 2])
tilt = np.zeros([3])
load_tilt = 'xy xz yz' in h
for dd in range(3):
info = [float(jj) for jj in blk[dd].split()]
bounds[dd][0] = info[0]
bounds[dd][1] = info[1]
if load_tilt:
tilt[dd] = info[2]
(bounds, tilt) = (bounds, tilt)
</DeepExtract>
<DeepExtract>
xy = tilt[0]
xz = tilt[1]
yz = tilt[2]
xlo = bounds[0][0] - min(0.0, xy, xz, xy + xz)
xhi = bounds[0][1] - max(0.0, xy, xz, xy + xz)
ylo = bounds[1][0] - min(0.0, yz)
yhi = bounds[1][1] - max(0.0, yz)
zlo = bounds[2][0]
zhi = bounds[2][1]
info = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]
(orig, cell) = lmp.lmpbox2box(info, tilt)
</DeepExtract>
system['orig'] = np.array(orig) - np.array(orig)
system['cells'] = [np.array(cell)]
<DeepExtract>
(blk, head) = _get_block(lines, 'ATOMS')
keys = head.split()
id_idx = keys.index('id') - 2
tidx = keys.index('type') - 2
atype = []
for ii in blk:
atype.append([int(ii.split()[id_idx]), int(ii.split()[tidx])])
atype.sort()
atype = np.array(atype, dtype=int)
if type_idx_zero:
system['atom_types'] = atype[:, 1] - 1
else:
system['atom_types'] = atype[:, 1]
</DeepExtract>
system['coords'] = [safe_get_posi(lines, cell, np.array(orig), unwrap)]
for ii in range(1, len(array_lines)):
<DeepExtract>
(blk, h) = _get_block(array_lines[ii], 'BOX BOUNDS')
bounds = np.zeros([3, 2])
tilt = np.zeros([3])
load_tilt = 'xy xz yz' in h
for dd in range(3):
info = [float(jj) for jj in blk[dd].split()]
bounds[dd][0] = info[0]
bounds[dd][1] = info[1]
if load_tilt:
tilt[dd] = info[2]
(bounds, tilt) = (bounds, tilt)
</DeepExtract>
<DeepExtract>
xy = tilt[0]
xz = tilt[1]
yz = tilt[2]
xlo = bounds[0][0] - min(0.0, xy, xz, xy + xz)
xhi = bounds[0][1] - max(0.0, xy, xz, xy + xz)
ylo = bounds[1][0] - min(0.0, yz)
yhi = bounds[1][1] - max(0.0, yz)
zlo = bounds[2][0]
zhi = bounds[2][1]
info = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]
(orig, cell) = lmp.lmpbox2box(info, tilt)
</DeepExtract>
system['cells'].append(cell)
<DeepExtract>
(blk, head) = _get_block(array_lines[ii], 'ATOMS')
keys = head.split()
id_idx = keys.index('id') - 2
tidx = keys.index('type') - 2
atype = []
for ii in blk:
atype.append([int(ii.split()[id_idx]), int(ii.split()[tidx])])
atype.sort()
atype = np.array(atype, dtype=int)
if type_idx_zero:
atype = atype[:, 1] - 1
else:
atype = atype[:, 1]
</DeepExtract>
idx = np.argsort(atype)[np.argsort(np.argsort(system['atom_types']))]
system['coords'].append(safe_get_posi(array_lines[ii], cell, np.array(orig), unwrap)[idx])
system['cells'] = np.array(system['cells'])
system['coords'] = np.array(system['coords'])
return system
|
def system_data(lines, type_map=None, type_idx_zero=True, unwrap=False):
marks = []
for (idx, ii) in enumerate(lines):
if 'ITEM: TIMESTEP' in ii:
marks.append(idx)
if len(marks) == 0:
array_lines = None
elif len(marks) == 1:
array_lines = [lines]
else:
block_size = marks[1] - marks[0]
ret = []
for ii in marks:
ret.append(lines[ii:ii + block_size])
array_lines = ret
lines = array_lines[0]
system = {}
atype = get_atype(lines)
natoms_vec = []
natomtypes = get_natomtypes(lines)
for ii in range(natomtypes):
natoms_vec.append(sum(atype == ii + 1))
assert sum(natoms_vec) == get_natoms(lines)
system['atom_numbs'] = natoms_vec
system['atom_names'] = []
if type_map == None:
for ii in range(len(system['atom_numbs'])):
system['atom_names'].append('TYPE_%d' % ii)
else:
assert len(type_map) >= len(system['atom_numbs'])
for ii in range(len(system['atom_numbs'])):
system['atom_names'].append(type_map[ii])
(blk, h) = _get_block(lines, 'BOX BOUNDS')
bounds = np.zeros([3, 2])
tilt = np.zeros([3])
load_tilt = 'xy xz yz' in h
for dd in range(3):
info = [float(jj) for jj in blk[dd].split()]
bounds[dd][0] = info[0]
bounds[dd][1] = info[1]
if load_tilt:
tilt[dd] = info[2]
xy = tilt[0]
xz = tilt[1]
yz = tilt[2]
xlo = bounds[0][0] - min(0.0, xy, xz, xy + xz)
xhi = bounds[0][1] - max(0.0, xy, xz, xy + xz)
ylo = bounds[1][0] - min(0.0, yz)
yhi = bounds[1][1] - max(0.0, yz)
zlo = bounds[2][0]
zhi = bounds[2][1]
info = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]
(orig, cell) = lmp.lmpbox2box(info, tilt)
system['orig'] = np.array(orig) - np.array(orig)
system['cells'] = [np.array(cell)]
(blk, head) = _get_block(lines, 'ATOMS')
keys = head.split()
id_idx = keys.index('id') - 2
tidx = keys.index('type') - 2
atype = []
for ii in blk:
atype.append([int(ii.split()[id_idx]), int(ii.split()[tidx])])
atype.sort()
atype = np.array(atype, dtype=int)
if type_idx_zero:
system['atom_types'] = atype[:, 1] - 1
else:
system['atom_types'] = atype[:, 1]
system['coords'] = [safe_get_posi(lines, cell, np.array(orig), unwrap)]
for ii in range(1, len(array_lines)):
(blk, h) = _get_block(array_lines[ii], 'BOX BOUNDS')
bounds = np.zeros([3, 2])
tilt = np.zeros([3])
load_tilt = 'xy xz yz' in h
for dd in range(3):
info = [float(jj) for jj in blk[dd].split()]
bounds[dd][0] = info[0]
bounds[dd][1] = info[1]
if load_tilt:
tilt[dd] = info[2]
xy = tilt[0]
xz = tilt[1]
yz = tilt[2]
xlo = bounds[0][0] - min(0.0, xy, xz, xy + xz)
xhi = bounds[0][1] - max(0.0, xy, xz, xy + xz)
ylo = bounds[1][0] - min(0.0, yz)
yhi = bounds[1][1] - max(0.0, yz)
zlo = bounds[2][0]
zhi = bounds[2][1]
info = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]
(orig, cell) = lmp.lmpbox2box(info, tilt)
system['cells'].append(cell)
(blk, head) = _get_block(array_lines[ii], 'ATOMS')
keys = head.split()
id_idx = keys.index('id') - 2
tidx = keys.index('type') - 2
atype = []
for ii in blk:
atype.append([int(ii.split()[id_idx]), int(ii.split()[tidx])])
atype.sort()
atype = np.array(atype, dtype=int)
if type_idx_zero:
atype = atype[:, 1] - 1
else:
atype = atype[:, 1]
idx = np.argsort(atype)[np.argsort(np.argsort(system['atom_types']))]
system['coords'].append(safe_get_posi(array_lines[ii], cell, np.array(orig), unwrap)[idx])
system['cells'] = np.array(system['cells'])
system['coords'] = np.array(system['coords'])
return system
|
dpdata
|
positive
|
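Aside: the dpdata entry converts LAMMPS dump box bounds into an origin plus a 3x3 cell matrix; the min/max corrections in the loop undo the tilt padding LAMMPS adds to the stored bounds. A sketch of the standard triclinic conversion that lmp.lmpbox2box performs on the corrected bounds (lmpbox_to_cell is a hypothetical stand-in):

    import numpy as np

    def lmpbox_to_cell(bounds, tilt):
        # bounds: untilted [(xlo, xhi), (ylo, yhi), (zlo, zhi)]; tilt: (xy, xz, yz).
        (xlo, xhi), (ylo, yhi), (zlo, zhi) = bounds
        xy, xz, yz = tilt
        orig = np.array([xlo, ylo, zlo])
        cell = np.array([[xhi - xlo, 0.0, 0.0],
                         [xy, yhi - ylo, 0.0],
                         [xz, yz, zhi - zlo]])
        return orig, cell

    orig, cell = lmpbox_to_cell([(0.0, 10.0), (0.0, 8.0), (0.0, 6.0)], (1.0, 0.5, 0.0))
    print(orig, cell, sep='\n')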
def handle_m2m_field(self, obj, field):
if isinstance(field, TagField):
<DeepExtract>
tag_string = str(getattr(obj, field.name))
fake_obj = FakeObject(field.name, tag_string)
fake_field = FakeField(field.name)
self.handle_field(fake_obj, fake_field)
</DeepExtract>
else:
super(Serializer, self).handle_m2m_field(obj, field)
|
def handle_m2m_field(self, obj, field):
if isinstance(field, TagField):
tag_string = str(getattr(obj, field.name))
fake_obj = FakeObject(field.name, tag_string)
fake_field = FakeField(field.name)
self.handle_field(fake_obj, fake_field)
else:
super(Serializer, self).handle_m2m_field(obj, field)
|
django-tagulous
|
positive
|
def __on_popup_style_reloaded(self, *args):
<DeepExtract>
if self.no_arrow:
a = 0
else:
a = int(self.popup_style.get('arrow_size', 9))
</DeepExtract>
p = int(self.popup_style.get('%s_padding' % self.popup_type, 7))
padding = {'up': (p + a, p, p, p), 'down': (p, p + a, p, p), 'left': (p, p, p + a, p), 'right': (p, p, p, p + a)}[self.pointer]
self.alignment.set_padding(*padding)
if self.popup_type == 'locked_list':
self.resize(10, 10)
|
def __on_popup_style_reloaded(self, *args):
if self.no_arrow:
a = 0
else:
a = int(self.popup_style.get('arrow_size', 9))
p = int(self.popup_style.get('%s_padding' % self.popup_type, 7))
padding = {'up': (p + a, p, p, p), 'down': (p, p + a, p, p), 'left': (p, p, p + a, p), 'right': (p, p, p, p + a)}[self.pointer]
self.alignment.set_padding(*padding)
if self.popup_type == 'locked_list':
self.resize(10, 10)
|
dockbarx
|
positive
|
def upload(self, file_prefix, fileobj):
<DeepExtract>
with resource_stream(__name__, 'data/managed-upload-infrastructure.yaml') as f:
template = f.read()
for output_name in [BUCKET_OUTPUT_NAME, LOG_DELIVERY_ROLE_ARN_OUTPUT_NAME]:
if output_name not in template:
LOG.debug("Output '%s' not found in managed upload infrastructure template:\n%s", output_name, template)
raise InternalError('Output not found in managed upload infrastructure template')
</DeepExtract>
<DeepExtract>
args = {'StackName': INFRA_STACK_NAME, 'TemplateBody': template}
LOG.info('Creating %s', INFRA_STACK_NAME)
try:
result = self.cfn_client.create_stack(**args, EnableTerminationProtection=True, Capabilities=['CAPABILITY_IAM'])
except self.cfn_client.exceptions.AlreadyExistsException:
LOG.info('%s already exists. Attempting to update', INFRA_STACK_NAME)
try:
result = self.cfn_client.update_stack(**args, Capabilities=['CAPABILITY_IAM'])
except ClientError as e:
msg = str(e)
if 'No updates are to be performed' in msg:
LOG.info('%s stack is up to date', INFRA_STACK_NAME)
stack_id = INFRA_STACK_NAME
else:
LOG.debug('%s stack update resulted in unknown ClientError', INFRA_STACK_NAME, exc_info=e)
raise DownstreamError('Unknown CloudFormation error') from e
else:
stack_id = result['StackId']
self._wait_for_stack(stack_id, 'stack_update_complete', '{} stack is up to date'.format(INFRA_STACK_NAME))
except ClientError as e:
LOG.debug('%s stack create resulted in unknown ClientError', INFRA_STACK_NAME, exc_info=e)
raise DownstreamError('Unknown CloudFormation error') from e
else:
stack_id = result['StackId']
self._wait_for_stack(stack_id, 'stack_create_complete', '{} stack was successfully created'.format(INFRA_STACK_NAME))
</DeepExtract>
<DeepExtract>
result = self.cfn_client.describe_stacks(StackName=stack_id)
outputs = result['Stacks'][0]['Outputs']
try:
self.bucket_name = next((output['OutputValue'] for output in outputs if output['OutputKey'] == BUCKET_OUTPUT_NAME))
except StopIteration:
LOG.debug("Outputs from stack '%s' did not contain '%s':\n%s", stack_id, BUCKET_OUTPUT_NAME, ', '.join((output['OutputKey'] for output in outputs)))
raise InternalError('Required output not found on stack')
</DeepExtract>
<DeepExtract>
result = self.cfn_client.describe_stacks(StackName=stack_id)
outputs = result['Stacks'][0]['Outputs']
try:
self.log_delivery_role_arn = next((output['OutputValue'] for output in outputs if output['OutputKey'] == LOG_DELIVERY_ROLE_ARN_OUTPUT_NAME))
except StopIteration:
LOG.debug("Outputs from stack '%s' did not contain '%s':\n%s", stack_id, LOG_DELIVERY_ROLE_ARN_OUTPUT_NAME, ', '.join((output['OutputKey'] for output in outputs)))
raise InternalError('Required output not found on stack')
</DeepExtract>
timestamp = datetime.utcnow().isoformat(timespec='seconds').replace(':', '-')
key = '{}-{}.zip'.format(file_prefix, timestamp)
LOG.debug("Uploading to '%s/%s'...", self.bucket_name, key)
try:
self.s3_client.upload_fileobj(fileobj, self.bucket_name, key)
except ClientError as e:
LOG.debug('S3 upload resulted in unknown ClientError', exc_info=e)
raise DownstreamError('Failed to upload artifacts to S3') from e
LOG.debug('Upload complete')
return 's3://{0}/{1}'.format(self.bucket_name, key)
|
def upload(self, file_prefix, fileobj):
with resource_stream(__name__, 'data/managed-upload-infrastructure.yaml') as f:
template = f.read()
for output_name in [BUCKET_OUTPUT_NAME, LOG_DELIVERY_ROLE_ARN_OUTPUT_NAME]:
if output_name not in template:
LOG.debug("Output '%s' not found in managed upload infrastructure template:\n%s", output_name, template)
raise InternalError('Output not found in managed upload infrastructure template')
args = {'StackName': INFRA_STACK_NAME, 'TemplateBody': template}
LOG.info('Creating %s', INFRA_STACK_NAME)
try:
result = self.cfn_client.create_stack(**args, EnableTerminationProtection=True, Capabilities=['CAPABILITY_IAM'])
except self.cfn_client.exceptions.AlreadyExistsException:
LOG.info('%s already exists. Attempting to update', INFRA_STACK_NAME)
try:
result = self.cfn_client.update_stack(**args, Capabilities=['CAPABILITY_IAM'])
except ClientError as e:
msg = str(e)
if 'No updates are to be performed' in msg:
LOG.info('%s stack is up to date', INFRA_STACK_NAME)
stack_id = INFRA_STACK_NAME
else:
LOG.debug('%s stack update resulted in unknown ClientError', INFRA_STACK_NAME, exc_info=e)
raise DownstreamError('Unknown CloudFormation error') from e
else:
stack_id = result['StackId']
self._wait_for_stack(stack_id, 'stack_update_complete', '{} stack is up to date'.format(INFRA_STACK_NAME))
except ClientError as e:
LOG.debug('%s stack create resulted in unknown ClientError', INFRA_STACK_NAME, exc_info=e)
raise DownstreamError('Unknown CloudFormation error') from e
else:
stack_id = result['StackId']
self._wait_for_stack(stack_id, 'stack_create_complete', '{} stack was successfully created'.format(INFRA_STACK_NAME))
result = self.cfn_client.describe_stacks(StackName=stack_id)
outputs = result['Stacks'][0]['Outputs']
try:
self.bucket_name = next((output['OutputValue'] for output in outputs if output['OutputKey'] == BUCKET_OUTPUT_NAME))
except StopIteration:
LOG.debug("Outputs from stack '%s' did not contain '%s':\n%s", stack_id, BUCKET_OUTPUT_NAME, ', '.join((output['OutputKey'] for output in outputs)))
raise InternalError('Required output not found on stack')
result = self.cfn_client.describe_stacks(StackName=stack_id)
outputs = result['Stacks'][0]['Outputs']
try:
self.log_delivery_role_arn = next((output['OutputValue'] for output in outputs if output['OutputKey'] == LOG_DELIVERY_ROLE_ARN_OUTPUT_NAME))
except StopIteration:
LOG.debug("Outputs from stack '%s' did not contain '%s':\n%s", stack_id, LOG_DELIVERY_ROLE_ARN_OUTPUT_NAME, ', '.join((output['OutputKey'] for output in outputs)))
raise InternalError('Required output not found on stack')
timestamp = datetime.utcnow().isoformat(timespec='seconds').replace(':', '-')
key = '{}-{}.zip'.format(file_prefix, timestamp)
LOG.debug("Uploading to '%s/%s'...", self.bucket_name, key)
try:
self.s3_client.upload_fileobj(fileobj, self.bucket_name, key)
except ClientError as e:
LOG.debug('S3 upload resulted in unknown ClientError', exc_info=e)
raise DownstreamError('Failed to upload artifacts to S3') from e
LOG.debug('Upload complete')
return 's3://{0}/{1}'.format(self.bucket_name, key)
|
cloudformation-cli
|
positive
|
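Aside: a condensed sketch of the create-or-update-then-wait pattern in the cloudformation-cli entry, written directly against boto3; the stack name and template are illustrative, and error handling is trimmed to the "No updates" case:

    import boto3
    from botocore.exceptions import ClientError

    cfn = boto3.client('cloudformation')

    def ensure_stack(name, template_body):
        try:
            cfn.create_stack(StackName=name, TemplateBody=template_body)
            waiter = 'stack_create_complete'
        except cfn.exceptions.AlreadyExistsException:
            try:
                cfn.update_stack(StackName=name, TemplateBody=template_body)
                waiter = 'stack_update_complete'
            except ClientError as e:
                if 'No updates are to be performed' in str(e):
                    return  # stack already matches the template
                raise
        cfn.get_waiter(waiter).wait(StackName=name)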
def nullable_converter(subtype_str, x):
if x is None:
return None
<DeepExtract>
    converter = None
    result = converters.get(subtype_str)
    if result is not None:
        converter = result
    elif subtype_str.startswith('DateTime64'):
        converter = converters['DateTime64']
    elif subtype_str.startswith('Decimal'):
        converter = converters['Decimal']
    elif subtype_str.startswith('Nullable('):
        subtype_str = EXTRACT_SUBTYPE_RE.match(subtype_str).group(1)
        converter = partial(converters['Nullable'], subtype_str)
</DeepExtract>
return converter(x) if converter else x
|
def nullable_converter(subtype_str, x):
if x is None:
return None
    converter = None
    result = converters.get(subtype_str)
    if result is not None:
        converter = result
    elif subtype_str.startswith('DateTime64'):
        converter = converters['DateTime64']
    elif subtype_str.startswith('Decimal'):
        converter = converters['Decimal']
    elif subtype_str.startswith('Nullable('):
        subtype_str = EXTRACT_SUBTYPE_RE.match(subtype_str).group(1)
        converter = partial(converters['Nullable'], subtype_str)
return converter(x) if converter else x
|
clickhouse-sqlalchemy
|
positive
|
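Aside: the clickhouse-sqlalchemy entry resolves a converter by exact type name first, then by prefix for parametrised types such as Decimal(10, 2). A minimal sketch of that dispatch (the converter table here is illustrative, not the library's):

    def get_converter(type_str, converters):
        # Exact matches win; parametrised types fall back to their base converter.
        exact = converters.get(type_str)
        if exact is not None:
            return exact
        for prefix in ('DateTime64', 'Decimal'):
            if type_str.startswith(prefix):
                return converters[prefix]
        return None

    converters = {'Int32': int, 'Decimal': float}  # float stands in for a Decimal parser
    conv = get_converter('Decimal(10, 2)', converters)
    print(conv('3.14') if conv else '3.14')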
def find_children(lintable: Lintable) -> list[Lintable]:
"""Traverse children of a single file or folder."""
if not lintable.path.exists():
return []
playbook_dir = str(lintable.path.parent)
<DeepExtract>
AnsibleCollectionConfig.playbook_paths = os.path.abspath(playbook_dir or os.path.abspath('.'))
</DeepExtract>
add_all_plugin_dirs(playbook_dir or '.')
if lintable.kind == 'role':
playbook_ds = AnsibleMapping({'roles': [{'role': str(lintable.path)}]})
elif lintable.kind not in ('playbook', 'tasks'):
return []
else:
try:
<DeepExtract>
dataloader = DataLoader()
if hasattr(dataloader, 'set_vault_password'):
dataloader.set_vault_password(DEFAULT_VAULT_PASSWORD)
playbook_ds = dataloader.load_from_file(str(lintable.path))
</DeepExtract>
except AnsibleError as exc:
raise SystemExit(exc) from exc
results = []
basedir = os.path.dirname(str(lintable.path))
if isinstance(playbook_ds, str):
raise MatchError(filename=lintable, rule=LoadingFailureRule())
for item in _playbook_items(playbook_ds):
for child in play_children(basedir, item, lintable.kind, playbook_dir):
path_str = str(child.path)
if '$' in path_str or '{{' in path_str:
continue
valid_tokens = []
for token in split_args(path_str):
if '=' in token:
break
valid_tokens.append(token)
path = ' '.join(valid_tokens)
if path != path_str:
child.path = Path(path)
child.name = child.path.name
results.append(child)
return results
|
def find_children(lintable: Lintable) -> list[Lintable]:
"""Traverse children of a single file or folder."""
if not lintable.path.exists():
return []
playbook_dir = str(lintable.path.parent)
AnsibleCollectionConfig.playbook_paths = os.path.abspath(playbook_dir or os.path.abspath('.'))
add_all_plugin_dirs(playbook_dir or '.')
if lintable.kind == 'role':
playbook_ds = AnsibleMapping({'roles': [{'role': str(lintable.path)}]})
elif lintable.kind not in ('playbook', 'tasks'):
return []
else:
try:
dataloader = DataLoader()
if hasattr(dataloader, 'set_vault_password'):
dataloader.set_vault_password(DEFAULT_VAULT_PASSWORD)
playbook_ds = dataloader.load_from_file(str(lintable.path))
except AnsibleError as exc:
raise SystemExit(exc) from exc
results = []
basedir = os.path.dirname(str(lintable.path))
if isinstance(playbook_ds, str):
raise MatchError(filename=lintable, rule=LoadingFailureRule())
for item in _playbook_items(playbook_ds):
for child in play_children(basedir, item, lintable.kind, playbook_dir):
path_str = str(child.path)
if '$' in path_str or '{{' in path_str:
continue
valid_tokens = []
for token in split_args(path_str):
if '=' in token:
break
valid_tokens.append(token)
path = ' '.join(valid_tokens)
if path != path_str:
child.path = Path(path)
child.name = child.path.name
results.append(child)
return results
|
ansible-lint
|
positive
|
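Aside: the ansible-lint entry keeps only the leading tokens of a play child path, stopping at the first key=value argument. A sketch of that filtering, with shlex standing in for Ansible's split_args:

    import shlex

    def path_portion(line):
        # Keep shell-style tokens up to the first key=value argument.
        valid_tokens = []
        for token in shlex.split(line):
            if '=' in token:
                break
            valid_tokens.append(token)
        return ' '.join(valid_tokens)

    print(path_portion('roles/common/tasks/main.yml state=present mode=0644'))
    # -> roles/common/tasks/main.yml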
@tracker.route('/advisory.json', methods=['GET'])
@tracker.route('/advisory/json', methods=['GET'])
@tracker.route('/advisories.json', methods=['GET'])
@tracker.route('/advisories/json', methods=['GET'])
@json_response
def advisory_json():
<DeepExtract>
entries = db.session.query(Advisory, CVEGroup, CVEGroupPackage).join(CVEGroupPackage, Advisory.group_package).join(CVEGroup, CVEGroupPackage.group).group_by(CVEGroupPackage.id).order_by(Advisory.created.desc()).all()
entries = [{'advisory': advisory, 'group': group, 'package': package} for (advisory, group, package) in entries]
scheduled = list(filter(lambda item: item['advisory'].publication == Publication.scheduled, entries))
scheduled = sorted(scheduled, key=lambda item: item['advisory'].created, reverse=True)
published = list(filter(lambda item: item['advisory'].publication == Publication.published, entries))
published = sorted(published, key=lambda item: item['advisory'].created, reverse=True)
data = {'scheduled': scheduled, 'published': published}
</DeepExtract>
def to_json_data(entry):
advisory = entry['advisory']
group = entry['group']
package = entry['package']
json_entry = OrderedDict()
json_entry['name'] = advisory.id
json_entry['date'] = advisory.created.strftime('%Y-%m-%d')
json_entry['group'] = group.name
json_entry['package'] = package.pkgname
json_entry['severity'] = group.severity.label
json_entry['type'] = advisory.advisory_type
json_entry['reference'] = advisory.reference if advisory.reference else None
return json_entry
return list(map(to_json_data, data['published']))
|
@tracker.route('/advisory.json', methods=['GET'])
@tracker.route('/advisory/json', methods=['GET'])
@tracker.route('/advisories.json', methods=['GET'])
@tracker.route('/advisories/json', methods=['GET'])
@json_response
def advisory_json():
entries = db.session.query(Advisory, CVEGroup, CVEGroupPackage).join(CVEGroupPackage, Advisory.group_package).join(CVEGroup, CVEGroupPackage.group).group_by(CVEGroupPackage.id).order_by(Advisory.created.desc()).all()
entries = [{'advisory': advisory, 'group': group, 'package': package} for (advisory, group, package) in entries]
scheduled = list(filter(lambda item: item['advisory'].publication == Publication.scheduled, entries))
scheduled = sorted(scheduled, key=lambda item: item['advisory'].created, reverse=True)
published = list(filter(lambda item: item['advisory'].publication == Publication.published, entries))
published = sorted(published, key=lambda item: item['advisory'].created, reverse=True)
data = {'scheduled': scheduled, 'published': published}
def to_json_data(entry):
advisory = entry['advisory']
group = entry['group']
package = entry['package']
json_entry = OrderedDict()
json_entry['name'] = advisory.id
json_entry['date'] = advisory.created.strftime('%Y-%m-%d')
json_entry['group'] = group.name
json_entry['package'] = package.pkgname
json_entry['severity'] = group.severity.label
json_entry['type'] = advisory.advisory_type
json_entry['reference'] = advisory.reference if advisory.reference else None
return json_entry
return list(map(to_json_data, data['published']))
|
arch-security-tracker
|
positive
|
@unittest.skipIf(sys.version_info < (3, 0) or platform.system() == 'Windows', 'Unit test compatibility issue on Windows')
def test_filename_unicode_normalization(self):
logger.info(self.getTestHeader(sys._getframe().f_code.co_name))
test_filename = 'Núñez Papers.txt'
test_filename_nfd = unicodedata.normalize('NFD', test_filename)
os.makedirs(j(self.tmpdir, 'unicode-normalization'))
with open(j(self.tmpdir, 'unicode-normalization', test_filename_nfd), 'w') as f:
f.write('This is a test filename written using NFD normalization\n')
bag = bagit.make_bag(self.tmpdir)
bag.save()
self.assertTrue(bag.is_valid())
for m_f in bag.manifest_files():
<DeepExtract>
with bagit.open_text_file(m_f) as f:
contents = f.read()
</DeepExtract>
normalized_bytes = unicodedata.normalize('NFC', contents).encode('utf-8')
with open(m_f, 'wb') as f:
f.write(normalized_bytes)
for alg in bag.algorithms:
bagit._make_tagmanifest_file(alg, bag.path, encoding=bag.encoding)
bag = bagit.BDBag(self.tmpdir)
self.assertTrue(bag.is_valid())
|
@unittest.skipIf(sys.version_info < (3, 0) or platform.system() == 'Windows', 'Unit test compatibility issue on Windows')
def test_filename_unicode_normalization(self):
logger.info(self.getTestHeader(sys._getframe().f_code.co_name))
test_filename = 'Núñez Papers.txt'
test_filename_nfd = unicodedata.normalize('NFD', test_filename)
os.makedirs(j(self.tmpdir, 'unicode-normalization'))
with open(j(self.tmpdir, 'unicode-normalization', test_filename_nfd), 'w') as f:
f.write('This is a test filename written using NFD normalization\n')
bag = bagit.make_bag(self.tmpdir)
bag.save()
self.assertTrue(bag.is_valid())
for m_f in bag.manifest_files():
with bagit.open_text_file(m_f) as f:
contents = f.read()
normalized_bytes = unicodedata.normalize('NFC', contents).encode('utf-8')
with open(m_f, 'wb') as f:
f.write(normalized_bytes)
for alg in bag.algorithms:
bagit._make_tagmanifest_file(alg, bag.path, encoding=bag.encoding)
bag = bagit.BDBag(self.tmpdir)
self.assertTrue(bag.is_valid())
|
bdbag
|
positive
|
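Aside: the bdbag test above guards against the NFC/NFD mismatch, where a filename can look identical on screen while hashing to different bytes. A small demonstration:

    import unicodedata

    name = 'Núñez Papers.txt'
    nfc = unicodedata.normalize('NFC', name)
    nfd = unicodedata.normalize('NFD', name)
    print(nfc == nfd)  # False: composed vs. decomposed code points
    print(len(nfc.encode('utf-8')), len(nfd.encode('utf-8')))  # NFD encodes longer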
def xml2list(fp):
"""
convert VOC XML to a list
:param fp: file path to VOC XML file
:return: [(type,(x1, y1, x2, y2))]
"""
tree = ET.parse(fp)
root = tree.getroot()
objects = root.findall('object')
lst = [mapper(obj) for obj in objects]
<DeepExtract>
cls_map = {}
for obj in lst:
(t, coords) = obj
if t in cls_map:
cls_map[t].append(coords)
else:
cls_map[t] = [coords]
final_list = []
for cls in cls_map:
coords_list = cls_map[cls]
x_groupings = {}
for coords in coords_list:
(xmin, xmax) = (coords[0], coords[2])
(ymin, ymax) = (coords[1], coords[3])
found_f = False
for grouping in x_groupings:
(gxmin, gxmax) = grouping
if gxmin - xtres <= xmin <= gxmin + xtres and gxmax - xtres <= xmax <= gxmax + xtres:
found_f = True
x_groupings[grouping].append(coords)
break
if found_f is False:
x_groupings[xmin, xmax] = [coords]
for grouping in x_groupings:
grouping_coords = x_groupings[grouping]
grouping_coords.sort(key=lambda x: x[1])
curr_ymax = None
merge_list = []
in_merge_list = set()
for (ind, coords) in enumerate(grouping_coords):
if ind == 0:
curr_ymax = coords[3]
curr_ymax_ind = 0
continue
(ymin, ymax) = (coords[1], coords[3])
if ymin < curr_ymax < ymax:
merge_list.append((curr_ymax_ind, ind))
in_merge_list.add(curr_ymax_ind)
in_merge_list.add(ind)
curr_ymax = ymax
curr_ymax_ind = ind
elif ymin > curr_ymax:
curr_ymax = ymax
curr_ymax_ind = ind
if ymax < curr_ymax:
in_merge_list.add(ind)
for merge in merge_list:
(t, b) = merge
t_item = grouping_coords[t]
b_item = grouping_coords[b]
x1 = min(t_item[0], b_item[0])
y1 = t_item[1]
x2 = max(t_item[2], b_item[2])
y2 = b_item[3]
final_list.append((cls, (x1, y1, x2, y2)))
for (ind, coords) in enumerate(grouping_coords):
if ind not in in_merge_list:
final_list.append((cls, coords))
new_lst = final_list
</DeepExtract>
<DeepExtract>
new_objs = []
for obj in new_lst:
(t, coords) = obj
new_coords = (max(coords[0] - feather_x, 0), max(coords[1] - feather_y, 0), min(coords[2] + feather_x, max_x), min(coords[3] + feather_y, max_y))
new_objs.append((t, new_coords))
feathered_new_lst = new_objs
</DeepExtract>
feathered_new_lst.sort(key=lambda x: x[1])
return feathered_new_lst
|
def xml2list(fp):
"""
convert VOC XML to a list
:param fp: file path to VOC XML file
:return: [(type,(x1, y1, x2, y2))]
"""
tree = ET.parse(fp)
root = tree.getroot()
objects = root.findall('object')
lst = [mapper(obj) for obj in objects]
cls_map = {}
for obj in lst:
(t, coords) = obj
if t in cls_map:
cls_map[t].append(coords)
else:
cls_map[t] = [coords]
final_list = []
for cls in cls_map:
coords_list = cls_map[cls]
x_groupings = {}
for coords in coords_list:
(xmin, xmax) = (coords[0], coords[2])
(ymin, ymax) = (coords[1], coords[3])
found_f = False
for grouping in x_groupings:
(gxmin, gxmax) = grouping
if gxmin - xtres <= xmin <= gxmin + xtres and gxmax - xtres <= xmax <= gxmax + xtres:
found_f = True
x_groupings[grouping].append(coords)
break
if found_f is False:
x_groupings[xmin, xmax] = [coords]
for grouping in x_groupings:
grouping_coords = x_groupings[grouping]
grouping_coords.sort(key=lambda x: x[1])
curr_ymax = None
merge_list = []
in_merge_list = set()
for (ind, coords) in enumerate(grouping_coords):
if ind == 0:
curr_ymax = coords[3]
curr_ymax_ind = 0
continue
(ymin, ymax) = (coords[1], coords[3])
if ymin < curr_ymax < ymax:
merge_list.append((curr_ymax_ind, ind))
in_merge_list.add(curr_ymax_ind)
in_merge_list.add(ind)
curr_ymax = ymax
curr_ymax_ind = ind
elif ymin > curr_ymax:
curr_ymax = ymax
curr_ymax_ind = ind
if ymax < curr_ymax:
in_merge_list.add(ind)
for merge in merge_list:
(t, b) = merge
t_item = grouping_coords[t]
b_item = grouping_coords[b]
x1 = min(t_item[0], b_item[0])
y1 = t_item[1]
x2 = max(t_item[2], b_item[2])
y2 = b_item[3]
final_list.append((cls, (x1, y1, x2, y2)))
for (ind, coords) in enumerate(grouping_coords):
if ind not in in_merge_list:
final_list.append((cls, coords))
new_lst = final_list
new_objs = []
for obj in new_lst:
(t, coords) = obj
new_coords = (max(coords[0] - feather_x, 0), max(coords[1] - feather_y, 0), min(coords[2] + feather_x, max_x), min(coords[3] + feather_y, max_y))
new_objs.append((t, new_coords))
feathered_new_lst = new_objs
feathered_new_lst.sort(key=lambda x: x[1])
return feathered_new_lst
|
Cosmos
|
positive
|
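Aside: a minimal sketch of the VOC parsing that feeds xml2list in the Cosmos entry; the sample XML follows the standard VOC annotation schema:

    import xml.etree.ElementTree as ET

    voc = '''<annotation>
      <object><name>Figure</name>
        <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax></bndbox>
      </object>
    </annotation>'''

    root = ET.fromstring(voc)
    boxes = []
    for obj in root.findall('object'):
        b = obj.find('bndbox')
        coords = tuple(int(b.find(k).text) for k in ('xmin', 'ymin', 'xmax', 'ymax'))
        boxes.append((obj.find('name').text, coords))
    print(boxes)  # [('Figure', (10, 20, 110, 220))]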
def import_3dmigoto_vb_ib(operator, context, paths, flip_texcoord_v=True, axis_forward='-Z', axis_up='Y', pose_cb_off=[0, 0], pose_cb_step=1):
<DeepExtract>
(vb_paths, ib_paths, use_bin, pose_path) = zip(*paths)
pose_path = pose_path[0]
    if use_bin[0]:
        (vb, ib, name, pose_path) = load_3dmigoto_mesh_bin(operator, vb_paths, ib_paths, pose_path)
    else:
        vb = VertexBuffer(open(vb_paths[0], 'r'))
        for vb_path in vb_paths[1:]:
            tmp = VertexBuffer(open(vb_path, 'r'))
            vb.merge(tmp)
        ib = None
        if ib_paths:
            ib = IndexBuffer(open(ib_paths[0], 'r'))
            for ib_path in ib_paths[1:]:
                tmp = IndexBuffer(open(ib_path, 'r'))
                ib.merge(tmp)
        name = os.path.basename(vb_paths[0])
</DeepExtract>
mesh = bpy.data.meshes.new(name)
obj = bpy.data.objects.new(mesh.name, mesh)
global_matrix = axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4()
obj.matrix_world = global_matrix
obj['3DMigoto:VBLayout'] = vb.layout.serialise()
obj['3DMigoto:VBStride'] = vb.layout.stride
obj['3DMigoto:FirstVertex'] = vb.first
if ib is not None:
<DeepExtract>
mesh.loops.add(len(ib.faces) * 3)
mesh.polygons.add(len(ib.faces))
mesh.loops.foreach_set('vertex_index', unpack_list(ib.faces))
mesh.polygons.foreach_set('loop_start', [x * 3 for x in range(len(ib.faces))])
mesh.polygons.foreach_set('loop_total', [3] * len(ib.faces))
</DeepExtract>
obj['3DMigoto:IBFormat'] = ib.format
obj['3DMigoto:FirstIndex'] = ib.first
else:
<DeepExtract>
num_faces = len(vb.vertices) // 3
mesh.loops.add(num_faces * 3)
mesh.polygons.add(num_faces)
mesh.loops.foreach_set('vertex_index', [x for x in range(num_faces * 3)])
mesh.polygons.foreach_set('loop_start', [x * 3 for x in range(num_faces)])
mesh.polygons.foreach_set('loop_total', [3] * num_faces)
</DeepExtract>
<DeepExtract>
mesh.vertices.add(len(vb.vertices))
seen_offsets = set()
blend_indices = {}
blend_weights = {}
texcoords = {}
vertex_layers = {}
use_normals = False
for elem in vb.layout:
if elem.InputSlotClass != 'per-vertex':
continue
semantic_translations = {}
translated_elem_name = semantic_translations.get(elem.name, elem.name)
if (elem.InputSlot, elem.AlignedByteOffset) in seen_offsets:
assert translated_elem_name != 'POSITION'
continue
seen_offsets.add((elem.InputSlot, elem.AlignedByteOffset))
data = tuple((x[elem.name] for x in vb.vertices))
if translated_elem_name == 'POSITION':
if len(data[0]) == 4:
if [x[3] for x in data] != [1.0] * len(data):
raise Fatal('Positions are 4D')
print('Positions are 4D, storing W coordinate in POSITION.w vertex layer')
vertex_layers['POSITION.w'] = [[x[3]] for x in data]
positions = [(x[0], x[1], x[2]) for x in data]
mesh.vertices.foreach_set('co', unpack_list(positions))
elif translated_elem_name.startswith('COLOR'):
if len(data[0]) <= 3 or vertex_color_layer_channels == 4:
mesh.vertex_colors.new(name=elem.name)
color_layer = mesh.vertex_colors[elem.name].data
c = vertex_color_layer_channels
for l in mesh.loops:
color_layer[l.index].color = list(data[l.vertex_index]) + [0] * (c - len(data[l.vertex_index]))
else:
mesh.vertex_colors.new(name=elem.name + '.RGB')
mesh.vertex_colors.new(name=elem.name + '.A')
color_layer = mesh.vertex_colors[elem.name + '.RGB'].data
alpha_layer = mesh.vertex_colors[elem.name + '.A'].data
for l in mesh.loops:
color_layer[l.index].color = data[l.vertex_index][:3]
alpha_layer[l.index].color = [data[l.vertex_index][3], 0, 0]
elif translated_elem_name == 'NORMAL':
use_normals = True
import_normals_step1(mesh, data)
elif translated_elem_name in ('TANGENT', 'BINORMAL'):
print('NOTICE: Skipping import of %s in favour of recalculating on export' % elem.name)
elif translated_elem_name.startswith('BLENDINDICES'):
blend_indices[elem.SemanticIndex] = data
elif translated_elem_name.startswith('BLENDWEIGHT'):
blend_weights[elem.SemanticIndex] = data
elif translated_elem_name.startswith('TEXCOORD') and elem.is_float():
texcoords[elem.SemanticIndex] = data
else:
print('NOTICE: Storing unhandled semantic %s %s as vertex layer' % (elem.name, elem.Format))
vertex_layers[elem.name] = data
</DeepExtract>
<DeepExtract>
for (texcoord, data) in sorted(texcoords.items()):
dim = len(data[0])
if dim == 4:
components_list = ('xy', 'zw')
elif dim == 2:
components_list = ('xy',)
else:
raise Fatal('Unhandled TEXCOORD dimension: %i' % dim)
cmap = {'x': 0, 'y': 1, 'z': 2, 'w': 3}
for components in components_list:
uv_name = 'TEXCOORD%s.%s' % (texcoord and texcoord or '', components)
if hasattr(mesh, 'uv_textures'):
mesh.uv_textures.new(uv_name)
else:
mesh.uv_layers.new(name=uv_name)
blender_uvs = mesh.uv_layers[uv_name]
if flip_texcoord_v:
flip_uv = lambda uv: (uv[0], 1.0 - uv[1])
obj['3DMigoto:' + uv_name] = {'flip_v': True}
else:
flip_uv = lambda uv: uv
uvs = [[d[cmap[c]] for c in components] for d in data]
for l in mesh.loops:
blender_uvs.data[l.index].uv = flip_uv(uvs[l.vertex_index])
</DeepExtract>
<DeepExtract>
for (element_name, data) in sorted(vertex_layers.items()):
dim = len(data[0])
cmap = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}
for component in range(dim):
if dim != 1 or element_name.find('.') == -1:
layer_name = '%s.%s' % (element_name, cmap[component])
else:
layer_name = element_name
if type(data[0][0]) == int:
mesh.vertex_layers_int.new(name=layer_name)
layer = mesh.vertex_layers_int[layer_name]
for v in mesh.vertices:
val = data[v.index][component]
if val < 2147483648:
layer.data[v.index].value = val
else:
layer.data[v.index].value = struct.unpack('i', struct.pack('I', val))[0]
elif type(data[0][0]) == float:
mesh.vertex_layers_float.new(name=layer_name)
layer = mesh.vertex_layers_float[layer_name]
for v in mesh.vertices:
layer.data[v.index].value = data[v.index][component]
else:
raise Fatal('BUG: Bad layer type %s' % type(data[0][0]))
</DeepExtract>
<DeepExtract>
assert len(blend_indices) == len(blend_weights)
if blend_indices:
num_vertex_groups = max(itertools.chain(*itertools.chain(*blend_indices.values()))) + 1
for i in range(num_vertex_groups):
obj.vertex_groups.new(name=str(i))
for vertex in mesh.vertices:
for semantic_index in sorted(blend_indices.keys()):
for (i, w) in zip(blend_indices[semantic_index][vertex.index], blend_weights[semantic_index][vertex.index]):
if w == 0.0:
continue
obj.vertex_groups[i].add((vertex.index,), w, 'REPLACE')
</DeepExtract>
mesh.validate(verbose=False, clean_customdata=False)
mesh.update()
if use_normals:
<DeepExtract>
clnors = array('f', [0.0] * (len(mesh.loops) * 3))
mesh.loops.foreach_get('normal', clnors)
mesh.polygons.foreach_set('use_smooth', [True] * len(mesh.polygons))
mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
mesh.use_auto_smooth = True
</DeepExtract>
else:
mesh.calc_normals()
<DeepExtract>
context.scene.collection.objects.link(obj)
</DeepExtract>
<DeepExtract>
obj.select_set(True)
</DeepExtract>
<DeepExtract>
context.view_layer.objects.active = obj
</DeepExtract>
if pose_path is not None:
<DeepExtract>
pose_buffer = ConstantBuffer(open(pose_path, 'r'), *pose_cb_off)
matrices = pose_buffer.as_3x4_matrices()
obj = context.object
if not context.selected_objects:
obj = None
        if obj:
matrices = matrices[:len(obj.vertex_groups)]
name = os.path.basename(pose_path)
arm_data = bpy.data.armatures.new(name)
arm = bpy.data.objects.new(name, object_data=arm_data)
conversion_matrix = axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4()
link_object_to_scene(context, arm)
select_set(arm, True)
set_active_object(context, arm)
bpy.ops.object.mode_set(mode='EDIT')
for (i, matrix) in enumerate(matrices):
bone = arm_data.edit_bones.new(str(i * pose_cb_step))
bone.tail = Vector((0.0, 0.1, 0.0))
bpy.ops.object.mode_set(mode='OBJECT')
for (i, matrix) in enumerate(matrices):
bone = arm.pose.bones[str(i * pose_cb_step)]
matrix.resize_4x4()
bone.matrix_basis = matmul(matmul(conversion_matrix, matrix), conversion_matrix.inverted())
if obj is not None:
mod = obj.modifiers.new(arm.name, 'ARMATURE')
mod.object = arm
obj.parent = arm
hide_set(arm, True)
</DeepExtract>
<DeepExtract>
context.view_layer.objects.active = obj
</DeepExtract>
return obj
|
def import_3dmigoto_vb_ib(operator, context, paths, flip_texcoord_v=True, axis_forward='-Z', axis_up='Y', pose_cb_off=[0, 0], pose_cb_step=1):
(vb_paths, ib_paths, use_bin, pose_path) = zip(*paths)
pose_path = pose_path[0]
    if use_bin[0]:
        (vb, ib, name, pose_path) = load_3dmigoto_mesh_bin(operator, vb_paths, ib_paths, pose_path)
    else:
        vb = VertexBuffer(open(vb_paths[0], 'r'))
        for vb_path in vb_paths[1:]:
            tmp = VertexBuffer(open(vb_path, 'r'))
            vb.merge(tmp)
        ib = None
        if ib_paths:
            ib = IndexBuffer(open(ib_paths[0], 'r'))
            for ib_path in ib_paths[1:]:
                tmp = IndexBuffer(open(ib_path, 'r'))
                ib.merge(tmp)
        name = os.path.basename(vb_paths[0])
mesh = bpy.data.meshes.new(name)
obj = bpy.data.objects.new(mesh.name, mesh)
global_matrix = axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4()
obj.matrix_world = global_matrix
obj['3DMigoto:VBLayout'] = vb.layout.serialise()
obj['3DMigoto:VBStride'] = vb.layout.stride
obj['3DMigoto:FirstVertex'] = vb.first
if ib is not None:
mesh.loops.add(len(ib.faces) * 3)
mesh.polygons.add(len(ib.faces))
mesh.loops.foreach_set('vertex_index', unpack_list(ib.faces))
mesh.polygons.foreach_set('loop_start', [x * 3 for x in range(len(ib.faces))])
mesh.polygons.foreach_set('loop_total', [3] * len(ib.faces))
obj['3DMigoto:IBFormat'] = ib.format
obj['3DMigoto:FirstIndex'] = ib.first
else:
num_faces = len(vb.vertices) // 3
mesh.loops.add(num_faces * 3)
mesh.polygons.add(num_faces)
mesh.loops.foreach_set('vertex_index', [x for x in range(num_faces * 3)])
mesh.polygons.foreach_set('loop_start', [x * 3 for x in range(num_faces)])
mesh.polygons.foreach_set('loop_total', [3] * num_faces)
mesh.vertices.add(len(vb.vertices))
seen_offsets = set()
blend_indices = {}
blend_weights = {}
texcoords = {}
vertex_layers = {}
use_normals = False
for elem in vb.layout:
if elem.InputSlotClass != 'per-vertex':
continue
semantic_translations = {}
translated_elem_name = semantic_translations.get(elem.name, elem.name)
if (elem.InputSlot, elem.AlignedByteOffset) in seen_offsets:
assert translated_elem_name != 'POSITION'
continue
seen_offsets.add((elem.InputSlot, elem.AlignedByteOffset))
data = tuple((x[elem.name] for x in vb.vertices))
if translated_elem_name == 'POSITION':
if len(data[0]) == 4:
if [x[3] for x in data] != [1.0] * len(data):
raise Fatal('Positions are 4D')
print('Positions are 4D, storing W coordinate in POSITION.w vertex layer')
vertex_layers['POSITION.w'] = [[x[3]] for x in data]
positions = [(x[0], x[1], x[2]) for x in data]
mesh.vertices.foreach_set('co', unpack_list(positions))
elif translated_elem_name.startswith('COLOR'):
if len(data[0]) <= 3 or vertex_color_layer_channels == 4:
mesh.vertex_colors.new(name=elem.name)
color_layer = mesh.vertex_colors[elem.name].data
c = vertex_color_layer_channels
for l in mesh.loops:
color_layer[l.index].color = list(data[l.vertex_index]) + [0] * (c - len(data[l.vertex_index]))
else:
mesh.vertex_colors.new(name=elem.name + '.RGB')
mesh.vertex_colors.new(name=elem.name + '.A')
color_layer = mesh.vertex_colors[elem.name + '.RGB'].data
alpha_layer = mesh.vertex_colors[elem.name + '.A'].data
for l in mesh.loops:
color_layer[l.index].color = data[l.vertex_index][:3]
alpha_layer[l.index].color = [data[l.vertex_index][3], 0, 0]
elif translated_elem_name == 'NORMAL':
use_normals = True
import_normals_step1(mesh, data)
elif translated_elem_name in ('TANGENT', 'BINORMAL'):
print('NOTICE: Skipping import of %s in favour of recalculating on export' % elem.name)
elif translated_elem_name.startswith('BLENDINDICES'):
blend_indices[elem.SemanticIndex] = data
elif translated_elem_name.startswith('BLENDWEIGHT'):
blend_weights[elem.SemanticIndex] = data
elif translated_elem_name.startswith('TEXCOORD') and elem.is_float():
texcoords[elem.SemanticIndex] = data
else:
print('NOTICE: Storing unhandled semantic %s %s as vertex layer' % (elem.name, elem.Format))
vertex_layers[elem.name] = data
for (texcoord, data) in sorted(texcoords.items()):
dim = len(data[0])
if dim == 4:
components_list = ('xy', 'zw')
elif dim == 2:
components_list = ('xy',)
else:
raise Fatal('Unhandled TEXCOORD dimension: %i' % dim)
cmap = {'x': 0, 'y': 1, 'z': 2, 'w': 3}
for components in components_list:
uv_name = 'TEXCOORD%s.%s' % (texcoord and texcoord or '', components)
if hasattr(mesh, 'uv_textures'):
mesh.uv_textures.new(uv_name)
else:
mesh.uv_layers.new(name=uv_name)
blender_uvs = mesh.uv_layers[uv_name]
if flip_texcoord_v:
flip_uv = lambda uv: (uv[0], 1.0 - uv[1])
obj['3DMigoto:' + uv_name] = {'flip_v': True}
else:
flip_uv = lambda uv: uv
uvs = [[d[cmap[c]] for c in components] for d in data]
for l in mesh.loops:
blender_uvs.data[l.index].uv = flip_uv(uvs[l.vertex_index])
for (element_name, data) in sorted(vertex_layers.items()):
dim = len(data[0])
cmap = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}
for component in range(dim):
if dim != 1 or element_name.find('.') == -1:
layer_name = '%s.%s' % (element_name, cmap[component])
else:
layer_name = element_name
if type(data[0][0]) == int:
mesh.vertex_layers_int.new(name=layer_name)
layer = mesh.vertex_layers_int[layer_name]
for v in mesh.vertices:
val = data[v.index][component]
if val < 2147483648:
layer.data[v.index].value = val
else:
layer.data[v.index].value = struct.unpack('i', struct.pack('I', val))[0]
elif type(data[0][0]) == float:
mesh.vertex_layers_float.new(name=layer_name)
layer = mesh.vertex_layers_float[layer_name]
for v in mesh.vertices:
layer.data[v.index].value = data[v.index][component]
else:
raise Fatal('BUG: Bad layer type %s' % type(data[0][0]))
assert len(blend_indices) == len(blend_weights)
if blend_indices:
num_vertex_groups = max(itertools.chain(*itertools.chain(*blend_indices.values()))) + 1
for i in range(num_vertex_groups):
obj.vertex_groups.new(name=str(i))
for vertex in mesh.vertices:
for semantic_index in sorted(blend_indices.keys()):
for (i, w) in zip(blend_indices[semantic_index][vertex.index], blend_weights[semantic_index][vertex.index]):
if w == 0.0:
continue
obj.vertex_groups[i].add((vertex.index,), w, 'REPLACE')
mesh.validate(verbose=False, clean_customdata=False)
mesh.update()
if use_normals:
clnors = array('f', [0.0] * (len(mesh.loops) * 3))
mesh.loops.foreach_get('normal', clnors)
mesh.polygons.foreach_set('use_smooth', [True] * len(mesh.polygons))
mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
mesh.use_auto_smooth = True
else:
mesh.calc_normals()
context.scene.collection.objects.link(obj)
obj.select_set(True)
context.view_layer.objects.active = obj
if pose_path is not None:
pose_buffer = ConstantBuffer(open(pose_path, 'r'), *pose_cb_off)
matrices = pose_buffer.as_3x4_matrices()
obj = context.object
if not context.selected_objects:
obj = None
        if obj:
matrices = matrices[:len(obj.vertex_groups)]
name = os.path.basename(pose_path)
arm_data = bpy.data.armatures.new(name)
arm = bpy.data.objects.new(name, object_data=arm_data)
conversion_matrix = axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4()
link_object_to_scene(context, arm)
select_set(arm, True)
set_active_object(context, arm)
bpy.ops.object.mode_set(mode='EDIT')
for (i, matrix) in enumerate(matrices):
bone = arm_data.edit_bones.new(str(i * pose_cb_step))
bone.tail = Vector((0.0, 0.1, 0.0))
bpy.ops.object.mode_set(mode='OBJECT')
for (i, matrix) in enumerate(matrices):
bone = arm.pose.bones[str(i * pose_cb_step)]
matrix.resize_4x4()
bone.matrix_basis = matmul(matmul(conversion_matrix, matrix), conversion_matrix.inverted())
if obj is not None:
mod = obj.modifiers.new(arm.name, 'ARMATURE')
mod.object = arm
obj.parent = arm
hide_set(arm, True)
context.view_layer.objects.active = obj
return obj
|
3d-fixes
|
positive
|
def print_begin(argv, args):
script = os.path.realpath(argv[0])
system = _user_mode and ' --user' or ' --system'
init = _init and ' --init' or ''
logg.info('EXEC BEGIN %s %s%s%s', script, ' '.join(args), system, init)
if _root and (not is_good_root(_root)):
<DeepExtract>
        if not _root:
            root44 = '<none>'
        elif len(_root) <= 40 and '/' not in _root:
            root44 = '.../' + _root
        elif len(_root) <= 44:
            root44 = _root
        else:
            x = _root.find('/', 8)
            if 0 < x and x < 14:
                out = _root[:x + 1]
            else:
                out = _root[:10]
            out += '...'
            remain = len(_root) - len(out)
            y = _root.find('/', remain)
            if 0 < y and y < remain + 5:
                out += _root[y:]
            else:
                out += _root[remain:]
            root44 = out
</DeepExtract>
logg.warning('the --root=%s should have alteast three levels /tmp/test_123/root', root44)
|
def print_begin(argv, args):
script = os.path.realpath(argv[0])
system = _user_mode and ' --user' or ' --system'
init = _init and ' --init' or ''
logg.info('EXEC BEGIN %s %s%s%s', script, ' '.join(args), system, init)
if _root and (not is_good_root(_root)):
        if not _root:
            root44 = '<none>'
        elif len(_root) <= 40 and '/' not in _root:
            root44 = '.../' + _root
        elif len(_root) <= 44:
            root44 = _root
        else:
            x = _root.find('/', 8)
            if 0 < x and x < 14:
                out = _root[:x + 1]
            else:
                out = _root[:10]
            out += '...'
            remain = len(_root) - len(out)
            y = _root.find('/', remain)
            if 0 < y and y < remain + 5:
                out += _root[y:]
            else:
                out += _root[remain:]
            root44 = out
logg.warning('the --root=%s should have alteast three levels /tmp/test_123/root', root44)
|
docker-systemctl-images
|
positive
|
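Aside: a readable sketch of the 44-character path abbreviation attempted in the docker-systemctl-images entry: keep a short leading directory, insert '...', and resume at a path boundary near the tail. Thresholds mirror the entry's code; abbrev44 is an illustrative name:

    def abbrev44(path):
        if not path:
            return '<none>'
        if len(path) <= 44:
            # Short slashless names get a '.../' prefix to signal truncation context.
            return path if '/' in path or len(path) > 40 else '.../' + path
        x = path.find('/', 8)
        out = (path[:x + 1] if 0 < x < 14 else path[:10]) + '...'
        tail = len(path) - (44 - len(out))
        y = path.find('/', tail)
        return out + (path[y:] if 0 < y < tail + 5 else path[tail:])

    print(abbrev44('/tmp/test_123/root/var/run/some/very/deep/service/dir'))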
def _on_topmost_changed(self, program, property_spec):
if self._mode == self.AUTOMATIC:
if program.get_is_topmost():
<DeepExtract>
rpc = osso.Rpc(self._osso_context)
rpc.rpc_run(self._MCE_SERVICE, self._MCE_REQUEST_PATH, self._MCE_REQUEST_IF, self._ENABLE_ACCEL, use_system_bus=True)
</DeepExtract>
else:
<DeepExtract>
rpc = osso.Rpc(self._osso_context)
rpc.rpc_run(self._MCE_SERVICE, self._MCE_REQUEST_PATH, self._MCE_REQUEST_IF, self._DISABLE_ACCEL, use_system_bus=True)
</DeepExtract>
|
def _on_topmost_changed(self, program, property_spec):
if self._mode == self.AUTOMATIC:
if program.get_is_topmost():
rpc = osso.Rpc(self._osso_context)
rpc.rpc_run(self._MCE_SERVICE, self._MCE_REQUEST_PATH, self._MCE_REQUEST_IF, self._ENABLE_ACCEL, use_system_bus=True)
else:
rpc = osso.Rpc(self._osso_context)
rpc.rpc_run(self._MCE_SERVICE, self._MCE_REQUEST_PATH, self._MCE_REQUEST_IF, self._DISABLE_ACCEL, use_system_bus=True)
|
advancedcaching
|
positive
|
def build_graph(self, state, last_cards, passive_decision_target, passive_bomb_target, passive_response_target, active_decision_target, active_response_target, seq_length_target, minor_response_target, minor_type, mode):
<DeepExtract>
with tf.variable_scope(SCOPE):
with slim.arg_scope([slim.fully_connected, slim.conv2d], weights_regularizer=slim.l2_regularizer(WEIGHT_DECAY)):
with tf.variable_scope('branch_main'):
flattened_1 = conv_block(state[:, :60], 32, INPUT_DIM // 3, [[16, 32, 5, 'identity'], [16, 32, 5, 'identity'], [32, 128, 5, 'downsampling'], [32, 128, 5, 'identity'], [32, 128, 5, 'identity'], [64, 256, 5, 'downsampling'], [64, 256, 3, 'identity'], [64, 256, 3, 'identity']], 'branch_main1')
flattened_2 = conv_block(state[:, 60:120], 32, INPUT_DIM // 3, [[16, 32, 5, 'identity'], [16, 32, 5, 'identity'], [32, 128, 5, 'downsampling'], [32, 128, 5, 'identity'], [32, 128, 5, 'identity'], [64, 256, 5, 'downsampling'], [64, 256, 3, 'identity'], [64, 256, 3, 'identity']], 'branch_main2')
flattened_3 = conv_block(state[:, 120:], 32, INPUT_DIM // 3, [[16, 32, 5, 'identity'], [16, 32, 5, 'identity'], [32, 128, 5, 'downsampling'], [32, 128, 5, 'identity'], [32, 128, 5, 'identity'], [64, 256, 5, 'downsampling'], [64, 256, 3, 'identity'], [64, 256, 3, 'identity']], 'branch_main3')
flattened = tf.concat([flattened_1, flattened_2, flattened_3], axis=1)
with tf.variable_scope('branch_passive'):
flattened_last = conv_block(last_cards, 32, LAST_INPUT_DIM, [[16, 32, 5, 'identity'], [16, 32, 5, 'identity'], [32, 128, 5, 'downsampling'], [32, 128, 5, 'identity'], [32, 128, 5, 'identity'], [64, 256, 5, 'downsampling'], [64, 256, 3, 'identity'], [64, 256, 3, 'identity']], 'last_cards')
with tf.variable_scope('decision'):
attention_decision = slim.fully_connected(inputs=flattened_last, num_outputs=256, activation_fn=tf.nn.sigmoid)
fc_passive_decision = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
fc_passive_decision = fc_passive_decision * attention_decision
fc_passive_decision = slim.fully_connected(inputs=fc_passive_decision, num_outputs=64, activation_fn=tf.nn.relu)
passive_decision_logits = slim.fully_connected(inputs=fc_passive_decision, num_outputs=4, activation_fn=None)
with tf.variable_scope('bomb'):
fc_passive_bomb = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
fc_passive_bomb = slim.fully_connected(inputs=fc_passive_bomb, num_outputs=64, activation_fn=tf.nn.relu)
passive_bomb_logits = slim.fully_connected(inputs=fc_passive_bomb, num_outputs=13, activation_fn=None)
with tf.variable_scope('response'):
attention_response = slim.fully_connected(inputs=flattened_last, num_outputs=256, activation_fn=tf.nn.sigmoid)
fc_passive_response = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
fc_passive_response = fc_passive_response * attention_response
fc_passive_response = slim.fully_connected(inputs=fc_passive_response, num_outputs=64, activation_fn=tf.nn.relu)
passive_response_logits = slim.fully_connected(inputs=fc_passive_response, num_outputs=15, activation_fn=None)
with tf.variable_scope('branch_active'):
hidden_size = 256
lstm_active = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
with tf.variable_scope('decision'):
fc_active_decision = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
(lstm_active_decision_output, hidden_active_output) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_decision, 1), initial_state=lstm_active.zero_state(tf.shape(fc_active_decision)[0], dtype=tf.float32), sequence_length=tf.ones([tf.shape(state)[0]]))
fc_active_decision = slim.fully_connected(inputs=tf.squeeze(lstm_active_decision_output, axis=[1]), num_outputs=64, activation_fn=tf.nn.relu)
active_decision_logits = slim.fully_connected(inputs=fc_active_decision, num_outputs=13, activation_fn=None)
with tf.variable_scope('response'):
fc_active_response = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
(lstm_active_response_output, hidden_active_output) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_response, 1), initial_state=hidden_active_output, sequence_length=tf.ones([tf.shape(state)[0]]))
fc_active_response = slim.fully_connected(inputs=tf.squeeze(lstm_active_response_output, axis=[1]), num_outputs=64, activation_fn=tf.nn.relu)
active_response_logits = slim.fully_connected(inputs=fc_active_response, num_outputs=15, activation_fn=None)
with tf.variable_scope('seq_length'):
fc_active_seq = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
(lstm_active_seq_output, _) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_seq, 1), initial_state=hidden_active_output, sequence_length=tf.ones([tf.shape(state)[0]]))
fc_active_seq = slim.fully_connected(inputs=tf.squeeze(lstm_active_seq_output, axis=[1]), num_outputs=64, activation_fn=tf.nn.relu)
active_seq_logits = slim.fully_connected(inputs=fc_active_seq, num_outputs=12, activation_fn=None)
with tf.variable_scope('branch_minor'):
fc_minor = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
minor_type_embedding = slim.fully_connected(inputs=tf.one_hot(minor_type, 2), num_outputs=256, activation_fn=tf.nn.sigmoid)
fc_minor = fc_minor * minor_type_embedding
fc_minor = slim.fully_connected(inputs=fc_minor, num_outputs=64, activation_fn=tf.nn.relu)
minor_response_logits = slim.fully_connected(inputs=fc_minor, num_outputs=15, activation_fn=None)
</DeepExtract>
passive_decision_prob = tf.nn.softmax(passive_decision_logits, name='passive_decision_prob')
passive_bomb_prob = tf.nn.softmax(passive_bomb_logits, name='passive_bomb_prob')
passive_response_prob = tf.nn.softmax(passive_response_logits, name='passive_response_prob')
active_decision_prob = tf.nn.softmax(active_decision_logits, name='active_decision_prob')
active_response_prob = tf.nn.softmax(active_response_logits, name='active_response_prob')
active_seq_prob = tf.nn.softmax(active_seq_logits, name='active_seq_prob')
minor_response_prob = tf.nn.softmax(minor_response_logits, name='minor_response_prob')
is_training = get_current_tower_context().is_training
if not is_training:
return
with tf.variable_scope('passive_mode_loss'):
passive_decision_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(passive_decision_target, 4), logits=passive_decision_logits)
passive_bomb_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(passive_bomb_target, 13), logits=passive_bomb_logits)
passive_response_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(passive_response_target, 15), logits=passive_response_logits)
with tf.variable_scope('active_mode_loss'):
active_decision_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(active_decision_target, 13), logits=active_decision_logits)
active_response_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(active_response_target, 15), logits=active_response_logits)
active_seq_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(seq_length_target, 12), logits=active_seq_logits)
with tf.variable_scope('minor_mode_loss'):
minor_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(minor_response_target, 15), logits=minor_response_logits)
ctx = get_current_tower_context()
if ctx.has_own_variables:
l2_loss = ctx.get_collection_in_tower(tf.GraphKeys.REGULARIZATION_LOSSES)
else:
l2_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(l2_loss) > 0:
logger.info('regularize_cost_from_collection() found {} regularizers in REGULARIZATION_LOSSES collection.'.format(len(l2_loss)))
l2_main_loss = [l for l in l2_loss if 'branch_main' in l.name]
l2_passive_fc_loss = [l for l in l2_loss if 'branch_passive' in l.name and 'decision' not in l.name and ('bomb' not in l.name) and ('response' not in l.name)]
l2_active_fc_loss = [l for l in l2_loss if 'branch_active' in l.name and 'decision' not in l.name and ('response' not in l.name) and ('seq_length' not in l.name)] + [WEIGHT_DECAY * tf.nn.l2_loss(tf.get_default_graph().get_tensor_by_name(SCOPE + '/branch_active/decision/rnn/basic_lstm_cell/kernel:0'))]
print('l2 loss', len(l2_loss))
print('l2 main loss', len(l2_main_loss))
print('l2 passive fc loss', len(l2_passive_fc_loss))
print('l2 active fc loss', len(l2_active_fc_loss))
name_scopes = ['branch_passive/decision', 'branch_passive/bomb', 'branch_passive/response', 'branch_active/decision', 'branch_active/response', 'branch_active/seq_length', 'branch_minor']
losses = [passive_decision_loss, passive_bomb_loss, passive_response_loss, active_decision_loss, active_response_loss, active_seq_loss, minor_loss]
for (i, name) in enumerate(name_scopes):
l2_branch_loss = l2_main_loss.copy()
if 'passive' in name:
if 'bomb' in name:
l2_branch_loss += [l for l in l2_loss if name in l.name]
else:
l2_branch_loss += l2_passive_fc_loss + [l for l in l2_loss if name in l.name]
elif 'minor' in name:
l2_branch_loss += l2_active_fc_loss[:-1] + [l for l in l2_loss if name in l.name]
else:
l2_branch_loss += l2_active_fc_loss + [l for l in l2_loss if name in l.name]
losses[i] += tf.add_n(l2_branch_loss)
print('losses shape', losses[i].shape)
print('l2 branch loss', len(l2_branch_loss))
losses = tf.stack(losses, axis=1)
idx = tf.stack([tf.range(0, tf.shape(state)[0]), mode], axis=1)
loss = tf.gather_nd(losses, idx)
print(loss.shape)
loss = tf.reduce_mean(loss, name='loss')
add_moving_summary(loss, decay=0.1)
return loss
|
def build_graph(self, state, last_cards, passive_decision_target, passive_bomb_target, passive_response_target, active_decision_target, active_response_target, seq_length_target, minor_response_target, minor_type, mode):
with tf.variable_scope(SCOPE):
with slim.arg_scope([slim.fully_connected, slim.conv2d], weights_regularizer=slim.l2_regularizer(WEIGHT_DECAY)):
with tf.variable_scope('branch_main'):
flattened_1 = conv_block(state[:, :60], 32, INPUT_DIM // 3, [[16, 32, 5, 'identity'], [16, 32, 5, 'identity'], [32, 128, 5, 'downsampling'], [32, 128, 5, 'identity'], [32, 128, 5, 'identity'], [64, 256, 5, 'downsampling'], [64, 256, 3, 'identity'], [64, 256, 3, 'identity']], 'branch_main1')
flattened_2 = conv_block(state[:, 60:120], 32, INPUT_DIM // 3, [[16, 32, 5, 'identity'], [16, 32, 5, 'identity'], [32, 128, 5, 'downsampling'], [32, 128, 5, 'identity'], [32, 128, 5, 'identity'], [64, 256, 5, 'downsampling'], [64, 256, 3, 'identity'], [64, 256, 3, 'identity']], 'branch_main2')
flattened_3 = conv_block(state[:, 120:], 32, INPUT_DIM // 3, [[16, 32, 5, 'identity'], [16, 32, 5, 'identity'], [32, 128, 5, 'downsampling'], [32, 128, 5, 'identity'], [32, 128, 5, 'identity'], [64, 256, 5, 'downsampling'], [64, 256, 3, 'identity'], [64, 256, 3, 'identity']], 'branch_main3')
flattened = tf.concat([flattened_1, flattened_2, flattened_3], axis=1)
with tf.variable_scope('branch_passive'):
flattened_last = conv_block(last_cards, 32, LAST_INPUT_DIM, [[16, 32, 5, 'identity'], [16, 32, 5, 'identity'], [32, 128, 5, 'downsampling'], [32, 128, 5, 'identity'], [32, 128, 5, 'identity'], [64, 256, 5, 'downsampling'], [64, 256, 3, 'identity'], [64, 256, 3, 'identity']], 'last_cards')
with tf.variable_scope('decision'):
attention_decision = slim.fully_connected(inputs=flattened_last, num_outputs=256, activation_fn=tf.nn.sigmoid)
fc_passive_decision = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
fc_passive_decision = fc_passive_decision * attention_decision
fc_passive_decision = slim.fully_connected(inputs=fc_passive_decision, num_outputs=64, activation_fn=tf.nn.relu)
passive_decision_logits = slim.fully_connected(inputs=fc_passive_decision, num_outputs=4, activation_fn=None)
with tf.variable_scope('bomb'):
fc_passive_bomb = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
fc_passive_bomb = slim.fully_connected(inputs=fc_passive_bomb, num_outputs=64, activation_fn=tf.nn.relu)
passive_bomb_logits = slim.fully_connected(inputs=fc_passive_bomb, num_outputs=13, activation_fn=None)
with tf.variable_scope('response'):
attention_response = slim.fully_connected(inputs=flattened_last, num_outputs=256, activation_fn=tf.nn.sigmoid)
fc_passive_response = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
fc_passive_response = fc_passive_response * attention_response
fc_passive_response = slim.fully_connected(inputs=fc_passive_response, num_outputs=64, activation_fn=tf.nn.relu)
passive_response_logits = slim.fully_connected(inputs=fc_passive_response, num_outputs=15, activation_fn=None)
with tf.variable_scope('branch_active'):
hidden_size = 256
lstm_active = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
with tf.variable_scope('decision'):
fc_active_decision = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
(lstm_active_decision_output, hidden_active_output) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_decision, 1), initial_state=lstm_active.zero_state(tf.shape(fc_active_decision)[0], dtype=tf.float32), sequence_length=tf.ones([tf.shape(state)[0]]))
fc_active_decision = slim.fully_connected(inputs=tf.squeeze(lstm_active_decision_output, axis=[1]), num_outputs=64, activation_fn=tf.nn.relu)
active_decision_logits = slim.fully_connected(inputs=fc_active_decision, num_outputs=13, activation_fn=None)
with tf.variable_scope('response'):
fc_active_response = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
(lstm_active_response_output, hidden_active_output) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_response, 1), initial_state=hidden_active_output, sequence_length=tf.ones([tf.shape(state)[0]]))
fc_active_response = slim.fully_connected(inputs=tf.squeeze(lstm_active_response_output, axis=[1]), num_outputs=64, activation_fn=tf.nn.relu)
active_response_logits = slim.fully_connected(inputs=fc_active_response, num_outputs=15, activation_fn=None)
with tf.variable_scope('seq_length'):
fc_active_seq = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
(lstm_active_seq_output, _) = tf.nn.dynamic_rnn(lstm_active, tf.expand_dims(fc_active_seq, 1), initial_state=hidden_active_output, sequence_length=tf.ones([tf.shape(state)[0]]))
fc_active_seq = slim.fully_connected(inputs=tf.squeeze(lstm_active_seq_output, axis=[1]), num_outputs=64, activation_fn=tf.nn.relu)
active_seq_logits = slim.fully_connected(inputs=fc_active_seq, num_outputs=12, activation_fn=None)
with tf.variable_scope('branch_minor'):
fc_minor = slim.fully_connected(inputs=flattened, num_outputs=256, activation_fn=tf.nn.relu)
minor_type_embedding = slim.fully_connected(inputs=tf.one_hot(minor_type, 2), num_outputs=256, activation_fn=tf.nn.sigmoid)
fc_minor = fc_minor * minor_type_embedding
fc_minor = slim.fully_connected(inputs=fc_minor, num_outputs=64, activation_fn=tf.nn.relu)
minor_response_logits = slim.fully_connected(inputs=fc_minor, num_outputs=15, activation_fn=None)
(passive_decision_logits, passive_bomb_logits, passive_response_logits, active_decision_logits, active_response_logits, active_seq_logits, minor_response_logits) = (passive_decision_logits, passive_bomb_logits, passive_response_logits, active_decision_logits, active_response_logits, active_seq_logits, minor_response_logits)
passive_decision_prob = tf.nn.softmax(passive_decision_logits, name='passive_decision_prob')
passive_bomb_prob = tf.nn.softmax(passive_bomb_logits, name='passive_bomb_prob')
passive_response_prob = tf.nn.softmax(passive_response_logits, name='passive_response_prob')
active_decision_prob = tf.nn.softmax(active_decision_logits, name='active_decision_prob')
active_response_prob = tf.nn.softmax(active_response_logits, name='active_response_prob')
active_seq_prob = tf.nn.softmax(active_seq_logits, name='active_seq_prob')
minor_response_prob = tf.nn.softmax(minor_response_logits, name='minor_response_prob')
is_training = get_current_tower_context().is_training
if not is_training:
return
with tf.variable_scope('passive_mode_loss'):
passive_decision_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(passive_decision_target, 4), logits=passive_decision_logits)
passive_bomb_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(passive_bomb_target, 13), logits=passive_bomb_logits)
passive_response_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(passive_response_target, 15), logits=passive_response_logits)
with tf.variable_scope('active_mode_loss'):
active_decision_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(active_decision_target, 13), logits=active_decision_logits)
active_response_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(active_response_target, 15), logits=active_response_logits)
active_seq_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(seq_length_target, 12), logits=active_seq_logits)
with tf.variable_scope('minor_mode_loss'):
minor_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(minor_response_target, 15), logits=minor_response_logits)
ctx = get_current_tower_context()
if ctx.has_own_variables:
l2_loss = ctx.get_collection_in_tower(tf.GraphKeys.REGULARIZATION_LOSSES)
else:
l2_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(l2_loss) > 0:
logger.info('regularize_cost_from_collection() found {} regularizers in REGULARIZATION_LOSSES collection.'.format(len(l2_loss)))
l2_main_loss = [l for l in l2_loss if 'branch_main' in l.name]
l2_passive_fc_loss = [l for l in l2_loss if 'branch_passive' in l.name and 'decision' not in l.name and ('bomb' not in l.name) and ('response' not in l.name)]
l2_active_fc_loss = [l for l in l2_loss if 'branch_active' in l.name and 'decision' not in l.name and ('response' not in l.name) and ('seq_length' not in l.name)] + [WEIGHT_DECAY * tf.nn.l2_loss(tf.get_default_graph().get_tensor_by_name(SCOPE + '/branch_active/decision/rnn/basic_lstm_cell/kernel:0'))]
print('l2 loss', len(l2_loss))
print('l2 main loss', len(l2_main_loss))
print('l2 passive fc loss', len(l2_passive_fc_loss))
print('l2 active fc loss', len(l2_active_fc_loss))
name_scopes = ['branch_passive/decision', 'branch_passive/bomb', 'branch_passive/response', 'branch_active/decision', 'branch_active/response', 'branch_active/seq_length', 'branch_minor']
losses = [passive_decision_loss, passive_bomb_loss, passive_response_loss, active_decision_loss, active_response_loss, active_seq_loss, minor_loss]
for (i, name) in enumerate(name_scopes):
l2_branch_loss = l2_main_loss.copy()
if 'passive' in name:
if 'bomb' in name:
l2_branch_loss += [l for l in l2_loss if name in l.name]
else:
l2_branch_loss += l2_passive_fc_loss + [l for l in l2_loss if name in l.name]
elif 'minor' in name:
l2_branch_loss += l2_active_fc_loss[:-1] + [l for l in l2_loss if name in l.name]
else:
l2_branch_loss += l2_active_fc_loss + [l for l in l2_loss if name in l.name]
losses[i] += tf.add_n(l2_branch_loss)
print('losses shape', losses[i].shape)
print('l2 branch loss', len(l2_branch_loss))
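        # Stack the per-branch losses into [batch, branch] and pick each sample's loss by its mode index with gather_nd.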
losses = tf.stack(losses, axis=1)
idx = tf.stack([tf.range(0, tf.shape(state)[0]), mode], axis=1)
loss = tf.gather_nd(losses, idx)
print(loss.shape)
loss = tf.reduce_mean(loss, name='loss')
add_moving_summary(loss, decay=0.1)
return loss
|
doudizhu-C
|
positive
|
def get_query(self, query):
"""Run a generic issue/PR query"""
<DeepExtract>
if self.host == 'github.com':
baseurl = 'https://api.github.com'
else:
baseurl = f'https://{self.host}/api/v3'
        url = baseurl + '/search/issues?q={query}&per_page=100'.format(query=query)
</DeepExtract>
return self._getter(url, subkey='items')
|
def get_query(self, query):
"""Run a generic issue/PR query"""
if self.host == 'github.com':
baseurl = 'https://api.github.com'
else:
baseurl = f'https://{self.host}/api/v3'
        url = baseurl + '/search/issues?q={query}&per_page=100'.format(query=query)
return self._getter(url, subkey='items')
|
bugwarrior
|
positive
|
def test_caching():
start_params = np.array([3, 2, 1])
<DeepExtract>
empirical_moments = np.zeros(3)
</DeepExtract>
if isinstance(empirical_moments, dict):
empirical_moments = empirical_moments['simulated_moments']
got = estimate_msm(simulate_moments=_sim_np, empirical_moments=empirical_moments, moments_cov=cov_np, params=start_params, optimize_options='scipy_lbfgsb')
assert got._cache == {}
cov = got.cov(method='robust', return_type='array')
assert got._cache == {}
cov = got.cov(method='robust', return_type='array', seed=0)
assert_array_equal(list(got._cache.values())[0], cov)
|
def test_caching():
start_params = np.array([3, 2, 1])
empirical_moments = np.zeros(3)
if isinstance(empirical_moments, dict):
empirical_moments = empirical_moments['simulated_moments']
got = estimate_msm(simulate_moments=_sim_np, empirical_moments=empirical_moments, moments_cov=cov_np, params=start_params, optimize_options='scipy_lbfgsb')
assert got._cache == {}
cov = got.cov(method='robust', return_type='array')
assert got._cache == {}
cov = got.cov(method='robust', return_type='array', seed=0)
assert_array_equal(list(got._cache.values())[0], cov)
|
estimagic
|
positive
|
def mpi_moments(x, axis=0, comm=None, keepdims=False):
x = np.asarray(x)
assert x.ndim > 0
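    # Global mean: pack the local axis-sum and the element count together so a single Allreduce yields both.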
<DeepExtract>
x = np.asarray(x)
assert x.ndim > 0
if comm is None:
comm = MPI.COMM_WORLD
xsum = x.sum(axis=axis, keepdims=True)
n = xsum.size
localsum = np.zeros(n + 1, x.dtype)
localsum[:n] = xsum.ravel()
localsum[n] = x.shape[axis]
globalsum = np.zeros_like(localsum)
comm.Allreduce(localsum, globalsum, op=MPI.SUM)
(mean, count) = (globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n])
</DeepExtract>
sqdiffs = np.square(x - mean)
<DeepExtract>
sqdiffs = np.asarray(sqdiffs)
assert sqdiffs.ndim > 0
if comm is None:
comm = MPI.COMM_WORLD
xsum = sqdiffs.sum(axis=axis, keepdims=True)
n = xsum.size
localsum = np.zeros(n + 1, sqdiffs.dtype)
localsum[:n] = xsum.ravel()
localsum[n] = sqdiffs.shape[axis]
globalsum = np.zeros_like(localsum)
comm.Allreduce(localsum, globalsum, op=MPI.SUM)
(meansqdiff, count1) = (globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n])
</DeepExtract>
assert count1 == count
std = np.sqrt(meansqdiff)
if not keepdims:
newshape = mean.shape[:axis] + mean.shape[axis + 1:]
mean = mean.reshape(newshape)
std = std.reshape(newshape)
return (mean, std, count)
|
def mpi_moments(x, axis=0, comm=None, keepdims=False):
x = np.asarray(x)
assert x.ndim > 0
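    # Global mean: pack the local axis-sum and the element count together so a single Allreduce yields both.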
x = np.asarray(x)
assert x.ndim > 0
if comm is None:
comm = MPI.COMM_WORLD
xsum = x.sum(axis=axis, keepdims=True)
n = xsum.size
localsum = np.zeros(n + 1, x.dtype)
localsum[:n] = xsum.ravel()
localsum[n] = x.shape[axis]
globalsum = np.zeros_like(localsum)
comm.Allreduce(localsum, globalsum, op=MPI.SUM)
(mean, count) = (globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n])
sqdiffs = np.square(x - mean)
sqdiffs = np.asarray(sqdiffs)
assert sqdiffs.ndim > 0
if comm is None:
comm = MPI.COMM_WORLD
xsum = sqdiffs.sum(axis=axis, keepdims=True)
n = xsum.size
localsum = np.zeros(n + 1, sqdiffs.dtype)
localsum[:n] = xsum.ravel()
localsum[n] = sqdiffs.shape[axis]
globalsum = np.zeros_like(localsum)
comm.Allreduce(localsum, globalsum, op=MPI.SUM)
(meansqdiff, count1) = (globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n])
assert count1 == count
std = np.sqrt(meansqdiff)
if not keepdims:
newshape = mean.shape[:axis] + mean.shape[axis + 1:]
mean = mean.reshape(newshape)
std = std.reshape(newshape)
return (mean, std, count)
|
deepdrive
|
positive
|
def _smac_tuning_function(optimizer, scoring_function, tunable_hyperparameters, iterations, **kwargs):
"""Construct and run a full optimization loop.
Given an optimizer from ``smac.facade``, use it to perform a complete optimization for
a given ``scoring_function``. This is achieved by creating a ``config_space`` from the
tunable hyperparameters, adapting the scoring function to work with minimization, and
    then creating a scenario instance with the config space and the number of iterations.
    Finally, we use the optimizer with the previously created scenario and the adapted scoring
    function to obtain the best configuration for the given iterations.
Args:
optimizer (type):
A ``smac.facade`` class that represents a tuner.
scoring_function (function):
A function that performs scoring over params.
tunable_hyperparameters (dict):
A python dict with hyperparameters.
iterations (int):
Number of tuning iterations to perform.
kwargs (kwargs):
Any additional configuration used by the optimizer can be passed as
keyword args.
"""
with TemporaryDirectory() as tmp_dir:
<DeepExtract>
config_space = _create_config_space(tunable_hyperparameters)
tae_runner = _adapt_scoring_function(scoring_function)
scenario = Scenario({'run_obj': 'quality', 'runcount_limit': iterations, 'cs': config_space, 'deterministic': 'true', 'output_dir': tmp_dir, 'limit_resources': False})
optimizer_params = {'scenario': scenario, 'rng': 42, 'tae_runner': tae_runner}
if kwargs:
optimizer_params.update(kwargs)
optimizer_params = optimizer_params
</DeepExtract>
smac = optimizer(**optimizer_params)
best_config = smac.optimize()
return scoring_function(**_parse_params(best_config))
|
def _smac_tuning_function(optimizer, scoring_function, tunable_hyperparameters, iterations, **kwargs):
"""Construct and run a full optimization loop.
Given an optimizer from ``smac.facade``, use it to perform a complete optimization for
a given ``scoring_function``. This is achieved by creating a ``config_space`` from the
tunable hyperparameters, adapting the scoring function to work with minimization, and
then, create an instace scenario that with the config space and the amount of iterations.
Finally we use the optimizer with the previously created sceneario and adapted scoring
function to optimize this and obtain the best configuration for the given iterations.
Args:
optimizer (type):
A ``smac.facade`` class that represents a tuner.
scoring_function (function):
A function that performs scoring over params.
tunable_hyperparameters (dict):
A python dict with hyperparameters.
iterations (int):
Number of tuning iterations to perform.
kwargs (kwargs):
Any additional configuration used by the optimizer can be passed as
keyword args.
"""
with TemporaryDirectory() as tmp_dir:
config_space = _create_config_space(tunable_hyperparameters)
tae_runner = _adapt_scoring_function(scoring_function)
scenario = Scenario({'run_obj': 'quality', 'runcount_limit': iterations, 'cs': config_space, 'deterministic': 'true', 'output_dir': tmp_dir, 'limit_resources': False})
optimizer_params = {'scenario': scenario, 'rng': 42, 'tae_runner': tae_runner}
if kwargs:
optimizer_params.update(kwargs)
optimizer_params = optimizer_params
smac = optimizer(**optimizer_params)
best_config = smac.optimize()
return scoring_function(**_parse_params(best_config))
|
BTB
|
positive
|
def _get_batch_of_sequences(self, which_memory, batch_size, sample_history_length, contains_first_step):
assert sample_history_length > 1
(obs, task, candidates, chosen_indices, reward, next_obs, next_candidates) = ([], [], [], [], [], [], [])
for _ in range(sample_history_length):
obs.append([])
candidates.append([])
chosen_indices.append([])
reward.append([])
next_obs.append([])
next_candidates.append([])
for _ in range(batch_size):
<DeepExtract>
if len(which_memory) == 0:
t = None
assert sample_history_length > 1
trajectory_id = np.random.randint(len(which_memory))
trajectory = which_memory[trajectory_id]
if len(trajectory) <= sample_history_length:
t = None
if contains_first_step:
head = 0
else:
if 1 >= len(trajectory) - sample_history_length:
t = None
head = np.random.randint(1, len(trajectory) - sample_history_length)
final = len(trajectory) - 1
(seq_obs, seq_candidates, seq_chosen_indices, seq_reward, seq_next_obs, seq_next_candidates) = ([], [], [], [], [], [])
task = trajectory[head].task_list
for j in range(sample_history_length):
seq_obs.append(trajectory[head + j].observation_list)
seq_candidates.append(trajectory[head + j].action_candidate_list)
seq_chosen_indices.append(trajectory[head + j].chosen_indices)
seq_next_obs.append(trajectory[head + j + 1].observation_list)
seq_next_candidates.append(trajectory[head + j + 1].action_candidate_list)
how_long = final - (head + j) + 1 if self.accumulate_reward_from_final else 1
accumulated_rewards = [self.discount_gamma_game_reward ** i * trajectory[head + j + i].reward for i in range(how_long)]
accumulated_rewards = accumulated_rewards[:1]
game_reward = torch.sum(torch.stack(accumulated_rewards))
accumulated_count_rewards = [self.discount_gamma_count_reward ** i * trajectory[head + j + i].count_reward for i in range(how_long)]
accumulated_count_rewards = accumulated_count_rewards[:1]
count_reward = torch.sum(torch.stack(accumulated_count_rewards))
accumulated_novel_object_rewards = [self.discount_gamma_novel_object_reward ** i * trajectory[head + j + i].novel_object_reward for i in range(how_long)]
accumulated_novel_object_rewards = accumulated_novel_object_rewards[:1]
novel_object_reward = torch.sum(torch.stack(accumulated_novel_object_rewards))
seq_reward.append(game_reward + count_reward + novel_object_reward)
t = [seq_obs, seq_candidates, seq_chosen_indices, seq_reward, seq_next_obs, seq_next_candidates, task]
</DeepExtract>
if t is None:
continue
task.append(t[6])
for step in range(sample_history_length):
obs[step].append(t[0][step])
candidates[step].append(t[1][step])
chosen_indices[step].append(t[2][step])
reward[step].append(t[3][step])
next_obs[step].append(t[4][step])
next_candidates[step].append(t[5][step])
if len(task) == 0:
return None
return [obs, task, candidates, chosen_indices, reward, next_obs, next_candidates]
|
def _get_batch_of_sequences(self, which_memory, batch_size, sample_history_length, contains_first_step):
assert sample_history_length > 1
(obs, task, candidates, chosen_indices, reward, next_obs, next_candidates) = ([], [], [], [], [], [], [])
for _ in range(sample_history_length):
obs.append([])
candidates.append([])
chosen_indices.append([])
reward.append([])
next_obs.append([])
next_candidates.append([])
for _ in range(batch_size):
if len(which_memory) == 0:
t = None
assert sample_history_length > 1
trajectory_id = np.random.randint(len(which_memory))
trajectory = which_memory[trajectory_id]
if len(trajectory) <= sample_history_length:
t = None
if contains_first_step:
head = 0
else:
if 1 >= len(trajectory) - sample_history_length:
t = None
head = np.random.randint(1, len(trajectory) - sample_history_length)
final = len(trajectory) - 1
(seq_obs, seq_candidates, seq_chosen_indices, seq_reward, seq_next_obs, seq_next_candidates) = ([], [], [], [], [], [])
task = trajectory[head].task_list
for j in range(sample_history_length):
seq_obs.append(trajectory[head + j].observation_list)
seq_candidates.append(trajectory[head + j].action_candidate_list)
seq_chosen_indices.append(trajectory[head + j].chosen_indices)
seq_next_obs.append(trajectory[head + j + 1].observation_list)
seq_next_candidates.append(trajectory[head + j + 1].action_candidate_list)
how_long = final - (head + j) + 1 if self.accumulate_reward_from_final else 1
accumulated_rewards = [self.discount_gamma_game_reward ** i * trajectory[head + j + i].reward for i in range(how_long)]
accumulated_rewards = accumulated_rewards[:1]
game_reward = torch.sum(torch.stack(accumulated_rewards))
accumulated_count_rewards = [self.discount_gamma_count_reward ** i * trajectory[head + j + i].count_reward for i in range(how_long)]
accumulated_count_rewards = accumulated_count_rewards[:1]
count_reward = torch.sum(torch.stack(accumulated_count_rewards))
accumulated_novel_object_rewards = [self.discount_gamma_novel_object_reward ** i * trajectory[head + j + i].novel_object_reward for i in range(how_long)]
accumulated_novel_object_rewards = accumulated_novel_object_rewards[:1]
novel_object_reward = torch.sum(torch.stack(accumulated_novel_object_rewards))
seq_reward.append(game_reward + count_reward + novel_object_reward)
t = [seq_obs, seq_candidates, seq_chosen_indices, seq_reward, seq_next_obs, seq_next_candidates, task]
if t is None:
continue
task.append(t[6])
for step in range(sample_history_length):
obs[step].append(t[0][step])
candidates[step].append(t[1][step])
chosen_indices[step].append(t[2][step])
reward[step].append(t[3][step])
next_obs[step].append(t[4][step])
next_candidates[step].append(t[5][step])
if len(task) == 0:
return None
return [obs, task, candidates, chosen_indices, reward, next_obs, next_candidates]
|
alfworld
|
positive
|
def merge_cfg_from_list(cfg_list):
"""Merge config keys, values in a list (e.g., from command line) into the
global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.
"""
assert len(cfg_list) % 2 == 0
for (full_key, v) in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = full_key.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d, 'Non-existent key: {}'.format(full_key)
d = d[subkey]
subkey = key_list[-1]
assert subkey in d, 'Non-existent key: {}'.format(full_key)
<DeepExtract>
        if isinstance(v, dict):
            value = AttrDict(v)
        elif not isinstance(v, six.string_types):
            value = v
        else:
            try:
                v = literal_eval(v)
            except ValueError:
                pass
            except SyntaxError:
                pass
            value = v
</DeepExtract>
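        # Coerce the decoded value to the type of the existing config entry before assignment.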
<DeepExtract>
type_b = type(d[subkey])
type_a = type(value)
if type_a is type_b:
value = value
        elif isinstance(d[subkey], np.ndarray):
value = np.array(value, dtype=d[subkey].dtype)
elif isinstance(d[subkey], six.string_types):
value = str(value)
elif isinstance(value, tuple) and isinstance(d[subkey], list):
value = list(value)
elif isinstance(value, list) and isinstance(d[subkey], tuple):
value = tuple(value)
else:
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, d[subkey], value, full_key))
value = value
</DeepExtract>
d[subkey] = value
|
def merge_cfg_from_list(cfg_list):
"""Merge config keys, values in a list (e.g., from command line) into the
global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.
"""
assert len(cfg_list) % 2 == 0
for (full_key, v) in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = full_key.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d, 'Non-existent key: {}'.format(full_key)
d = d[subkey]
subkey = key_list[-1]
assert subkey in d, 'Non-existent key: {}'.format(full_key)
        if isinstance(v, dict):
            value = AttrDict(v)
        elif not isinstance(v, six.string_types):
            value = v
        else:
            try:
                v = literal_eval(v)
            except ValueError:
                pass
            except SyntaxError:
                pass
            value = v
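        # Coerce the decoded value to the type of the existing config entry before assignment.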
type_b = type(d[subkey])
type_a = type(value)
if type_a is type_b:
value = value
        elif isinstance(d[subkey], np.ndarray):
value = np.array(value, dtype=d[subkey].dtype)
elif isinstance(d[subkey], six.string_types):
value = str(value)
elif isinstance(value, tuple) and isinstance(d[subkey], list):
value = list(value)
elif isinstance(value, list) and isinstance(d[subkey], tuple):
value = tuple(value)
else:
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, d[subkey], value, full_key))
value = value
d[subkey] = value
|
DIoU-pytorch-detectron
|
positive
|
@property
def config(self):
if self._config is None:
<DeepExtract>
if section and section in self.name.config_loader:
log.detail('[{section}]'.format(section=section))
self._config = self.name.config_loader[section]
log.detail('[{section}]'.format(section='DEFAULT'))
self._config = self.name.config_loader['DEFAULT']
</DeepExtract>
return self._config
|
@property
def config(self):
if self._config is None:
if section and section in self.name.config_loader:
log.detail('[{section}]'.format(section=section))
self._config = self.name.config_loader[section]
log.detail('[{section}]'.format(section='DEFAULT'))
self._config = self.name.config_loader['DEFAULT']
return self._config
|
cpppo
|
positive
|
@flask_login.login_required
def post(self):
"""
---
description: Create a user.
responses:
"201": "UserCreated"
"400": "400"
"401": "401"
requestBody:
content:
application/json:
schema: UserCreate
tags:
- Users
"""
<DeepExtract>
data = self.validate(self.schema.create)
email = data.get('email')
if user and user.email == email:
data = data
other = User.first(email=email)
if other:
message = 'Email address already in use.'
self.abort_400_bad_request({'email': [message]})
data = data
</DeepExtract>
user = User.create(data)
return self.response_201_created(self.serializer.one.dump(user))
|
@flask_login.login_required
def post(self):
"""
---
description: Create a user.
responses:
"201": "UserCreated"
"400": "400"
"401": "401"
requestBody:
content:
application/json:
schema: UserCreate
tags:
- Users
"""
data = self.validate(self.schema.create)
email = data.get('email')
if user and user.email == email:
data = data
other = User.first(email=email)
if other:
message = 'Email address already in use.'
self.abort_400_bad_request({'email': [message]})
data = data
user = User.create(data)
return self.response_201_created(self.serializer.one.dump(user))
|
conbench
|
positive
|
def get_wind(data: List[str], units: Units) -> Tuple[List[str], Optional[Number], Optional[Number], Optional[Number], List[Number]]:
"""Returns the report list and removed:
Direction string, speed string, gust string, variable direction list
"""
(direction, speed, gust) = ('', '', '')
variable: List[Number] = []
if data:
item = copy(data[0])
if is_wind(item):
if item.endswith('KT'):
item = item.replace('KT', '')
elif item.endswith('KTS'):
item = item.replace('KTS', '')
elif item.endswith('MPS'):
units.wind_speed = 'm/s'
item = item.replace('MPS', '')
elif item.endswith('KMH'):
units.wind_speed = 'km/h'
item = item.replace('KMH', '')
elif item.endswith('MPH'):
units.wind_speed = 'mi/h'
item = item.replace('MPH', '')
<DeepExtract>
(direction, speed, gust) = ('', '', '')
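            # A 'G' in the wind group marks gusts; slice it out before splitting direction and speed.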
if 'G' in item:
g_index = item.find('G')
(start, end) = (g_index + 1, g_index + 3)
if 'GP' in item:
end += 1
gust = item[start:end]
item = item[:g_index] + item[end:]
if item:
if len(item) == 2:
speed = item
else:
direction = item[:3]
speed = item[3:]
(direction, speed, gust) = (direction, speed, gust)
</DeepExtract>
data.pop(0)
if data and 1 < len(data[0]) < 4 and (data[0][0] == 'G') and data[0][1:].isdigit():
gust = data.pop(0)[1:]
if data and is_variable_wind_direction(data[0]):
for item in data.pop(0).split('V'):
<DeepExtract>
if not item or is_unknown(item):
value = None
with suppress(KeyError):
item = (special or {}).get(item) or SPECIAL_NUMBERS[item]
if isinstance(item, tuple):
(value, spoken) = item
else:
value = item
spoken = spoken_number(str(value), literal=True)
value = Number(repr or item, value, spoken)
if item in CARDINALS:
if not repr:
repr = item
item = str(CARDINALS[item])
item = item.rstrip('M.')
item = item.replace('O', '0')
item = item.replace('+', '')
if '/' in item:
value = make_fraction(item, repr, True)
if 'M' in item:
val_str = item.replace('MM', '-').replace('M', '-')
while val_str[0] != '-':
val_str = val_str[1:]
else:
val_str = item
speak_prefix = ''
if val_str.startswith('ABV '):
speak_prefix += 'above '
val_str = val_str[4:]
if val_str.startswith('BLW '):
speak_prefix += 'below '
val_str = val_str[4:]
if val_str.startswith('FL'):
speak_prefix += 'flight level '
                        val_str = val_str[2:]
if not val_str:
value = None
if '.' in item:
value = float(val_str)
if not value:
value = 0
else:
value = int(val_str)
spoken = speak_prefix + spoken_number(item or str(value), True)
value = Number(repr or item, value, spoken)
</DeepExtract>
if value is not None:
variable.append(value)
<DeepExtract>
if not direction or is_unknown(direction):
direction_value = None
with suppress(KeyError):
item = (special or {}).get(direction) or SPECIAL_NUMBERS[direction]
if isinstance(item, tuple):
(value, spoken) = item
else:
value = item
spoken = spoken_number(str(value), literal=True)
direction_value = Number(repr or direction, value, spoken)
if direction in CARDINALS:
if not repr:
repr = direction
direction = str(CARDINALS[direction])
direction = direction.rstrip('M.')
direction = direction.replace('O', '0')
direction = direction.replace('+', '')
if '/' in direction:
direction_value = make_fraction(direction, repr, True)
if 'M' in direction:
val_str = direction.replace('MM', '-').replace('M', '-')
while val_str[0] != '-':
val_str = val_str[1:]
else:
val_str = direction
speak_prefix = ''
if val_str.startswith('ABV '):
speak_prefix += 'above '
val_str = val_str[4:]
if val_str.startswith('BLW '):
speak_prefix += 'below '
val_str = val_str[4:]
if val_str.startswith('FL'):
speak_prefix += 'flight level '
(val_str, True) = (val_str[2:], True)
if not val_str:
direction_value = None
if '.' in direction:
value = float(val_str)
if not value:
value = 0
else:
value = int(val_str)
spoken = speak_prefix + spoken_number(direction or str(value), True)
direction_value = Number(repr or direction, value, spoken)
</DeepExtract>
<DeepExtract>
    speed_str = speed.strip('BV')
    if not speed_str or is_unknown(speed_str):
        speed_value = None
    with suppress(KeyError):
        item = (special or {}).get(speed_str) or SPECIAL_NUMBERS[speed_str]
        if isinstance(item, tuple):
            (value, spoken) = item
        else:
            value = item
            spoken = spoken_number(str(value), literal=literal)
        speed_value = Number(repr or speed_str, value, spoken)
    if speed_str in CARDINALS:
        if not repr:
            repr = speed_str
        speed_str = str(CARDINALS[speed_str])
    speed_str = speed_str.rstrip('M.')
    speed_str = speed_str.replace('O', '0')
    speed_str = speed_str.replace('+', '')
    if '/' in speed_str:
        speed_value = make_fraction(speed_str, repr, literal)
    if 'M' in speed_str:
        val_str = speed_str.replace('MM', '-').replace('M', '-')
        while val_str[0] != '-':
            val_str = val_str[1:]
    else:
        val_str = speed_str
    speak_prefix = ''
    if val_str.startswith('ABV '):
        speak_prefix += 'above '
        val_str = val_str[4:]
    if val_str.startswith('BLW '):
        speak_prefix += 'below '
        val_str = val_str[4:]
    if val_str.startswith('FL'):
        speak_prefix += 'flight level '
        (val_str, literal) = (val_str[2:], True)
    if not val_str:
        speed_value = None
    if '.' in speed_str:
        value = float(val_str)
        if not value:
            value = 0
    else:
        value = int(val_str)
    spoken = speak_prefix + spoken_number(speak or str(value), literal)
    speed_value = Number(repr or speed_str, value, spoken)
</DeepExtract>
<DeepExtract>
if not gust or is_unknown(gust):
gust_value = None
with suppress(KeyError):
item = (special or {}).get(gust) or SPECIAL_NUMBERS[gust]
if isinstance(item, tuple):
(value, spoken) = item
else:
value = item
spoken = spoken_number(str(value), literal=literal)
gust_value = Number(repr or gust, value, spoken)
if gust in CARDINALS:
if not repr:
repr = gust
gust = str(CARDINALS[gust])
gust = gust.rstrip('M.')
gust = gust.replace('O', '0')
gust = gust.replace('+', '')
if '/' in gust:
gust_value = make_fraction(gust, repr, literal)
if 'M' in gust:
val_str = gust.replace('MM', '-').replace('M', '-')
while val_str[0] != '-':
val_str = val_str[1:]
else:
val_str = gust
speak_prefix = ''
if val_str.startswith('ABV '):
speak_prefix += 'above '
val_str = val_str[4:]
if val_str.startswith('BLW '):
speak_prefix += 'below '
val_str = val_str[4:]
if val_str.startswith('FL'):
speak_prefix += 'flight level '
(val_str, literal) = (val_str[2:], True)
if not val_str:
gust_value = None
if '.' in gust:
value = float(val_str)
if not value:
value = 0
else:
value = int(val_str)
spoken = speak_prefix + spoken_number(speak or str(value), literal)
gust_value = Number(repr or gust, value, spoken)
</DeepExtract>
return (data, direction_value, speed_value, gust_value, variable)
|
def get_wind(data: List[str], units: Units) -> Tuple[List[str], Optional[Number], Optional[Number], Optional[Number], List[Number]]:
"""Returns the report list and removed:
Direction string, speed string, gust string, variable direction list
"""
(direction, speed, gust) = ('', '', '')
variable: List[Number] = []
if data:
item = copy(data[0])
if is_wind(item):
if item.endswith('KT'):
item = item.replace('KT', '')
elif item.endswith('KTS'):
item = item.replace('KTS', '')
elif item.endswith('MPS'):
units.wind_speed = 'm/s'
item = item.replace('MPS', '')
elif item.endswith('KMH'):
units.wind_speed = 'km/h'
item = item.replace('KMH', '')
elif item.endswith('MPH'):
units.wind_speed = 'mi/h'
item = item.replace('MPH', '')
(direction, speed, gust) = ('', '', '')
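            # A 'G' in the wind group marks gusts; slice it out before splitting direction and speed.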
if 'G' in item:
g_index = item.find('G')
(start, end) = (g_index + 1, g_index + 3)
if 'GP' in item:
end += 1
gust = item[start:end]
item = item[:g_index] + item[end:]
if item:
if len(item) == 2:
speed = item
else:
direction = item[:3]
speed = item[3:]
(direction, speed, gust) = (direction, speed, gust)
data.pop(0)
if data and 1 < len(data[0]) < 4 and (data[0][0] == 'G') and data[0][1:].isdigit():
gust = data.pop(0)[1:]
if data and is_variable_wind_direction(data[0]):
for item in data.pop(0).split('V'):
if not item or is_unknown(item):
value = None
with suppress(KeyError):
item = (special or {}).get(item) or SPECIAL_NUMBERS[item]
if isinstance(item, tuple):
(value, spoken) = item
else:
value = item
spoken = spoken_number(str(value), literal=True)
value = Number(repr or item, value, spoken)
if item in CARDINALS:
if not repr:
repr = item
item = str(CARDINALS[item])
item = item.rstrip('M.')
item = item.replace('O', '0')
item = item.replace('+', '')
if '/' in item:
value = make_fraction(item, repr, True)
if 'M' in item:
val_str = item.replace('MM', '-').replace('M', '-')
while val_str[0] != '-':
val_str = val_str[1:]
else:
val_str = item
speak_prefix = ''
if val_str.startswith('ABV '):
speak_prefix += 'above '
val_str = val_str[4:]
if val_str.startswith('BLW '):
speak_prefix += 'below '
val_str = val_str[4:]
if val_str.startswith('FL'):
speak_prefix += 'flight level '
                        val_str = val_str[2:]
if not val_str:
value = None
if '.' in item:
value = float(val_str)
if not value:
value = 0
else:
value = int(val_str)
spoken = speak_prefix + spoken_number(item or str(value), True)
value = Number(repr or item, value, spoken)
if value is not None:
variable.append(value)
if not direction or is_unknown(direction):
direction_value = None
with suppress(KeyError):
item = (special or {}).get(direction) or SPECIAL_NUMBERS[direction]
if isinstance(item, tuple):
(value, spoken) = item
else:
value = item
spoken = spoken_number(str(value), literal=True)
direction_value = Number(repr or direction, value, spoken)
if direction in CARDINALS:
if not repr:
repr = direction
direction = str(CARDINALS[direction])
direction = direction.rstrip('M.')
direction = direction.replace('O', '0')
direction = direction.replace('+', '')
if '/' in direction:
direction_value = make_fraction(direction, repr, True)
if 'M' in direction:
val_str = direction.replace('MM', '-').replace('M', '-')
while val_str[0] != '-':
val_str = val_str[1:]
else:
val_str = direction
speak_prefix = ''
if val_str.startswith('ABV '):
speak_prefix += 'above '
val_str = val_str[4:]
if val_str.startswith('BLW '):
speak_prefix += 'below '
val_str = val_str[4:]
if val_str.startswith('FL'):
speak_prefix += 'flight level '
        val_str = val_str[2:]
if not val_str:
direction_value = None
if '.' in direction:
value = float(val_str)
if not value:
value = 0
else:
value = int(val_str)
spoken = speak_prefix + spoken_number(direction or str(value), True)
direction_value = Number(repr or direction, value, spoken)
    speed_str = speed.strip('BV')
    if not speed_str or is_unknown(speed_str):
        speed_value = None
    with suppress(KeyError):
        item = (special or {}).get(speed_str) or SPECIAL_NUMBERS[speed_str]
        if isinstance(item, tuple):
            (value, spoken) = item
        else:
            value = item
            spoken = spoken_number(str(value), literal=literal)
        speed_value = Number(repr or speed_str, value, spoken)
    if speed_str in CARDINALS:
        if not repr:
            repr = speed_str
        speed_str = str(CARDINALS[speed_str])
    speed_str = speed_str.rstrip('M.')
    speed_str = speed_str.replace('O', '0')
    speed_str = speed_str.replace('+', '')
    if '/' in speed_str:
        speed_value = make_fraction(speed_str, repr, literal)
    if 'M' in speed_str:
        val_str = speed_str.replace('MM', '-').replace('M', '-')
        while val_str[0] != '-':
            val_str = val_str[1:]
    else:
        val_str = speed_str
    speak_prefix = ''
    if val_str.startswith('ABV '):
        speak_prefix += 'above '
        val_str = val_str[4:]
    if val_str.startswith('BLW '):
        speak_prefix += 'below '
        val_str = val_str[4:]
    if val_str.startswith('FL'):
        speak_prefix += 'flight level '
        (val_str, literal) = (val_str[2:], True)
    if not val_str:
        speed_value = None
    if '.' in speed_str:
        value = float(val_str)
        if not value:
            value = 0
    else:
        value = int(val_str)
    spoken = speak_prefix + spoken_number(speak or str(value), literal)
    speed_value = Number(repr or speed_str, value, spoken)
if not gust or is_unknown(gust):
gust_value = None
with suppress(KeyError):
item = (special or {}).get(gust) or SPECIAL_NUMBERS[gust]
if isinstance(item, tuple):
(value, spoken) = item
else:
value = item
spoken = spoken_number(str(value), literal=literal)
gust_value = Number(repr or gust, value, spoken)
if gust in CARDINALS:
if not repr:
repr = gust
gust = str(CARDINALS[gust])
gust = gust.rstrip('M.')
gust = gust.replace('O', '0')
gust = gust.replace('+', '')
if '/' in gust:
gust_value = make_fraction(gust, repr, literal)
if 'M' in gust:
val_str = gust.replace('MM', '-').replace('M', '-')
while val_str[0] != '-':
val_str = val_str[1:]
else:
val_str = gust
speak_prefix = ''
if val_str.startswith('ABV '):
speak_prefix += 'above '
val_str = val_str[4:]
if val_str.startswith('BLW '):
speak_prefix += 'below '
val_str = val_str[4:]
if val_str.startswith('FL'):
speak_prefix += 'flight level '
(val_str, literal) = (val_str[2:], True)
if not val_str:
gust_value = None
if '.' in gust:
value = float(val_str)
if not value:
value = 0
else:
value = int(val_str)
spoken = speak_prefix + spoken_number(speak or str(value), literal)
gust_value = Number(repr or gust, value, spoken)
return (data, direction_value, speed_value, gust_value, variable)
|
avwx-engine
|
positive
|
def test_interpolate_3d_cubic_extrapolate_linear_xmidyinfzsup(self):
"""3D cubic interpolation. Test values in the extrapolation area with x inside and y below and z above the interpolation area.
"""
<DeepExtract>
if x is None:
x = self.x
if y is None:
y = self.y
if z is None:
z = self.z
if data is None:
data = self.data
self.interp_data = data_file.cubic_interpolated_data
self.extrap_data_nea = data_file.cubic_nearest_extrapolated_data
self.extrap_data_lin = data_file.cubic_linear_extrapolated_data
self.extrap_data_qua = data_file.cubic_quadratic_extrapolated_data
self.interp_func = interpolators3d.Interpolate3DCubic(x, y, z, data, extrapolate=True, extrapolation_range=10, extrapolation_type='linear', tolerate_single_value=tolerate_single_value)
</DeepExtract>
<DeepExtract>
(mini, maxi) = self.extrapol_xdomains[1]
(minj, maxj) = self.extrapol_ydomains[0]
(mink, maxk) = self.extrapol_zdomains[2]
for iex in range(mini, maxi):
for jex in range(minj, maxj):
for kex in range(mink, maxk):
self.assertAlmostEqual(self.interp_func(self.xsamples_ex[iex], self.ysamples_ex[jex], self.zsamples_ex[kex]), self.extrap_data_lin[1][0][2][iex - mini, jex - minj, kex - mink], delta=1e-08)
</DeepExtract>
|
def test_interpolate_3d_cubic_extrapolate_linear_xmidyinfzsup(self):
"""3D cubic interpolation. Test values in the extrapolation area with x inside and y below and z above the interpolation area.
"""
if x is None:
x = self.x
if y is None:
y = self.y
if z is None:
z = self.z
if data is None:
data = self.data
self.interp_data = data_file.cubic_interpolated_data
self.extrap_data_nea = data_file.cubic_nearest_extrapolated_data
self.extrap_data_lin = data_file.cubic_linear_extrapolated_data
self.extrap_data_qua = data_file.cubic_quadratic_extrapolated_data
self.interp_func = interpolators3d.Interpolate3DCubic(x, y, z, data, extrapolate=True, extrapolation_range=10, extrapolation_type='linear', tolerate_single_value=tolerate_single_value)
(mini, maxi) = self.extrapol_xdomains[1]
(minj, maxj) = self.extrapol_ydomains[0]
(mink, maxk) = self.extrapol_zdomains[2]
for iex in range(mini, maxi):
for jex in range(minj, maxj):
for kex in range(mink, maxk):
self.assertAlmostEqual(self.interp_func(self.xsamples_ex[iex], self.ysamples_ex[jex], self.zsamples_ex[kex]), self.extrap_data_lin[1][0][2][iex - mini, jex - minj, kex - mink], delta=1e-08)
|
core
|
positive
|
def assertMain(self, argd, stdout=None, stderr=None, should_fail=False, msg=None):
<DeepExtract>
with StdErrCatcher() as stderr:
with StdOutCatcher() as stdout:
ret = self.run_main_test(argd, should_fail=should_fail)
(ret, out, err) = (ret, stdout.output, stderr.output)
</DeepExtract>
if should_fail:
<DeepExtract>
if ret > 0:
return None
raise self.failureException(_equality_msg('<=', ret, 0, msg=msg or 'main() return a zero exit status.'))
</DeepExtract>
else:
try:
<DeepExtract>
if ret == 0:
return None
raise self.failureException(_equality_msg('!=', ret, 0, msg=msg or 'main() returned a non-zero exit status.'))
</DeepExtract>
except self.failureException as ex:
if err:
msg = '\n'.join((str(ex), '\nStderr:', err))
raise self.failureException(msg) from ex
else:
raise
if should_fail and stderr is not None:
<DeepExtract>
if err == stderr:
return None
raise self.failureException(_equality_msg('!=', err, stderr, msg=msg or 'main() printed something to stderr.'))
</DeepExtract>
elif not should_fail:
<DeepExtract>
if err == stderr or '':
return None
raise self.failureException(_equality_msg('!=', err, stderr or '', msg=msg or 'main() printed something to stderr.'))
</DeepExtract>
if should_fail and stdout is not None:
<DeepExtract>
if out == stdout:
return None
raise self.failureException(_equality_msg('!=', out, stdout, msg=msg or 'Output from main() did not match.'))
</DeepExtract>
elif not should_fail:
<DeepExtract>
if len(out) > 0:
return None
raise self.failureException(_equality_msg('<=', len(out), 0, msg=msg or 'main() did not produce any stdout output.'))
</DeepExtract>
|
def assertMain(self, argd, stdout=None, stderr=None, should_fail=False, msg=None):
with StdErrCatcher() as stderr:
with StdOutCatcher() as stdout:
ret = self.run_main_test(argd, should_fail=should_fail)
(ret, out, err) = (ret, stdout.output, stderr.output)
if should_fail:
if ret > 0:
return None
raise self.failureException(_equality_msg('<=', ret, 0, msg=msg or 'main() return a zero exit status.'))
else:
try:
if ret == 0:
return None
raise self.failureException(_equality_msg('!=', ret, 0, msg=msg or 'main() returned a non-zero exit status.'))
except self.failureException as ex:
if err:
msg = '\n'.join((str(ex), '\nStderr:', err))
raise self.failureException(msg) from ex
else:
raise
if should_fail and stderr is not None:
if err == stderr:
return None
raise self.failureException(_equality_msg('!=', err, stderr, msg=msg or 'main() printed something to stderr.'))
elif not should_fail:
if err == stderr or '':
return None
raise self.failureException(_equality_msg('!=', err, stderr or '', msg=msg or 'main() printed something to stderr.'))
if should_fail and stdout is not None:
if out == stdout:
return None
raise self.failureException(_equality_msg('!=', out, stdout, msg=msg or 'Output from main() did not match.'))
elif not should_fail:
if len(out) > 0:
return None
raise self.failureException(_equality_msg('<=', len(out), 0, msg=msg or 'main() did not produce any stdout output.'))
|
colr
|
positive
|
def remove_exam_attempt(attempt_id, requesting_user):
"""
Removes an exam attempt given the attempt id. requesting_user is passed through to the instructor_service.
"""
log.info('Removing exam attempt_id=%(attempt_id)s', {'attempt_id': attempt_id})
existing_attempt = ProctoredExamStudentAttempt.objects.get_exam_attempt_by_id(attempt_id)
if not existing_attempt:
err_msg = f'Cannot remove attempt for attempt_id={attempt_id} because it does not exist!'
raise StudentExamAttemptDoesNotExistsException(err_msg)
username = existing_attempt.user.username
user_id = existing_attempt.user.id
course_id = existing_attempt.proctored_exam.course_id
content_id = existing_attempt.proctored_exam.content_id
to_status = existing_attempt.status
existing_attempt.delete_exam_attempt()
instructor_service = get_runtime_service('instructor')
grades_service = get_runtime_service('grades')
if instructor_service:
instructor_service.delete_student_attempt(username, course_id, content_id, requesting_user=requesting_user)
if grades_service:
grades_service.undo_override_subsection_grade(user_id=user_id, course_key_or_id=course_id, usage_key_or_id=content_id)
if ProctoredExamStudentAttemptStatus.needs_credit_status_update(to_status):
credit_service = get_runtime_service('credit')
credit_service.remove_credit_requirement_status(user_id=user_id, course_key_or_id=course_id, req_namespace='proctored_exam', req_name=content_id)
<DeepExtract>
proctored_exam = ProctoredExam.get_exam_by_content_id(course_id, content_id)
if proctored_exam is None:
err_msg = f'Cannot find proctored exam in course_id={course_id} with content_id={content_id}'
raise ProctoredExamNotFoundException(err_msg)
serialized_exam_object = ProctoredExamSerializer(proctored_exam)
exam = serialized_exam_object.data
</DeepExtract>
serialized_attempt_obj = ProctoredExamStudentAttemptSerializer(existing_attempt)
attempt = serialized_attempt_obj.data
emit_event(exam, 'deleted', attempt=attempt)
|
def remove_exam_attempt(attempt_id, requesting_user):
"""
Removes an exam attempt given the attempt id. requesting_user is passed through to the instructor_service.
"""
log.info('Removing exam attempt_id=%(attempt_id)s', {'attempt_id': attempt_id})
existing_attempt = ProctoredExamStudentAttempt.objects.get_exam_attempt_by_id(attempt_id)
if not existing_attempt:
err_msg = f'Cannot remove attempt for attempt_id={attempt_id} because it does not exist!'
raise StudentExamAttemptDoesNotExistsException(err_msg)
username = existing_attempt.user.username
user_id = existing_attempt.user.id
course_id = existing_attempt.proctored_exam.course_id
content_id = existing_attempt.proctored_exam.content_id
to_status = existing_attempt.status
existing_attempt.delete_exam_attempt()
instructor_service = get_runtime_service('instructor')
grades_service = get_runtime_service('grades')
if instructor_service:
instructor_service.delete_student_attempt(username, course_id, content_id, requesting_user=requesting_user)
if grades_service:
grades_service.undo_override_subsection_grade(user_id=user_id, course_key_or_id=course_id, usage_key_or_id=content_id)
if ProctoredExamStudentAttemptStatus.needs_credit_status_update(to_status):
credit_service = get_runtime_service('credit')
credit_service.remove_credit_requirement_status(user_id=user_id, course_key_or_id=course_id, req_namespace='proctored_exam', req_name=content_id)
proctored_exam = ProctoredExam.get_exam_by_content_id(course_id, content_id)
if proctored_exam is None:
err_msg = f'Cannot find proctored exam in course_id={course_id} with content_id={content_id}'
raise ProctoredExamNotFoundException(err_msg)
serialized_exam_object = ProctoredExamSerializer(proctored_exam)
exam = serialized_exam_object.data
serialized_attempt_obj = ProctoredExamStudentAttemptSerializer(existing_attempt)
attempt = serialized_attempt_obj.data
emit_event(exam, 'deleted', attempt=attempt)
|
edx-proctoring
|
positive
|
def next(self, states, **runopts):
"""Collapse all `states` to a single output state using the `self.runnable`."""
logger.debug('{} collapsing {} input states with {!r}'.format(self.name, len(states), self.runnable))
states = iter(states)
if self.initial_state is None:
<DeepExtract>
runopts['executor'] = immediate_executor
for component in self.components:
states = component.run(states, **runopts)
result = states.result()
</DeepExtract>
else:
result = self.initial_state
runopts['executor'] = immediate_executor
for state in states:
result = self.runnable.run(States(result, state), **runopts).result()
return result
|
def next(self, states, **runopts):
"""Collapse all `states` to a single output state using the `self.runnable`."""
logger.debug('{} collapsing {} input states with {!r}'.format(self.name, len(states), self.runnable))
states = iter(states)
if self.initial_state is None:
runopts['executor'] = immediate_executor
for component in self.components:
states = component.run(states, **runopts)
result = states.result()
else:
result = self.initial_state
runopts['executor'] = immediate_executor
for state in states:
result = self.runnable.run(States(result, state), **runopts).result()
return result
|
dwave-hybrid
|
positive
|
def on_selectTe1_clicked(b):
from .templates import template1
script = template1()
old = [i for i in self.pages]
try:
<DeepExtract>
blocks = {}
it = max(self.pages) + 1
check_block = False
for line in script:
if '##' in line:
it += 1
blocks[it] = [line]
check_block = True
continue
elif '#' in line:
check_block = False
elif check_block and ('<' in line or '>' in line):
blocks[it].append(line)
continue
self.loading_bids = range(min(blocks), max(blocks) + 1)
self._options(blocks)
self._transform()
</DeepExtract>
self.block_id = max(self.pages)
selectTe1.icon = 'check'
except Exception as err:
print('Invalid configuration file ...')
print(' IOError: %s' % err.message)
print('... Not loaded!')
selectTe1.icon = 'remove'
rm = [i for i in self.pages if i not in old]
for ib in rm:
if ib in self.pages:
del self.pages[ib]
self.debut = False
<DeepExtract>
id = 1
add_page_VBox = self.add_page_widgets()
self.pages[id] = container_page('Add a block', id, add_page_VBox)
self.display_accordion(id)
</DeepExtract>
self.graph.close()
dot = Digraph(format='png')
for edge in self.comp_graph:
dot.node('%i' % edge[0], label='%i %s' % (edge[0], self.pages[edge[0]].title))
dot.node('%i' % edge[2], label='%i %s' % (edge[2], self.pages[edge[2]].title))
dot.edge('%i' % edge[0], '%i' % edge[2], label='%s > %s' % (edge[1], edge[3]), labelfontcolor='green')
self.graph = widgets.Image(value=dot.pipe(), format='png')
display(self.graph)
|
def on_selectTe1_clicked(b):
from .templates import template1
script = template1()
old = [i for i in self.pages]
try:
blocks = {}
it = max(self.pages) + 1
check_block = False
for line in script:
if '##' in line:
it += 1
blocks[it] = [line]
check_block = True
continue
elif '#' in line:
check_block = False
elif check_block and ('<' in line or '>' in line):
blocks[it].append(line)
continue
self.loading_bids = range(min(blocks), max(blocks) + 1)
self._options(blocks)
self._transform()
self.block_id = max(self.pages)
selectTe1.icon = 'check'
except Exception as err:
print('Invalid configuration file ...')
print(' IOError: %s' % err.message)
print('... Not loaded!')
selectTe1.icon = 'remove'
rm = [i for i in self.pages if i not in old]
for ib in rm:
if ib in self.pages:
del self.pages[ib]
self.debut = False
id = 1
add_page_VBox = self.add_page_widgets()
self.pages[id] = container_page('Add a block', id, add_page_VBox)
self.display_accordion(id)
self.graph.close()
dot = Digraph(format='png')
for edge in self.comp_graph:
dot.node('%i' % edge[0], label='%i %s' % (edge[0], self.pages[edge[0]].title))
dot.node('%i' % edge[2], label='%i %s' % (edge[2], self.pages[edge[2]].title))
dot.edge('%i' % edge[0], '%i' % edge[2], label='%s > %s' % (edge[1], edge[3]), labelfontcolor='green')
self.graph = widgets.Image(value=dot.pipe(), format='png')
display(self.graph)
|
chemml
|
positive
|
def get_mod_edit_info(self, mod: util.BcmlMod) -> set:
edited = set()
<DeepExtract>
diffs = Hash()
if self.is_mod_logged(mod):
util.dict_merge(diffs, oead.byml.from_text((mod.path / 'logs' / self._log_name).read_text(encoding='utf-8')), overwrite_lists=True)
for opt in {d for d in (mod.path / 'options').glob('*') if d.is_dir()}:
if (opt / 'logs' / self._log_name).exists():
util.dict_merge(diffs, oead.byml.from_text((opt / 'logs' / self._log_name).read_text('utf-8')), overwrite_lists=True)
diff = diffs
</DeepExtract>
for (_, stuff) in diff.items():
for items in dict(stuff['add']).values():
edited |= set(items.keys())
edited |= set(stuff['del'])
return edited
|
def get_mod_edit_info(self, mod: util.BcmlMod) -> set:
edited = set()
diffs = Hash()
if self.is_mod_logged(mod):
util.dict_merge(diffs, oead.byml.from_text((mod.path / 'logs' / self._log_name).read_text(encoding='utf-8')), overwrite_lists=True)
for opt in {d for d in (mod.path / 'options').glob('*') if d.is_dir()}:
if (opt / 'logs' / self._log_name).exists():
util.dict_merge(diffs, oead.byml.from_text((opt / 'logs' / self._log_name).read_text('utf-8')), overwrite_lists=True)
diff = diffs
for (_, stuff) in diff.items():
for items in dict(stuff['add']).values():
edited |= set(items.keys())
edited |= set(stuff['del'])
return edited
|
BCML
|
positive
|
def forward(self, gdata, cdata, axis):
"""Apply forward transform along specified axis."""
<DeepExtract>
dtype = np.complex128
logger.debug('Building FFTW FFT plan for (dtype, gshape, axis) = (%s, %s, %s)' % (dtype, gdata.shape, axis))
flags = ['FFTW_' + FFTW_RIGOR().upper()]
plan = fftw.FourierTransform(dtype, gdata.shape, axis, flags=flags)
temp = fftw.create_array(plan.cshape, np.complex128)
(plan, temp) = (plan, temp)
</DeepExtract>
plan.forward(gdata, temp)
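        # Repack the half-complex FFT output: scaled mean first, then interleaved cosine/(minus sine) coefficient pairs.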
<DeepExtract>
Kmax = self.Kmax
meancos = axslice(axis, 0, 1)
np.multiply(temp[meancos].real, 1 / self.N, cdata[meancos])
cdata[axslice(axis, 1, 2)] = 0
temp_posfreq = temp[axslice(axis, 1, Kmax + 1)]
cdata_posfreq_cos = cdata[axslice(axis, 2, 2 * (Kmax + 1), 2)]
cdata_posfreq_msin = cdata[axslice(axis, 3, 2 * (Kmax + 1), 2)]
np.multiply(temp_posfreq.real, 2 * 1 / self.N, cdata_posfreq_cos)
np.multiply(temp_posfreq.imag, 2 * 1 / self.N, cdata_posfreq_msin)
cdata[axslice(axis, 2 * (Kmax + 1), None)] = 0
</DeepExtract>
|
def forward(self, gdata, cdata, axis):
"""Apply forward transform along specified axis."""
dtype = np.complex128
logger.debug('Building FFTW FFT plan for (dtype, gshape, axis) = (%s, %s, %s)' % (dtype, gdata.shape, axis))
flags = ['FFTW_' + FFTW_RIGOR().upper()]
plan = fftw.FourierTransform(dtype, gdata.shape, axis, flags=flags)
temp = fftw.create_array(plan.cshape, np.complex128)
(plan, temp) = (plan, temp)
plan.forward(gdata, temp)
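        # Repack the half-complex FFT output: scaled mean first, then interleaved cosine/(minus sine) coefficient pairs.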
Kmax = self.Kmax
meancos = axslice(axis, 0, 1)
np.multiply(temp[meancos].real, 1 / self.N, cdata[meancos])
cdata[axslice(axis, 1, 2)] = 0
temp_posfreq = temp[axslice(axis, 1, Kmax + 1)]
cdata_posfreq_cos = cdata[axslice(axis, 2, 2 * (Kmax + 1), 2)]
cdata_posfreq_msin = cdata[axslice(axis, 3, 2 * (Kmax + 1), 2)]
np.multiply(temp_posfreq.real, 2 * 1 / self.N, cdata_posfreq_cos)
np.multiply(temp_posfreq.imag, 2 * 1 / self.N, cdata_posfreq_msin)
cdata[axslice(axis, 2 * (Kmax + 1), None)] = 0
|
dedalus
|
positive
|
def set_true_icon_image(self, image_name, image_path):
self.icon_img_true_name = image_name
self.icon_img_true_path = image_path
self.icon_img_true = cui_img_load(image_name, image_path)
<DeepExtract>
self.bool_val = self.bool_val
if self.icon_img_true is not None and self.bool_val:
self.icon_img_name = self.icon_img_true_name
self.icon_img_path = self.icon_img_true_path
self.icon_img = self.icon_img_true
if self.icon_img_false is not None and (not self.bool_val):
self.icon_img_name = self.icon_img_false_name
self.icon_img_path = self.icon_img_false_path
self.icon_img = self.icon_img_false
return
</DeepExtract>
return
|
def set_true_icon_image(self, image_name, image_path):
self.icon_img_true_name = image_name
self.icon_img_true_path = image_path
self.icon_img_true = cui_img_load(image_name, image_path)
self.bool_val = self.bool_val
if self.icon_img_true is not None and self.bool_val:
self.icon_img_name = self.icon_img_true_name
self.icon_img_path = self.icon_img_true_path
self.icon_img = self.icon_img_true
if self.icon_img_false is not None and (not self.bool_val):
self.icon_img_name = self.icon_img_false_name
self.icon_img_path = self.icon_img_false_path
self.icon_img = self.icon_img_false
return
return
|
Abnormal
|
positive
|
def affiliations_for_character(self, char_id):
"""Retrieve the affiliations of a single character
Convenience wrapper around owner_ids_from_names().
"""
<DeepExtract>
rowset = api_result.result.find('rowset')
rows = rowset.findall('row')
results = {}
for row in rows:
char_id = int(row.attrib['characterID'])
char_name = row.attrib['characterName']
corp_id = int(row.attrib['corporationID']) or None
corp_name = row.attrib['corporationName'] or None
faction_id = int(row.attrib['factionID']) or None
faction_name = row.attrib['factionName'] or None
alliance_id = int(row.attrib['allianceID']) or None
alliance_name = row.attrib['allianceName'] or None
results[char_id] = {'id': char_id, 'name': char_name, 'corp': {'id': corp_id, 'name': corp_name}}
if faction_id is not None:
results[char_id]['faction'] = {'id': faction_id, 'name': faction_name}
if alliance_id is not None:
results[char_id]['alliance'] = {'id': alliance_id, 'name': alliance_name}
api_result = api.APIResult(results, api_result.timestamp, api_result.expires)
</DeepExtract>
return api.APIResult(api_result.result[char_id], api_result.timestamp, api_result.expires)
|
def affiliations_for_character(self, char_id):
"""Retrieve the affiliations of a single character
Convenience wrapper around owner_ids_from_names().
"""
rowset = api_result.result.find('rowset')
rows = rowset.findall('row')
results = {}
for row in rows:
char_id = int(row.attrib['characterID'])
char_name = row.attrib['characterName']
corp_id = int(row.attrib['corporationID']) or None
corp_name = row.attrib['corporationName'] or None
faction_id = int(row.attrib['factionID']) or None
faction_name = row.attrib['factionName'] or None
alliance_id = int(row.attrib['allianceID']) or None
alliance_name = row.attrib['allianceName'] or None
results[char_id] = {'id': char_id, 'name': char_name, 'corp': {'id': corp_id, 'name': corp_name}}
if faction_id is not None:
results[char_id]['faction'] = {'id': faction_id, 'name': faction_name}
if alliance_id is not None:
results[char_id]['alliance'] = {'id': alliance_id, 'name': alliance_name}
api_result = api.APIResult(results, api_result.timestamp, api_result.expires)
return api.APIResult(api_result.result[char_id], api_result.timestamp, api_result.expires)
|
evelink
|
positive
|
def _get_pure(self, value):
"""Returns the Python representation of the object (usually a
list, tuple or dict) that has no instances embedded within it.
"""
result = value
if self._has_instance(value):
raise StateSetterError('Value has an instance: %s' % value)
if isinstance(value, (StateList, StateTuple)):
result = [self._get_pure(x) for x in value]
if isinstance(value, StateTuple):
result = tuple(result)
elif isinstance(value, StateDict):
result = {}
for (k, v) in value.items():
<DeepExtract>
result = v
if self._has_instance(v):
raise StateSetterError('Value has an instance: %s' % v)
if isinstance(v, (StateList, StateTuple)):
result = [self._get_pure(x) for x in v]
if isinstance(v, StateTuple):
result = tuple(result)
elif isinstance(v, StateDict):
result = {}
for (k, v) in v.items():
result[k] = self._get_pure(v)
result[k] = result
</DeepExtract>
return result
|
def _get_pure(self, value):
"""Returns the Python representation of the object (usually a
list, tuple or dict) that has no instances embedded within it.
"""
result = value
if self._has_instance(value):
raise StateSetterError('Value has an instance: %s' % value)
if isinstance(value, (StateList, StateTuple)):
result = [self._get_pure(x) for x in value]
if isinstance(value, StateTuple):
result = tuple(result)
elif isinstance(value, StateDict):
result = {}
for (k, v) in value.items():
result = v
if self._has_instance(v):
raise StateSetterError('Value has an instance: %s' % v)
if isinstance(v, (StateList, StateTuple)):
result = [self._get_pure(x) for x in v]
if isinstance(v, StateTuple):
result = tuple(result)
elif isinstance(v, StateDict):
result = {}
for (k, v) in v.items():
result[k] = self._get_pure(v)
result[k] = result
return result
|
apptools
|
positive
|
@staticmethod
def load_obj(path: str, metadata: Dict[str, Union[str, int]], material_adjustments: List[Dict[str, str]], transform: Optional[Matrix]=None, parent: Optional[Entity]=None) -> List[MeshObject]:
""" Load the wavefront object file from the given path and adjust according to the given arguments.
:param path: The path to the .obj file.
:param metadata: A dict of metadata which will be written into the object's custom data.
:param material_adjustments: Adjustments to the materials which were specified inside house.json.
:param transform: The transformation that should be applied to the loaded objects.
:param parent: The parent object to which the object should be linked
:return: The list of loaded mesh objects.
"""
if not os.path.exists(path):
print(f'Warning: {path} is missing!')
return []
object_already_loaded = path in _SuncgLoader.collection_of_loaded_objs
<DeepExtract>
if not os.path.exists(path):
print(f'Warning: {path} is missing!')
loaded_objects = []
object_already_loaded = path in _SuncgLoader.collection_of_loaded_objs
loaded_objects = load_obj(filepath=path, cached_objects=_SuncgLoader.collection_of_loaded_objs)
if object_already_loaded:
print(f'Duplicate object: {path}')
for obj in loaded_objects:
obj.set_local2world_mat(Matrix([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
obj.clear_all_cps()
for obj in loaded_objects:
for key in metadata.keys():
used_key = key
if key == 'type':
used_key = 'suncg_type'
obj.set_cp(used_key, metadata[key])
_SuncgLoader.transform_and_colorize_object(obj, material_adjustments, transform, parent)
loaded_objects = loaded_objects
</DeepExtract>
if object_already_loaded:
print(f'Duplicate object: {path}')
for obj in loaded_objects:
obj.set_local2world_mat(Matrix([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
obj.clear_all_cps()
for obj in loaded_objects:
for key in metadata.keys():
used_key = key
if key == 'type':
used_key = 'suncg_type'
obj.set_cp(used_key, metadata[key])
_SuncgLoader.transform_and_colorize_object(obj, material_adjustments, transform, parent)
return loaded_objects
|
@staticmethod
def load_obj(path: str, metadata: Dict[str, Union[str, int]], material_adjustments: List[Dict[str, str]], transform: Optional[Matrix]=None, parent: Optional[Entity]=None) -> List[MeshObject]:
""" Load the wavefront object file from the given path and adjust according to the given arguments.
:param path: The path to the .obj file.
:param metadata: A dict of metadata which will be written into the object's custom data.
:param material_adjustments: Adjustments to the materials which were specified inside house.json.
:param transform: The transformation that should be applied to the loaded objects.
:param parent: The parent object to which the object should be linked
:return: The list of loaded mesh objects.
"""
if not os.path.exists(path):
print(f'Warning: {path} is missing!')
return []
object_already_loaded = path in _SuncgLoader.collection_of_loaded_objs
if not os.path.exists(path):
print(f'Warning: {path} is missing!')
loaded_objects = []
object_already_loaded = path in _SuncgLoader.collection_of_loaded_objs
loaded_objects = load_obj(filepath=path, cached_objects=_SuncgLoader.collection_of_loaded_objs)
if object_already_loaded:
print(f'Duplicate object: {path}')
for obj in loaded_objects:
obj.set_local2world_mat(Matrix([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
obj.clear_all_cps()
for obj in loaded_objects:
for key in metadata.keys():
used_key = key
if key == 'type':
used_key = 'suncg_type'
obj.set_cp(used_key, metadata[key])
_SuncgLoader.transform_and_colorize_object(obj, material_adjustments, transform, parent)
loaded_objects = loaded_objects
if object_already_loaded:
print(f'Duplicate object: {path}')
for obj in loaded_objects:
obj.set_local2world_mat(Matrix([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
obj.clear_all_cps()
for obj in loaded_objects:
for key in metadata.keys():
used_key = key
if key == 'type':
used_key = 'suncg_type'
obj.set_cp(used_key, metadata[key])
_SuncgLoader.transform_and_colorize_object(obj, material_adjustments, transform, parent)
return loaded_objects
|
BlenderProc
|
positive
|
@wraps(f)
def skip_if_lo64_(self):
<DeepExtract>
if self.conn.server_version < 90300:
(lo64, msg) = (False, "server version %s doesn't support the lo64 API" % self.conn.server_version)
if 'lo64' not in psycopg2.__version__:
(lo64, msg) = (False, "this psycopg build doesn't support the lo64 API")
(lo64, msg) = (True, 'this server and build support the lo64 API')
</DeepExtract>
if lo64:
return self.skipTest(msg)
else:
return f(self)
|
@wraps(f)
def skip_if_lo64_(self):
if self.conn.server_version < 90300:
(lo64, msg) = (False, "server version %s doesn't support the lo64 API" % self.conn.server_version)
if 'lo64' not in psycopg2.__version__:
(lo64, msg) = (False, "this psycopg build doesn't support the lo64 API")
(lo64, msg) = (True, 'this server and build support the lo64 API')
if lo64:
return self.skipTest(msg)
else:
return f(self)
|
aws-lambda-redshift-copy
|
positive
|
def testCredentials():
<DeepExtract>
address = serverAddress + '/api/user/my?incRoles=True'
if 'get' == 'get':
r = requests.get(address, auth=(username, userPassword), params={}, verify=False)
elif 'get' == 'post' and execute:
r = requests.post(address, auth=(username, userPassword), data={}, params={}, verify=False)
elif 'get' == 'put' and execute:
r = requests.put(address, auth=(username, userPassword), data={}, params={}, verify=False)
elif 'get' == 'delete' and execute:
r = requests.delete(address, auth=(username, userPassword), data={}, params={}, verify=False)
elif not execute:
r = False
else:
print('ERROR: Invalid Request type. Try again')
r = False
response = r
</DeepExtract>
content = response.text
data = json.loads(content)
if response.status_code == 200:
print('Credentials are good.')
print('\tthis user has the following roles:', data['data']['roles'])
else:
print(data)
print('Exiting, please try your credentials again.')
sys.exit()
|
def testCredentials():
address = serverAddress + '/api/user/my?incRoles=True'
if 'get' == 'get':
r = requests.get(address, auth=(username, userPassword), params={}, verify=False)
elif 'get' == 'post' and execute:
r = requests.post(address, auth=(username, userPassword), data={}, params={}, verify=False)
elif 'get' == 'put' and execute:
r = requests.put(address, auth=(username, userPassword), data={}, params={}, verify=False)
elif 'get' == 'delete' and execute:
r = requests.delete(address, auth=(username, userPassword), data={}, params={}, verify=False)
elif not execute:
r = False
else:
print('ERROR: Invalid Request type. Try again')
r = False
response = r
content = response.text
data = json.loads(content)
if response.status_code == 200:
print('Credentials are good.')
print('\tthis user has the following roles:', data['data']['roles'])
else:
print(data)
print('Exiting, please try your credentials again.')
sys.exit()
|
crashplan_api_examples
|
positive
|
def fix_onnx_model(filepath):
"""
Applies an inplace fix to ONNX file from Coach.
"""
model = onnx.load_model(filepath)
<DeepExtract>
graph_name = model.graph.output[0].name
if '_continuousppohead' in graph_name:
print('ONNX correction applied to continuous PPO agent.')
output_nodes = ppo_continuous_outputs(model)
elif '_discreteppohead' in graph_name:
print('ONNX correction applied to discrete PPO agent.')
output_nodes = ppo_discrete_outputs(model)
elif '_qhead' in graph_name:
print('ONNX correction not required for DQN agent.')
output_nodes = model.graph.output
else:
raise Exception("Can't determine the RL Agent used from the ONNX graph provided.")
</DeepExtract>
<DeepExtract>
new_graph = helper.make_graph(nodes=model.graph.node, name='new_graph', inputs=model.graph.input, outputs=output_nodes, initializer=model.graph.initializer)
checker.check_graph(new_graph)
new_model = helper.make_model(new_graph)
with open(filepath, 'wb') as file_handle:
serialized = new_model.SerializeToString()
file_handle.write(serialized)
</DeepExtract>
|
def fix_onnx_model(filepath):
"""
Applies an inplace fix to ONNX file from Coach.
"""
model = onnx.load_model(filepath)
graph_name = model.graph.output[0].name
if '_continuousppohead' in graph_name:
print('ONNX correction applied to continuous PPO agent.')
output_nodes = ppo_continuous_outputs(model)
elif '_discreteppohead' in graph_name:
print('ONNX correction applied to discrete PPO agent.')
output_nodes = ppo_discrete_outputs(model)
elif '_qhead' in graph_name:
print('ONNX correction not required for DQN agent.')
output_nodes = model.graph.output
else:
raise Exception("Can't determine the RL Agent used from the ONNX graph provided.")
new_graph = helper.make_graph(nodes=model.graph.node, name='new_graph', inputs=model.graph.input, outputs=output_nodes, initializer=model.graph.initializer)
checker.check_graph(new_graph)
new_model = helper.make_model(new_graph)
with open(filepath, 'wb') as file_handle:
serialized = new_model.SerializeToString()
file_handle.write(serialized)
|
deepracer-local
|
positive
|
def index_file_list(root_directory, input_filename_list, compiler_args_filename, output_db_filename):
symbol_db = SymbolDatabase(output_db_filename)
symbol_db.create_data_model()
cxxd_config_parser = CxxdConfigParser(os.path.join(root_directory, '.cxxd_config.json'), root_directory)
parser = ClangParser(compiler_args_filename, TranslationUnitCache(NoCache()), cxxd_config_parser.get_clang_library_file())
with open(input_filename_list, 'r') as input_list:
for filename in input_list.readlines():
<DeepExtract>
logging.info("Indexing a file '{0}' ... ".format(filename.strip()))
tunit = parser.parse(filename.strip(), filename.strip())
if tunit:
parser.traverse(tunit.cursor, [parser, symbol_db, root_directory], indexer_visitor)
store_tunit_diagnostics(tunit.diagnostics, symbol_db, root_directory)
symbol_db.flush()
logging.info('Indexing of {0} completed.'.format(filename.strip()))
return tunit is not None
</DeepExtract>
symbol_db.close()
|
def index_file_list(root_directory, input_filename_list, compiler_args_filename, output_db_filename):
symbol_db = SymbolDatabase(output_db_filename)
symbol_db.create_data_model()
cxxd_config_parser = CxxdConfigParser(os.path.join(root_directory, '.cxxd_config.json'), root_directory)
parser = ClangParser(compiler_args_filename, TranslationUnitCache(NoCache()), cxxd_config_parser.get_clang_library_file())
with open(input_filename_list, 'r') as input_list:
for filename in input_list.readlines():
logging.info("Indexing a file '{0}' ... ".format(filename.strip()))
tunit = parser.parse(filename.strip(), filename.strip())
if tunit:
parser.traverse(tunit.cursor, [parser, symbol_db, root_directory], indexer_visitor)
store_tunit_diagnostics(tunit.diagnostics, symbol_db, root_directory)
symbol_db.flush()
logging.info('Indexing of {0} completed.'.format(filename.strip()))
return tunit is not None
symbol_db.close()
|
cxxd
|
positive
|
@pytest.mark.skipif('ethereum_optimized.frontier.state_db' not in sys.modules, reason="missing dependency (use `pip install 'ethereum[optimized]'`)")
def test_storage_key() -> None:
def actions(impl: Any) -> Any:
obj = impl.State()
impl.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT)
impl.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42))
impl.state_root(obj)
return obj
<DeepExtract>
obj = state.State()
state.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT)
state.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42))
state.state_root(obj)
state_normal = obj
</DeepExtract>
<DeepExtract>
obj = state_db.State()
state_db.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT)
state_db.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42))
state_db.state_root(obj)
state_optimized = obj
</DeepExtract>
assert state.get_storage(state_normal, ADDRESS_FOO, STORAGE_FOO) == state_db.get_storage(state_optimized, ADDRESS_FOO, STORAGE_FOO)
assert state.state_root(state_normal) == state_db.state_root(state_optimized)
|
@pytest.mark.skipif('ethereum_optimized.frontier.state_db' not in sys.modules, reason="missing dependency (use `pip install 'ethereum[optimized]'`)")
def test_storage_key() -> None:
def actions(impl: Any) -> Any:
obj = impl.State()
impl.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT)
impl.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42))
impl.state_root(obj)
return obj
obj = state.State()
state.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT)
state.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42))
state.state_root(obj)
state_normal = obj
obj = state_db.State()
state_db.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT)
state_db.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42))
state_db.state_root(obj)
state_optimized = obj
assert state.get_storage(state_normal, ADDRESS_FOO, STORAGE_FOO) == state_db.get_storage(state_optimized, ADDRESS_FOO, STORAGE_FOO)
assert state.state_root(state_normal) == state_db.state_root(state_optimized)
|
eth1.0-specs
|
positive
|
@unittest.skipUnless(django.VERSION < (5, 0), 'pytz support removed in Django 5.0')
@override_settings(TIME_ZONE='UTC')
def test_datetime_timezone_awareness(self):
(utc, tokyo) = (pytz.timezone('UTC'), pytz.timezone('Asia/Tokyo'))
reference = utc.localize(self.reference_dt)
<DeepExtract>
parsed = self.field.strptime(self.reference_str, IsoDateTimeField.ISO_8601)
</DeepExtract>
self.assertIsInstance(parsed.tzinfo, tzinfo)
self.assertEqual(parsed, reference)
reference = tokyo.localize(self.reference_dt)
with timezone.override(tokyo):
<DeepExtract>
parsed = self.field.strptime(self.reference_str, IsoDateTimeField.ISO_8601)
</DeepExtract>
self.assertIsInstance(parsed.tzinfo, tzinfo)
self.assertEqual(parsed.tzinfo.zone, tokyo.zone)
self.assertEqual(parsed, reference)
reference = utc.localize(self.reference_dt - timedelta(hours=1))
<DeepExtract>
parsed = self.field.strptime(self.reference_str + '+01:00', IsoDateTimeField.ISO_8601)
</DeepExtract>
self.assertIsInstance(parsed.tzinfo, tzinfo)
self.assertEqual(parsed, reference)
|
@unittest.skipUnless(django.VERSION < (5, 0), 'pytz support removed in Django 5.0')
@override_settings(TIME_ZONE='UTC')
def test_datetime_timezone_awareness(self):
(utc, tokyo) = (pytz.timezone('UTC'), pytz.timezone('Asia/Tokyo'))
reference = utc.localize(self.reference_dt)
parsed = self.field.strptime(self.reference_str, IsoDateTimeField.ISO_8601)
self.assertIsInstance(parsed.tzinfo, tzinfo)
self.assertEqual(parsed, reference)
reference = tokyo.localize(self.reference_dt)
with timezone.override(tokyo):
parsed = self.field.strptime(self.reference_str, IsoDateTimeField.ISO_8601)
self.assertIsInstance(parsed.tzinfo, tzinfo)
self.assertEqual(parsed.tzinfo.zone, tokyo.zone)
self.assertEqual(parsed, reference)
reference = utc.localize(self.reference_dt - timedelta(hours=1))
parsed = self.field.strptime(self.reference_str + '+01:00', IsoDateTimeField.ISO_8601)
self.assertIsInstance(parsed.tzinfo, tzinfo)
self.assertEqual(parsed, reference)
|
django-filter
|
positive
|
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occured
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
<DeepExtract>
if self.current_data:
current_data = ''.join(self.current_data)
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
self.current_data = []
if self.parse_only and len(self.tagStack) <= 1 and (not self.parse_only.text or not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
</DeepExtract>
if self.parse_only and len(self.tagStack) <= 1 and (self.parse_only.text or not self.parse_only.search_tag(name, attrs)):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs, self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
<DeepExtract>
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
</DeepExtract>
return tag
|
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
if self.current_data:
current_data = ''.join(self.current_data)
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
self.current_data = []
if self.parse_only and len(self.tagStack) <= 1 and (not self.parse_only.text or not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
if self.parse_only and len(self.tagStack) <= 1 and (self.parse_only.text or not self.parse_only.search_tag(name, attrs)):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs, self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
return tag
|
coursera-python-for-everybody-specialization
|
positive
|
def medium(self):
"""Set MComboBox to medium"""
<DeepExtract>
self._dayu_size = dayu_theme.medium
self.lineEdit().setProperty('dayu_size', dayu_theme.medium)
self.style().polish(self)
</DeepExtract>
return self
|
def medium(self):
"""Set MComboBox to medium"""
self._dayu_size = dayu_theme.medium
self.lineEdit().setProperty('dayu_size', dayu_theme.medium)
self.style().polish(self)
return self
|
dayu_widgets
|
positive
|
def write_update_columns(self, attribute_columns, row_check_sum):
if len(attribute_columns) != 0:
<DeepExtract>
self.output_stream.write_raw_byte(ord(TAG_ROW_DATA))
</DeepExtract>
for update_type in list(attribute_columns.keys()):
columns = attribute_columns[update_type]
for column in columns:
if isinstance(column, six.text_type) or isinstance(column, six.binary_type):
<DeepExtract>
update_type = update_type.upper()
cell_check_sum = 0
self.write_tag(TAG_CELL)
cell_check_sum = self.write_cell_name(column, cell_check_sum)
timestamp = None
if None is not None:
if isinstance(None, tuple):
if None[0] is not None:
cell_check_sum = self.write_column_value_with_checksum(None[0], cell_check_sum)
if None[1] is not None:
timestamp = None[1]
else:
cell_check_sum = self.write_column_value_with_checksum(None, cell_check_sum)
if update_type == UpdateType.DELETE:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ONE_VERSION)
elif update_type == UpdateType.DELETE_ALL:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ALL_VERSION)
elif update_type == UpdateType.INCREMENT:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.INCREMENT)
if timestamp is not None:
self.write_tag(TAG_CELL_TIMESTAMP)
self.output_stream.write_raw_little_endian64(timestamp)
if timestamp is not None:
cell_check_sum = PlainBufferCrc8.crc_int64(cell_check_sum, timestamp)
if update_type == UpdateType.DELETE:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ONE_VERSION)
if update_type == UpdateType.DELETE_ALL:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ALL_VERSION)
if update_type == UpdateType.INCREMENT:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.INCREMENT)
self.write_tag(TAG_CELL_CHECKSUM)
self.output_stream.write_raw_byte(cell_check_sum)
row_check_sum = PlainBufferCrc8.crc_int8(row_check_sum, cell_check_sum)
row_check_sum = row_check_sum
</DeepExtract>
elif len(column) == 2:
<DeepExtract>
update_type = update_type.upper()
cell_check_sum = 0
self.write_tag(TAG_CELL)
cell_check_sum = self.write_cell_name(column[0], cell_check_sum)
timestamp = None
if (column[1], None) is not None:
if isinstance((column[1], None), tuple):
if (column[1], None)[0] is not None:
cell_check_sum = self.write_column_value_with_checksum((column[1], None)[0], cell_check_sum)
if (column[1], None)[1] is not None:
timestamp = (column[1], None)[1]
else:
cell_check_sum = self.write_column_value_with_checksum((column[1], None), cell_check_sum)
if update_type == UpdateType.DELETE:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ONE_VERSION)
elif update_type == UpdateType.DELETE_ALL:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ALL_VERSION)
elif update_type == UpdateType.INCREMENT:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.INCREMENT)
if timestamp is not None:
self.write_tag(TAG_CELL_TIMESTAMP)
self.output_stream.write_raw_little_endian64(timestamp)
if timestamp is not None:
cell_check_sum = PlainBufferCrc8.crc_int64(cell_check_sum, timestamp)
if update_type == UpdateType.DELETE:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ONE_VERSION)
if update_type == UpdateType.DELETE_ALL:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ALL_VERSION)
if update_type == UpdateType.INCREMENT:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.INCREMENT)
self.write_tag(TAG_CELL_CHECKSUM)
self.output_stream.write_raw_byte(cell_check_sum)
row_check_sum = PlainBufferCrc8.crc_int8(row_check_sum, cell_check_sum)
row_check_sum = row_check_sum
</DeepExtract>
elif len(column) == 3:
<DeepExtract>
update_type = update_type.upper()
cell_check_sum = 0
self.write_tag(TAG_CELL)
cell_check_sum = self.write_cell_name(column[0], cell_check_sum)
timestamp = None
if (column[1], column[2]) is not None:
if isinstance((column[1], column[2]), tuple):
if (column[1], column[2])[0] is not None:
cell_check_sum = self.write_column_value_with_checksum((column[1], column[2])[0], cell_check_sum)
if (column[1], column[2])[1] is not None:
timestamp = (column[1], column[2])[1]
else:
cell_check_sum = self.write_column_value_with_checksum((column[1], column[2]), cell_check_sum)
if update_type == UpdateType.DELETE:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ONE_VERSION)
elif update_type == UpdateType.DELETE_ALL:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ALL_VERSION)
elif update_type == UpdateType.INCREMENT:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.INCREMENT)
if timestamp is not None:
self.write_tag(TAG_CELL_TIMESTAMP)
self.output_stream.write_raw_little_endian64(timestamp)
if timestamp is not None:
cell_check_sum = PlainBufferCrc8.crc_int64(cell_check_sum, timestamp)
if update_type == UpdateType.DELETE:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ONE_VERSION)
if update_type == UpdateType.DELETE_ALL:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ALL_VERSION)
if update_type == UpdateType.INCREMENT:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.INCREMENT)
self.write_tag(TAG_CELL_CHECKSUM)
self.output_stream.write_raw_byte(cell_check_sum)
row_check_sum = PlainBufferCrc8.crc_int8(row_check_sum, cell_check_sum)
row_check_sum = row_check_sum
</DeepExtract>
else:
raise OTSClientError('Unsupported column format: ' + str(column))
return row_check_sum
|
def write_update_columns(self, attribute_columns, row_check_sum):
if len(attribute_columns) != 0:
self.output_stream.write_raw_byte(ord(TAG_ROW_DATA))
for update_type in list(attribute_columns.keys()):
columns = attribute_columns[update_type]
for column in columns:
if isinstance(column, six.text_type) or isinstance(column, six.binary_type):
update_type = update_type.upper()
cell_check_sum = 0
self.write_tag(TAG_CELL)
cell_check_sum = self.write_cell_name(column, cell_check_sum)
timestamp = None
if None is not None:
if isinstance(None, tuple):
if None[0] is not None:
cell_check_sum = self.write_column_value_with_checksum(None[0], cell_check_sum)
if None[1] is not None:
timestamp = None[1]
else:
cell_check_sum = self.write_column_value_with_checksum(None, cell_check_sum)
if update_type == UpdateType.DELETE:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ONE_VERSION)
elif update_type == UpdateType.DELETE_ALL:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ALL_VERSION)
elif update_type == UpdateType.INCREMENT:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.INCREMENT)
if timestamp is not None:
self.write_tag(TAG_CELL_TIMESTAMP)
self.output_stream.write_raw_little_endian64(timestamp)
if timestamp is not None:
cell_check_sum = PlainBufferCrc8.crc_int64(cell_check_sum, timestamp)
if update_type == UpdateType.DELETE:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ONE_VERSION)
if update_type == UpdateType.DELETE_ALL:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ALL_VERSION)
if update_type == UpdateType.INCREMENT:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.INCREMENT)
self.write_tag(TAG_CELL_CHECKSUM)
self.output_stream.write_raw_byte(cell_check_sum)
row_check_sum = PlainBufferCrc8.crc_int8(row_check_sum, cell_check_sum)
row_check_sum = row_check_sum
elif len(column) == 2:
update_type = update_type.upper()
cell_check_sum = 0
self.write_tag(TAG_CELL)
cell_check_sum = self.write_cell_name(column[0], cell_check_sum)
timestamp = None
if (column[1], None) is not None:
if isinstance((column[1], None), tuple):
if (column[1], None)[0] is not None:
cell_check_sum = self.write_column_value_with_checksum((column[1], None)[0], cell_check_sum)
if (column[1], None)[1] is not None:
timestamp = (column[1], None)[1]
else:
cell_check_sum = self.write_column_value_with_checksum((column[1], None), cell_check_sum)
if update_type == UpdateType.DELETE:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ONE_VERSION)
elif update_type == UpdateType.DELETE_ALL:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ALL_VERSION)
elif update_type == UpdateType.INCREMENT:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.INCREMENT)
if timestamp is not None:
self.write_tag(TAG_CELL_TIMESTAMP)
self.output_stream.write_raw_little_endian64(timestamp)
if timestamp is not None:
cell_check_sum = PlainBufferCrc8.crc_int64(cell_check_sum, timestamp)
if update_type == UpdateType.DELETE:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ONE_VERSION)
if update_type == UpdateType.DELETE_ALL:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ALL_VERSION)
if update_type == UpdateType.INCREMENT:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.INCREMENT)
self.write_tag(TAG_CELL_CHECKSUM)
self.output_stream.write_raw_byte(cell_check_sum)
row_check_sum = PlainBufferCrc8.crc_int8(row_check_sum, cell_check_sum)
row_check_sum = row_check_sum
elif len(column) == 3:
update_type = update_type.upper()
cell_check_sum = 0
self.write_tag(TAG_CELL)
cell_check_sum = self.write_cell_name(column[0], cell_check_sum)
timestamp = None
if (column[1], column[2]) is not None:
if isinstance((column[1], column[2]), tuple):
if (column[1], column[2])[0] is not None:
cell_check_sum = self.write_column_value_with_checksum((column[1], column[2])[0], cell_check_sum)
if (column[1], column[2])[1] is not None:
timestamp = (column[1], column[2])[1]
else:
cell_check_sum = self.write_column_value_with_checksum((column[1], column[2]), cell_check_sum)
if update_type == UpdateType.DELETE:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ONE_VERSION)
elif update_type == UpdateType.DELETE_ALL:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.DELETE_ALL_VERSION)
elif update_type == UpdateType.INCREMENT:
self.write_tag(TAG_CELL_TYPE)
self.output_stream.write_raw_byte(const.INCREMENT)
if timestamp is not None:
self.write_tag(TAG_CELL_TIMESTAMP)
self.output_stream.write_raw_little_endian64(timestamp)
if timestamp is not None:
cell_check_sum = PlainBufferCrc8.crc_int64(cell_check_sum, timestamp)
if update_type == UpdateType.DELETE:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ONE_VERSION)
if update_type == UpdateType.DELETE_ALL:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.DELETE_ALL_VERSION)
if update_type == UpdateType.INCREMENT:
cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, const.INCREMENT)
self.write_tag(TAG_CELL_CHECKSUM)
self.output_stream.write_raw_byte(cell_check_sum)
row_check_sum = PlainBufferCrc8.crc_int8(row_check_sum, cell_check_sum)
row_check_sum = row_check_sum
else:
raise OTSClientError('Unsupported column format: ' + str(column))
return row_check_sum
|
aliyun-tablestore-python-sdk
|
positive
|
def preprocess_dns(in_dir, out_dir='./data'):
"""Create json file from dataset folder.
Args:
in_dir (str): Location of the DNS data
out_dir (str): Where to save the json files.
"""
clean_wavs = glob.glob(os.path.join(in_dir, 'clean/*.wav'))
<DeepExtract>
clean_dic = {get_file_id(fp): fp for fp in clean_wavs}
</DeepExtract>
mix_wavs = glob.glob(os.path.join(in_dir, 'noisy/*.wav'))
<DeepExtract>
mix_dic = {get_file_id(fp): fp for fp in mix_wavs}
</DeepExtract>
noise_wavs = glob.glob(os.path.join(in_dir, 'noise/*.wav'))
<DeepExtract>
noise_dic = {get_file_id(fp): fp for fp in noise_wavs}
</DeepExtract>
assert clean_dic.keys() == mix_dic.keys() == noise_dic.keys()
file_infos = {k: dict(mix=mix_dic[k], clean=clean_dic[k], noise=noise_dic[k], snr=get_snr_from_mix_path(mix_dic[k]), file_len=len(sf.SoundFile(mix_dic[k]))) for k in clean_dic.keys()}
with open(os.path.join(out_dir, 'file_infos.json'), 'w') as f:
json.dump(file_infos, f, indent=2)
|
def preprocess_dns(in_dir, out_dir='./data'):
"""Create json file from dataset folder.
Args:
in_dir (str): Location of the DNS data
out_dir (str): Where to save the json files.
"""
clean_wavs = glob.glob(os.path.join(in_dir, 'clean/*.wav'))
clean_dic = {get_file_id(fp): fp for fp in clean_wavs}
mix_wavs = glob.glob(os.path.join(in_dir, 'noisy/*.wav'))
mix_dic = {get_file_id(fp): fp for fp in mix_wavs}
noise_wavs = glob.glob(os.path.join(in_dir, 'noise/*.wav'))
noise_dic = {get_file_id(fp): fp for fp in noise_wavs}
assert clean_dic.keys() == mix_dic.keys() == noise_dic.keys()
file_infos = {k: dict(mix=mix_dic[k], clean=clean_dic[k], noise=noise_dic[k], snr=get_snr_from_mix_path(mix_dic[k]), file_len=len(sf.SoundFile(mix_dic[k]))) for k in clean_dic.keys()}
with open(os.path.join(out_dir, 'file_infos.json'), 'w') as f:
json.dump(file_infos, f, indent=2)
|
asteroid
|
positive
|
def read_http_resp_header(self, reader):
"""read & parse http headers"""
line = reader.readline()
if line is None:
return line
line = line.strip()
if not utils.is_response(line):
return None
resp_header = HttpResponseHeader()
resp_header.status_line = line
try:
resp_header.status_code = int(line.split(' ')[1])
except:
pass
lines = [line]
<DeepExtract>
header_dict = defaultdict(str)
while True:
line = reader.readline()
if line is None:
break
line = line.strip()
if not line:
break
lines.append(line)
(key, value) = utils.parse_http_header(line)
if key is None:
continue
header_dict[key.lower()] = value
header_dict = header_dict
</DeepExtract>
if b'content-length' in header_dict:
resp_header.content_len = int(header_dict[b'content-length'])
if b'location' in header_dict:
resp_header.redirect_to = header_dict[b'location']
if b'chunked' in header_dict[b'transfer-encoding']:
resp_header.chunked = True
resp_header.content_type = header_dict[b'content-type']
resp_header.compress = utils.get_compress_type(header_dict[b'content-encoding'])
resp_header.connection_close = header_dict[b'connection'] == b'close'
resp_header.raw_data = b'\n'.join(lines)
resp_header.filename = ''
if b'content-disposition' in header_dict:
cnt_dis = header_dict[b'content-disposition']
if cnt_dis.find('filename=') > -1:
resp_header.filename = cnt_dis.split('=')[1].rstrip()
return resp_header
|
def read_http_resp_header(self, reader):
"""read & parse http headers"""
line = reader.readline()
if line is None:
return line
line = line.strip()
if not utils.is_response(line):
return None
resp_header = HttpResponseHeader()
resp_header.status_line = line
try:
resp_header.status_code = int(line.split(' ')[1])
except:
pass
lines = [line]
header_dict = defaultdict(str)
while True:
line = reader.readline()
if line is None:
break
line = line.strip()
if not line:
break
lines.append(line)
(key, value) = utils.parse_http_header(line)
if key is None:
continue
header_dict[key.lower()] = value
header_dict = header_dict
if b'content-length' in header_dict:
resp_header.content_len = int(header_dict[b'content-length'])
if b'location' in header_dict:
resp_header.redirect_to = header_dict[b'location']
if b'chunked' in header_dict[b'transfer-encoding']:
resp_header.chunked = True
resp_header.content_type = header_dict[b'content-type']
resp_header.compress = utils.get_compress_type(header_dict[b'content-encoding'])
resp_header.connection_close = header_dict[b'connection'] == b'close'
resp_header.raw_data = b'\n'.join(lines)
resp_header.filename = ''
if b'content-disposition' in header_dict:
cnt_dis = header_dict[b'content-disposition']
if cnt_dis.find('filename=') > -1:
resp_header.filename = cnt_dis.split('=')[1].rstrip()
return resp_header
|
CapTipper
|
positive
|
def test_get_data_from_database(data_object: Data):
"""
Test to ensure get data from database works appropriately.
:param data_object: Data object to leverage to test this function.
"""
<DeepExtract>
csv_data = get_csv_data(headers=False)
normalized_data = []
for data in csv_data:
split_data = data.split(',')
normalized_dict = get_normalized_data(split_data, parse_date=True)
normalized_data.append(normalized_dict)
normalized_csv_data = normalized_data
</DeepExtract>
<DeepExtract>
if os.path.isfile(DATABASE_FILE_PATH):
os.remove(DATABASE_FILE_PATH)
</DeepExtract>
data_object.create_table()
data_object.dump_to_table(normalized_csv_data)
result = data_object.get_data_from_database()
assert normalized_csv_data == result, 'Expected data to equal.'
|
def test_get_data_from_database(data_object: Data):
"""
Test to ensure get data from database works appropriately.
:param data_object: Data object to leverage to test this function.
"""
csv_data = get_csv_data(headers=False)
normalized_data = []
for data in csv_data:
split_data = data.split(',')
normalized_dict = get_normalized_data(split_data, parse_date=True)
normalized_data.append(normalized_dict)
normalized_csv_data = normalized_data
if os.path.isfile(DATABASE_FILE_PATH):
os.remove(DATABASE_FILE_PATH)
data_object.create_table()
data_object.dump_to_table(normalized_csv_data)
result = data_object.get_data_from_database()
assert normalized_csv_data == result, 'Expected data to equal.'
|
algobot
|
positive
|
def main(train_loader, test_loaders, model, logger, file_logger):
print('\nparsed options:\n{}\n'.format(vars(args)))
if args.cuda:
model.cuda()
<DeepExtract>
if args.optimizer == 'sgd':
optimizer = optim.SGD(model.features.parameters(), lr=args.lr, momentum=0.9, dampening=0.9, weight_decay=args.wd)
elif args.optimizer == 'adam':
optimizer = optim.Adam(model.features.parameters(), lr=args.lr, weight_decay=args.wd)
else:
raise Exception('Not supported optimizer: {0}'.format(args.optimizer))
optimizer1 = optimizer
</DeepExtract>
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {}'.format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
else:
print('=> no checkpoint found at {}'.format(args.resume))
start = args.start_epoch
end = start + args.epochs
for epoch in range(start, end):
<DeepExtract>
model.train()
pbar = tqdm(enumerate(train_loader))
for (batch_idx, data) in pbar:
if triplet_flag:
(data_a, data_p, data_n) = data
else:
(data_a, data_p) = data
if args.cuda:
(data_a, data_p) = (data_a.cuda(), data_p.cuda())
(data_a, data_p) = (Variable(data_a), Variable(data_p))
out_a = model(data_a)
out_p = model(data_p)
if triplet_flag:
data_n = data_n.cuda()
data_n = Variable(data_n)
out_n = model(data_n)
if args.batch_reduce == 'L2Net':
loss = loss_L2Net(out_a, out_p, anchor_swap=args.anchorswap, margin=args.margin, loss_type=args.loss)
elif args.batch_reduce == 'random_global':
loss = loss_random_sampling(out_a, out_p, out_n, margin=args.margin, anchor_swap=args.anchorswap, loss_type=args.loss)
else:
loss = loss_HardNet(out_a, out_p, margin=args.margin, anchor_swap=args.anchorswap, anchor_ave=args.anchorave, batch_reduce=args.batch_reduce, loss_type=args.loss)
if args.decor:
loss += CorrelationPenaltyLoss()(out_a)
if args.gor:
loss += args.alpha * global_orthogonal_regularization(out_a, out_n)
optimizer1.zero_grad()
loss.backward()
optimizer1.step()
adjust_learning_rate(optimizer1)
if batch_idx % args.log_interval == 0:
pbar.set_description('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(data_a), len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), loss.data[0]))
if args.enable_logging:
logger.log_value('loss', loss.data[0]).step()
try:
os.stat('{}{}'.format(args.model_dir, suffix))
except:
os.makedirs('{}{}'.format(args.model_dir, suffix))
torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict()}, '{}{}/checkpoint_{}.pth'.format(args.model_dir, suffix, epoch))
</DeepExtract>
for test_loader in test_loaders:
<DeepExtract>
model.eval()
(labels, distances) = ([], [])
pbar = tqdm(enumerate(test_loader['dataloader']))
for (batch_idx, (data_a, data_p, label)) in pbar:
if args.cuda:
(data_a, data_p) = (data_a.cuda(), data_p.cuda())
(data_a, data_p, label) = (Variable(data_a, volatile=True), Variable(data_p, volatile=True), Variable(label))
out_a = model(data_a)
out_p = model(data_p)
dists = torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))
distances.append(dists.data.cpu().numpy().reshape(-1, 1))
ll = label.data.cpu().numpy().reshape(-1, 1)
labels.append(ll)
if batch_idx % args.log_interval == 0:
pbar.set_description(test_loader['name'] + ' Test Epoch: {} [{}/{} ({:.0f}%)]'.format(epoch, batch_idx * len(data_a), len(test_loader['dataloader'].dataset), 100.0 * batch_idx / len(test_loader['dataloader'])))
num_tests = test_loader['dataloader'].dataset.matches.size(0)
labels = np.vstack(labels).reshape(num_tests)
distances = np.vstack(distances).reshape(num_tests)
fpr95 = ErrorRateAt95Recall(labels, 1.0 / (distances + 1e-08))
print('\x1b[91mTest set: Accuracy(FPR95): {:.8f}\n\x1b[0m'.format(fpr95))
if args.enable_logging:
logger.log_value(test_loader['name'] + ' fpr95', fpr95)
return
</DeepExtract>
if TEST_ON_W1BS:
patch_images = w1bs.get_list_of_patch_images(DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
desc_name = 'curr_desc'
DESCS_DIR = LOG_DIR + '/temp_descs/'
OUT_DIR = DESCS_DIR.replace('/temp_descs/', '/out_graphs/')
for img_fname in patch_images:
w1bs_extract_descs_and_save(img_fname, model, desc_name, cuda=args.cuda, mean_img=args.mean_image, std_img=args.std_image, out_dir=DESCS_DIR)
force_rewrite_list = [desc_name]
w1bs.match_descriptors_and_save_results(DESC_DIR=DESCS_DIR, do_rewrite=True, dist_dict={}, force_rewrite_list=force_rewrite_list)
if args.enable_logging:
w1bs.draw_and_save_plots_with_loggers(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR, methods=['SNN_ratio'], descs_to_draw=[desc_name], logger=file_logger, tensor_logger=logger)
else:
w1bs.draw_and_save_plots(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR, methods=['SNN_ratio'], descs_to_draw=[desc_name])
|
def main(train_loader, test_loaders, model, logger, file_logger):
print('\nparsed options:\n{}\n'.format(vars(args)))
if args.cuda:
model.cuda()
if args.optimizer == 'sgd':
optimizer = optim.SGD(model.features.parameters(), lr=args.lr, momentum=0.9, dampening=0.9, weight_decay=args.wd)
elif args.optimizer == 'adam':
optimizer = optim.Adam(model.features.parameters(), lr=args.lr, weight_decay=args.wd)
else:
raise Exception('Not supported optimizer: {0}'.format(args.optimizer))
optimizer1 = optimizer
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {}'.format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
else:
print('=> no checkpoint found at {}'.format(args.resume))
start = args.start_epoch
end = start + args.epochs
for epoch in range(start, end):
model.train()
pbar = tqdm(enumerate(train_loader))
for (batch_idx, data) in pbar:
if triplet_flag:
(data_a, data_p, data_n) = data
else:
(data_a, data_p) = data
if args.cuda:
(data_a, data_p) = (data_a.cuda(), data_p.cuda())
(data_a, data_p) = (Variable(data_a), Variable(data_p))
out_a = model(data_a)
out_p = model(data_p)
if triplet_flag:
data_n = data_n.cuda()
data_n = Variable(data_n)
out_n = model(data_n)
if args.batch_reduce == 'L2Net':
loss = loss_L2Net(out_a, out_p, anchor_swap=args.anchorswap, margin=args.margin, loss_type=args.loss)
elif args.batch_reduce == 'random_global':
loss = loss_random_sampling(out_a, out_p, out_n, margin=args.margin, anchor_swap=args.anchorswap, loss_type=args.loss)
else:
loss = loss_HardNet(out_a, out_p, margin=args.margin, anchor_swap=args.anchorswap, anchor_ave=args.anchorave, batch_reduce=args.batch_reduce, loss_type=args.loss)
if args.decor:
loss += CorrelationPenaltyLoss()(out_a)
if args.gor:
loss += args.alpha * global_orthogonal_regularization(out_a, out_n)
optimizer1.zero_grad()
loss.backward()
optimizer1.step()
adjust_learning_rate(optimizer1)
if batch_idx % args.log_interval == 0:
pbar.set_description('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(data_a), len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), loss.data[0]))
if args.enable_logging:
logger.log_value('loss', loss.data[0]).step()
try:
os.stat('{}{}'.format(args.model_dir, suffix))
except:
os.makedirs('{}{}'.format(args.model_dir, suffix))
torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict()}, '{}{}/checkpoint_{}.pth'.format(args.model_dir, suffix, epoch))
for test_loader in test_loaders:
model.eval()
(labels, distances) = ([], [])
pbar = tqdm(enumerate(test_loader['dataloader']))
for (batch_idx, (data_a, data_p, label)) in pbar:
if args.cuda:
(data_a, data_p) = (data_a.cuda(), data_p.cuda())
(data_a, data_p, label) = (Variable(data_a, volatile=True), Variable(data_p, volatile=True), Variable(label))
out_a = model(data_a)
out_p = model(data_p)
dists = torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))
distances.append(dists.data.cpu().numpy().reshape(-1, 1))
ll = label.data.cpu().numpy().reshape(-1, 1)
labels.append(ll)
if batch_idx % args.log_interval == 0:
pbar.set_description(test_loader['name'] + ' Test Epoch: {} [{}/{} ({:.0f}%)]'.format(epoch, batch_idx * len(data_a), len(test_loader['dataloader'].dataset), 100.0 * batch_idx / len(test_loader['dataloader'])))
num_tests = test_loader['dataloader'].dataset.matches.size(0)
labels = np.vstack(labels).reshape(num_tests)
distances = np.vstack(distances).reshape(num_tests)
fpr95 = ErrorRateAt95Recall(labels, 1.0 / (distances + 1e-08))
print('\x1b[91mTest set: Accuracy(FPR95): {:.8f}\n\x1b[0m'.format(fpr95))
if args.enable_logging:
logger.log_value(test_loader['name'] + ' fpr95', fpr95)
return
if TEST_ON_W1BS:
patch_images = w1bs.get_list_of_patch_images(DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
desc_name = 'curr_desc'
DESCS_DIR = LOG_DIR + '/temp_descs/'
OUT_DIR = DESCS_DIR.replace('/temp_descs/', '/out_graphs/')
for img_fname in patch_images:
w1bs_extract_descs_and_save(img_fname, model, desc_name, cuda=args.cuda, mean_img=args.mean_image, std_img=args.std_image, out_dir=DESCS_DIR)
force_rewrite_list = [desc_name]
w1bs.match_descriptors_and_save_results(DESC_DIR=DESCS_DIR, do_rewrite=True, dist_dict={}, force_rewrite_list=force_rewrite_list)
if args.enable_logging:
w1bs.draw_and_save_plots_with_loggers(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR, methods=['SNN_ratio'], descs_to_draw=[desc_name], logger=file_logger, tensor_logger=logger)
else:
w1bs.draw_and_save_plots(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR, methods=['SNN_ratio'], descs_to_draw=[desc_name])
|
CNNs-Without-Borders
|
positive
|
def _build_implemented_requirements(profile_set: Dict[str, str], responsible_roles: List[ResponsibleRole]) -> List[ImplementedRequirement]:
"""Build implemented requirements."""
implemented_requirements = []
profile_file = profile_set['profile-file']
<DeepExtract>
try:
fp = pathlib.Path(profile_file)
f = fp.open('r', encoding=const.FILE_ENCODING)
content = f.readlines()
rules = self._parse_cis_rules(content)
f.close()
except Exception:
logger.warning(f'unable to process {profile_file}')
rules = {}
rules = rules
</DeepExtract>
<DeepExtract>
controls = {}
for rule in rules.keys():
control = rules[rule][1]
if control not in controls.keys():
controls[control] = [rule]
else:
controls[control] = controls[control] + [rule]
for control in controls:
controls[control] = self._get_trimmed_rules(control, controls[control])
logger.debug(f'{control} {controls[control]}')
controls = controls
</DeepExtract>
rule_prefix = 'xccdf_org.ssgproject.content_rule_'
cac_openshift = f'{self._folder_cac}/applications/openshift'
for rule in rules:
if self._is_excluded(rule, rules[rule][1], controls):
continue
<DeepExtract>
title = None
for (path, dirs, _files) in os.walk(cac_openshift):
if rule in dirs:
folder = os.path.join(path, rule)
tpath = pathlib.Path(folder) / 'rule.yml'
fp = pathlib.Path(tpath)
f = fp.open('r', encoding=const.FILE_ENCODING)
content = f.readlines()
f.close()
for line in content:
if line.startswith('title:'):
title = line.split('title:')[1]
break
if title is None:
msg = f'unable to find "{rule}"'
logger.warning(msg)
title = 'no title'
title = title.strip().strip("'").strip('"')
logger.debug(f'{title}')
remarks = title
</DeepExtract>
prop = Property(class_='scc_goal_name_id', ns=profile_set['profile-ns'], name='XCCDF_rule', value=f'{rule_prefix}{rule}', remarks=f'{remarks}')
props = [prop]
implemented_requirement = ImplementedRequirement(uuid=f'{str(uuid.uuid4())}', control_id=f'CIS-{rules[rule][1]}', description=f'{rules[rule][2]}', props=props, responsible_roles=responsible_roles)
<DeepExtract>
set_parameter = None
for key in self._rule_to_parm_map.keys():
logger.debug(f'{key} {rule}')
if key == rule:
value = self._rule_to_parm_map[key]
remarks = value['description']
options = value['options']
default_value = options['default']
logger.debug(f'key: {key} options: {options}')
set_parameter = SetParameter(param_id=rule, values=[f'{default_value}'], remarks=remarks)
set_parameter = set_parameter
</DeepExtract>
if set_parameter is not None:
implemented_requirement.set_parameters = [set_parameter]
implemented_requirements.append(implemented_requirement)
return implemented_requirements
|
def _build_implemented_requirements(profile_set: Dict[str, str], responsible_roles: List[ResponsibleRole]) -> List[ImplementedRequirement]:
"""Build implemented requirements."""
implemented_requirements = []
profile_file = profile_set['profile-file']
try:
fp = pathlib.Path(profile_file)
f = fp.open('r', encoding=const.FILE_ENCODING)
content = f.readlines()
rules = self._parse_cis_rules(content)
f.close()
except Exception:
logger.warning(f'unable to process {profile_file}')
rules = {}
rules = rules
controls = {}
for rule in rules.keys():
control = rules[rule][1]
if control not in controls.keys():
controls[control] = [rule]
else:
controls[control] = controls[control] + [rule]
for control in controls:
controls[control] = self._get_trimmed_rules(control, controls[control])
logger.debug(f'{control} {controls[control]}')
controls = controls
rule_prefix = 'xccdf_org.ssgproject.content_rule_'
cac_openshift = f'{self._folder_cac}/applications/openshift'
for rule in rules:
if self._is_excluded(rule, rules[rule][1], controls):
continue
title = None
for (path, dirs, _files) in os.walk(cac_openshift):
if rule in dirs:
folder = os.path.join(path, rule)
tpath = pathlib.Path(folder) / 'rule.yml'
fp = pathlib.Path(tpath)
f = fp.open('r', encoding=const.FILE_ENCODING)
content = f.readlines()
f.close()
for line in content:
if line.startswith('title:'):
title = line.split('title:')[1]
break
if title is None:
msg = f'unable to find "{rule}"'
logger.warning(msg)
title = 'no title'
title = title.strip().strip("'").strip('"')
logger.debug(f'{title}')
remarks = title
prop = Property(class_='scc_goal_name_id', ns=profile_set['profile-ns'], name='XCCDF_rule', value=f'{rule_prefix}{rule}', remarks=f'{remarks}')
props = [prop]
implemented_requirement = ImplementedRequirement(uuid=f'{str(uuid.uuid4())}', control_id=f'CIS-{rules[rule][1]}', description=f'{rules[rule][2]}', props=props, responsible_roles=responsible_roles)
set_parameter = None
for key in self._rule_to_parm_map.keys():
logger.debug(f'{key} {rule}')
if key == rule:
value = self._rule_to_parm_map[key]
remarks = value['description']
options = value['options']
default_value = options['default']
logger.debug(f'key: {key} options: {options}')
set_parameter = SetParameter(param_id=rule, values=[f'{default_value}'], remarks=remarks)
set_parameter = set_parameter
if set_parameter is not None:
implemented_requirement.set_parameters = [set_parameter]
implemented_requirements.append(implemented_requirement)
return implemented_requirements
|
compliance-trestle
|
positive
|
def test_instance_fk_value(self):
<DeepExtract>
historical = self.poll.history.order_by('history_date')[0]
</DeepExtract>
original = historical.instance
self.assertEqual(original.place, self.poll.place)
|
def test_instance_fk_value(self):
historical = self.poll.history.order_by('history_date')[0]
original = historical.instance
self.assertEqual(original.place, self.poll.place)
|
django-simple-history
|
positive
|
@with_boto_retry()
def get_latest_taupage_image_id(self):
"""
Return the image id of the most recent private AMI matching the name pattern 'Taupage-AMI-*'
:return: str: image id
"""
<DeepExtract>
filters = [{'Name': 'name', 'Values': ['Taupage-AMI-*']}, {'Name': 'is-public', 'Values': ['false']}, {'Name': 'state', 'Values': ['available']}, {'Name': 'root-device-type', 'Values': ['ebs']}]
try:
response = self.client.describe_images(ExecutableUsers=['self'], Filters=filters)
except (BotoCoreError, ClientError) as e:
raise CfnSphereBotoError(e)
if not response['Images']:
raise CfnSphereException('Could not find any private and available Taupage AMI')
taupage_images = response['Images']
</DeepExtract>
return self.get_latest_image_id(taupage_images)
|
@with_boto_retry()
def get_latest_taupage_image_id(self):
"""
Return the image id of the most recent private AMI matching the name pattern 'Taupage-AMI-*'
:return: str: image id
"""
filters = [{'Name': 'name', 'Values': ['Taupage-AMI-*']}, {'Name': 'is-public', 'Values': ['false']}, {'Name': 'state', 'Values': ['available']}, {'Name': 'root-device-type', 'Values': ['ebs']}]
try:
response = self.client.describe_images(ExecutableUsers=['self'], Filters=filters)
except (BotoCoreError, ClientError) as e:
raise CfnSphereBotoError(e)
if not response['Images']:
raise CfnSphereException('Could not find any private and available Taupage AMI')
taupage_images = response['Images']
return self.get_latest_image_id(taupage_images)
|
cfn-sphere
|
positive
|
def verify_sha256(filepath: str, hash_value: str, block_size: int=4096):
"""
Verify that the target filepath has the given sha256 hash_value
Raise ValueError if False
filepath - target filepath
hash_value - hex encoded value of the hash
block_size - block size for chunked reading from file
"""
if len(hash_value) != 64:
raise ValueError(f'Invalid hash_value: len({hash_value}) != 64')
hash_value = hash_value.lower()
if not all((x in '0123456789abcdef' for x in hash_value)):
raise ValueError(f'Invalid hash_value: {hash_value} contains non-hex chars')
<DeepExtract>
sha256_hash = hashlib.sha256()
with open(filepath, 'rb') as f:
for byte_block in iter(lambda : f.read(block_size), b''):
sha256_hash.update(byte_block)
value = sha256_hash.hexdigest()
</DeepExtract>
if value != hash_value:
raise ValueError(f'sha256 hash of {filepath}: {value} != {hash_value}')
|
def verify_sha256(filepath: str, hash_value: str, block_size: int=4096):
"""
Verify that the target filepath has the given sha256 hash_value
Raise ValueError if False
filepath - target filepath
hash_value - hex encoded value of the hash
block_size - block size for chunked reading from file
"""
if len(hash_value) != 64:
raise ValueError(f'Invalid hash_value: len({hash_value}) != 64')
hash_value = hash_value.lower()
if not all((x in '0123456789abcdef' for x in hash_value)):
raise ValueError(f'Invalid hash_value: {hash_value} contains non-hex chars')
sha256_hash = hashlib.sha256()
with open(filepath, 'rb') as f:
for byte_block in iter(lambda : f.read(block_size), b''):
sha256_hash.update(byte_block)
value = sha256_hash.hexdigest()
if value != hash_value:
raise ValueError(f'sha256 hash of {filepath}: {value} != {hash_value}')
|
armory
|
positive
|
def _load_plugins(config, cfgparser):
"""Load and initialize plugins
"""
os.umask(config['umask'])
for s in cfgparser.sections():
if s in {'ENV', 'global'}:
continue
if s.startswith('/'):
menu = 'consumers'
path_chain = s.split('/')
if path_chain[-1] == '':
path_chain = path_chain[:-1]
name = tuple(path_chain)
elif s.startswith('auth:'):
menu = 'authenticators'
name = s[5:]
elif s.startswith('authz:'):
menu = 'authorizers'
name = s[6:]
elif s.startswith('store:'):
menu = 'stores'
name = s[6:]
else:
raise ValueError('Invalid section name [%s].\n' % s)
try:
<DeepExtract>
if not cfgparser.has_option(s, 'handler'):
raise ValueError('Invalid section, missing "handler"')
handler_name = cfgparser.get(s, 'handler')
hconf = {'facility_name': s}
try:
handler = _load_plugin_class(menu, handler_name)
classname = handler.__name__
hconf['facility_name'] = '%s-[%s]' % (classname, s)
except Exception as e:
raise ValueError('Invalid format for "handler" option [%r]: %s' % (e, handler_name))
if handler._options is not None:
plugin = handler(cfgparser, s)
else:
hconf.update(cfgparser.items(s))
hconf.pop('handler')
plugin = handler(hconf)
plugin.section = s
config[menu][name] = plugin
</DeepExtract>
except Exception as e:
logger.debug("Plugin '%s' failed to load.", name, exc_info=True)
raise RuntimeError(menu, name, e)
for menu in ['authenticators', 'authorizers', 'consumers', 'stores']:
plugins = config[menu]
for name in sorted(plugins):
plugin = plugins[name]
plugin.finalize_init(config, cfgparser, context=None)
|
def _load_plugins(config, cfgparser):
"""Load and initialize plugins
"""
os.umask(config['umask'])
for s in cfgparser.sections():
if s in {'ENV', 'global'}:
continue
if s.startswith('/'):
menu = 'consumers'
path_chain = s.split('/')
if path_chain[-1] == '':
path_chain = path_chain[:-1]
name = tuple(path_chain)
elif s.startswith('auth:'):
menu = 'authenticators'
name = s[5:]
elif s.startswith('authz:'):
menu = 'authorizers'
name = s[6:]
elif s.startswith('store:'):
menu = 'stores'
name = s[6:]
else:
raise ValueError('Invalid section name [%s].\n' % s)
try:
if not cfgparser.has_option(s, 'handler'):
raise ValueError('Invalid section, missing "handler"')
handler_name = cfgparser.get(s, 'handler')
hconf = {'facility_name': s}
try:
handler = _load_plugin_class(menu, handler_name)
classname = handler.__name__
hconf['facility_name'] = '%s-[%s]' % (classname, s)
except Exception as e:
raise ValueError('Invalid format for "handler" option [%r]: %s' % (e, handler_name))
if handler._options is not None:
plugin = handler(cfgparser, s)
else:
hconf.update(cfgparser.items(s))
hconf.pop('handler')
plugin = handler(hconf)
plugin.section = s
config[menu][name] = plugin
except Exception as e:
logger.debug("Plugin '%s' failed to load.", name, exc_info=True)
raise RuntimeError(menu, name, e)
for menu in ['authenticators', 'authorizers', 'consumers', 'stores']:
plugins = config[menu]
for name in sorted(plugins):
plugin = plugins[name]
plugin.finalize_init(config, cfgparser, context=None)
|
custodia
|
positive
|
def set_security_groups(load_balancer_names: List[str], security_group_ids: List[str], configuration: Configuration=None, secrets: Secrets=None) -> List[AWSResponse]:
"""
Changes the security groups for the specified load balancer(s).
This action will replace the existing security groups on an application
load balancer with the specified security groups.
Parameters:
- load_balancer_names: a list of load balancer names
- security_group_ids: a list of security group ids
returns:
[
{
'LoadBalancerArn': 'string',
'SecurityGroupIds': ['sg-0000000', 'sg-0000001']
},
...
]
"""
<DeepExtract>
try:
response = aws_client('ec2', configuration, secrets).describe_security_groups(GroupIds=security_group_ids)['SecurityGroups']
results = [r['GroupId'] for r in response]
except ClientError as e:
raise FailedActivity(e.response['Error']['Message'])
missing_sgs = [s for s in security_group_ids if s not in results]
if missing_sgs:
raise FailedActivity(f'Invalid security group id(s): {missing_sgs}')
security_group_ids = results
</DeepExtract>
client = aws_client('elbv2', configuration, secrets)
<DeepExtract>
results = {}
logger.debug(f'Searching for load balancer name(s): {load_balancer_names}.')
try:
response = client.describe_load_balancers(Names=load_balancer_names)
for lb in response['LoadBalancers']:
if lb['State']['Code'] != 'active':
raise FailedActivity('Invalid state for load balancer {}: {} is not active'.format(lb['LoadBalancerName'], lb['State']['Code']))
results.setdefault(lb['Type'], []).append(lb['LoadBalancerArn'])
results.setdefault('Names', []).append(lb['LoadBalancerName'])
except ClientError as e:
raise FailedActivity(e.response['Error']['Message'])
missing_lbs = [load_balancer for load_balancer in load_balancer_names if load_balancer not in results['Names']]
if missing_lbs:
raise FailedActivity(f'Unable to locate load balancer(s): {missing_lbs}')
if not results:
raise FailedActivity('Unable to find any load balancer(s) matching name(s): {}'.format(load_balancer_names))
load_balancers = results
</DeepExtract>
if load_balancers.get('network', []):
raise FailedActivity('Cannot change security groups of network load balancers.')
results = []
for load_balancer in load_balancers['application']:
response = client.set_security_groups(LoadBalancerArn=load_balancer, SecurityGroups=security_group_ids)
response['LoadBalancerArn'] = load_balancer
results.append(response)
return results
|
def set_security_groups(load_balancer_names: List[str], security_group_ids: List[str], configuration: Configuration=None, secrets: Secrets=None) -> List[AWSResponse]:
"""
Changes the security groups for the specified load balancer(s).
This action will replace the existing security groups on an application
load balancer with the specified security groups.
Parameters:
- load_balancer_names: a list of load balancer names
- security_group_ids: a list of security group ids
returns:
[
{
'LoadBalancerArn': 'string',
'SecurityGroupIds': ['sg-0000000', 'sg-0000001']
},
...
]
"""
try:
response = aws_client('ec2', configuration, secrets).describe_security_groups(GroupIds=security_group_ids)['SecurityGroups']
results = [r['GroupId'] for r in response]
except ClientError as e:
raise FailedActivity(e.response['Error']['Message'])
missing_sgs = [s for s in security_group_ids if s not in results]
if missing_sgs:
raise FailedActivity(f'Invalid security group id(s): {missing_sgs}')
security_group_ids = results
client = aws_client('elbv2', configuration, secrets)
results = {}
logger.debug(f'Searching for load balancer name(s): {load_balancer_names}.')
try:
response = client.describe_load_balancers(Names=load_balancer_names)
for lb in response['LoadBalancers']:
if lb['State']['Code'] != 'active':
raise FailedActivity('Invalid state for load balancer {}: {} is not active'.format(lb['LoadBalancerName'], lb['State']['Code']))
results.setdefault(lb['Type'], []).append(lb['LoadBalancerArn'])
results.setdefault('Names', []).append(lb['LoadBalancerName'])
except ClientError as e:
raise FailedActivity(e.response['Error']['Message'])
missing_lbs = [load_balancer for load_balancer in load_balancer_names if load_balancer not in results['Names']]
if missing_lbs:
raise FailedActivity(f'Unable to locate load balancer(s): {missing_lbs}')
if not results:
raise FailedActivity('Unable to find any load balancer(s) matching name(s): {}'.format(load_balancer_names))
load_balancers = results
if load_balancers.get('network', []):
raise FailedActivity('Cannot change security groups of network load balancers.')
results = []
for load_balancer in load_balancers['application']:
response = client.set_security_groups(LoadBalancerArn=load_balancer, SecurityGroups=security_group_ids)
response['LoadBalancerArn'] = load_balancer
results.append(response)
return results
|
chaostoolkit-aws
|
positive
|
def modify(self, fileobj, events, data=None):
try:
<DeepExtract>
try:
fd = _fileobj_to_fd(fileobj)
except ValueError:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
fd = key.fd
raise
</DeepExtract>
key = self._fd_to_key[fd]
except KeyError:
raise KeyError('{!r} is not registered'.format(fileobj)) from None
if data == key.data and events == key.events:
return key
if events != key.events:
z_events = 0
if events & EVENT_READ:
z_events |= POLLIN
if events & EVENT_WRITE:
z_events |= POLLOUT
try:
self._poller.modify(fd, z_events)
except ZMQError as exc:
raise OSError(exc.errno, exc.strerror) from exc
key = key._replace(data=data, events=events)
self._fd_to_key[key.fd] = key
return key
|
def modify(self, fileobj, events, data=None):
try:
try:
fd = _fileobj_to_fd(fileobj)
except ValueError:
for key in self._fd_to_key.values():
    if key.fileobj is fileobj:
        fd = key.fd
        break
else:
    raise
key = self._fd_to_key[fd]
except KeyError:
raise KeyError('{!r} is not registered'.format(fileobj)) from None
if data == key.data and events == key.events:
return key
if events != key.events:
z_events = 0
if events & EVENT_READ:
z_events |= POLLIN
if events & EVENT_WRITE:
z_events |= POLLOUT
try:
self._poller.modify(fd, z_events)
except ZMQError as exc:
raise OSError(exc.errno, exc.strerror) from exc
key = key._replace(data=data, events=events)
self._fd_to_key[key.fd] = key
return key
|
aiozmq
|
positive
|
def run(self, video_path):
frame_path_input = os.path.join(self.output_path, 'frames-input')
frame_path_interpolated = os.path.join(self.output_path, 'frames-interpolated')
frame_path_combined = os.path.join(self.output_path, 'frames-combined')
video_path_output = os.path.join(self.output_path, 'videos-output')
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
if not os.path.exists(frame_path_input):
os.makedirs(frame_path_input)
if not os.path.exists(frame_path_interpolated):
os.makedirs(frame_path_interpolated)
if not os.path.exists(frame_path_combined):
os.makedirs(frame_path_combined)
if not os.path.exists(video_path_output):
os.makedirs(video_path_output)
timestep = self.time_step
num_frames = int(1.0 / timestep) - 1
cap = cv2.VideoCapture(video_path)
fps = cap.get(cv2.CAP_PROP_FPS)
print('Old fps (frame rate): ', fps)
times_interp = int(1.0 / timestep)
r2 = str(int(fps) * times_interp)
print('New fps (frame rate): ', r2)
out_path = video2frames(video_path, frame_path_input)
vidname = video_path.split('/')[-1].split('.')[0]
frames = sorted(glob.glob(os.path.join(out_path, '*.png')))
if self.remove_duplicates:
<DeepExtract>
def dhash(image, hash_size=8):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (hash_size + 1, hash_size))
diff = resized[:, 1:] > resized[:, :-1]
return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])
hashes = {}
max_interp = 9
image_paths = sorted(glob.glob(os.path.join(out_path, '*.png')))
for image_path in image_paths:
image = cv2.imread(image_path)
h = dhash(image)
p = hashes.get(h, [])
p.append(image_path)
hashes[h] = p
for (h, hashed_paths) in hashes.items():
if len(hashed_paths) > 1:
first_index = int(hashed_paths[0].split('/')[-1].split('.')[-2])
last_index = int(hashed_paths[-1].split('/')[-1].split('.')[-2]) + 1
gap = 2 * (last_index - first_index) - 1
if gap > 2 * max_interp:
cut1 = len(hashed_paths) // 3
cut2 = cut1 * 2
for p in hashed_paths[1:cut1 - 1]:
os.remove(p)
for p in hashed_paths[cut1 + 1:cut2]:
os.remove(p)
for p in hashed_paths[cut2 + 1:]:
os.remove(p)
if gap > max_interp:
mid = len(hashed_paths) // 2
for p in hashed_paths[1:mid - 1]:
os.remove(p)
for p in hashed_paths[mid + 1:]:
os.remove(p)
else:
for p in hashed_paths[1:]:
os.remove(p)
frames = sorted(glob.glob(os.path.join(out_path, '*.png')))
frames = frames
</DeepExtract>
img = imread(frames[0])
int_width = img.shape[1]
int_height = img.shape[0]
channel = img.shape[2]
if not channel == 3:
return
if int_width != int_width >> 7 << 7:
int_width_pad = (int_width >> 7) + 1 << 7
padding_left = int((int_width_pad - int_width) / 2)
padding_right = int_width_pad - int_width - padding_left
else:
int_width_pad = int_width
padding_left = 32
padding_right = 32
if int_height != int_height >> 7 << 7:
int_height_pad = (int_height >> 7) + 1 << 7
padding_top = int((int_height_pad - int_height) / 2)
padding_bottom = int_height_pad - int_height - padding_top
else:
int_height_pad = int_height
padding_top = 32
padding_bottom = 32
frame_num = len(frames)
if not os.path.exists(os.path.join(frame_path_interpolated, vidname)):
os.makedirs(os.path.join(frame_path_interpolated, vidname))
if not os.path.exists(os.path.join(frame_path_combined, vidname)):
os.makedirs(os.path.join(frame_path_combined, vidname))
for i in tqdm(range(frame_num - 1)):
first = frames[i]
second = frames[i + 1]
first_index = int(first.split('/')[-1].split('.')[-2])
second_index = int(second.split('/')[-1].split('.')[-2])
img_first = imread(first)
img_second = imread(second)
# -------------- Frame change test ------------------------
# ----------------------------------------------------------
X0 = img_first.astype('float32').transpose((2, 0, 1)) / 255
X1 = img_second.astype('float32').transpose((2, 0, 1)) / 255
assert X0.shape[1] == X1.shape[1]
assert X0.shape[2] == X1.shape[2]
X0 = np.pad(X0, ((0, 0), (padding_top, padding_bottom), (padding_left, padding_right)), mode='edge')
X1 = np.pad(X1, ((0, 0), (padding_top, padding_bottom), (padding_left, padding_right)), mode='edge')
X0 = np.expand_dims(X0, axis=0)
X1 = np.expand_dims(X1, axis=0)
X0 = np.expand_dims(X0, axis=0)
X1 = np.expand_dims(X1, axis=0)
X = np.concatenate((X0, X1), axis=0)
o = self.base_forward(X)
y_ = o[0]
y_ = [np.transpose(255.0 * item.clip(0, 1.0)[0, :, padding_top:padding_top + int_height, padding_left:padding_left + int_width], (1, 2, 0)) for item in y_]
if self.remove_duplicates:
num_frames = times_interp * (second_index - first_index) - 1
time_offsets = [kk * timestep for kk in range(1, 1 + num_frames, 1)]
start = times_interp * first_index + 1
for (item, time_offset) in zip(y_, time_offsets):
out_dir = os.path.join(frame_path_interpolated, vidname, '{:08d}.png'.format(start))
imsave(out_dir, np.round(item).astype(np.uint8))
start = start + 1
else:
time_offsets = [kk * timestep for kk in range(1, 1 + num_frames, 1)]
count = 1
for (item, time_offset) in zip(y_, time_offsets):
out_dir = os.path.join(frame_path_interpolated, vidname, '{:0>6d}_{:0>4d}.png'.format(i, count))
count = count + 1
imsave(out_dir, np.round(item).astype(np.uint8))
input_dir = os.path.join(frame_path_input, vidname)
interpolated_dir = os.path.join(frame_path_interpolated, vidname)
combined_dir = os.path.join(frame_path_combined, vidname)
if self.remove_duplicates:
<DeepExtract>
frames1 = sorted(glob.glob(os.path.join(input_dir, '*.png')))
frames2 = sorted(glob.glob(os.path.join(interpolated_dir, '*.png')))
num1 = len(frames1)
num2 = len(frames2)
for i in range(num1):
src = frames1[i]
index = int(src.split('/')[-1].split('.')[-2])
dst = os.path.join(combined_dir, '{:08d}.png'.format(times_interp * index))
shutil.copy2(src, dst)
for i in range(num2):
src = frames2[i]
imgname = src.split('/')[-1]
dst = os.path.join(combined_dir, imgname)
shutil.copy2(src, dst)
</DeepExtract>
else:
num_frames = int(1.0 / timestep) - 1
<DeepExtract>
frames1 = sorted(glob.glob(os.path.join(input_dir, '*.png')))
frames2 = sorted(glob.glob(os.path.join(interpolated_dir, '*.png')))
num1 = len(frames1)
num2 = len(frames2)
for i in range(num1):
src = frames1[i]
imgname = int(src.split('/')[-1].split('.')[-2])
assert i == imgname
dst = os.path.join(combined_dir, '{:08d}.png'.format(i * (num_frames + 1)))
shutil.copy2(src, dst)
if i < num1 - 1:
try:
for k in range(num_frames):
src = frames2[i * num_frames + k]
dst = os.path.join(combined_dir, '{:08d}.png'.format(i * (num_frames + 1) + k + 1))
shutil.copy2(src, dst)
except Exception as e:
print(e)
</DeepExtract>
frame_pattern_combined = os.path.join(frame_path_combined, vidname, '%08d.png')
video_pattern_output = os.path.join(video_path_output, vidname + '.mp4')
if os.path.exists(video_pattern_output):
os.remove(video_pattern_output)
frames2video(frame_pattern_combined, video_pattern_output, r2)
return (frame_pattern_combined, video_pattern_output)
|
def run(self, video_path):
frame_path_input = os.path.join(self.output_path, 'frames-input')
frame_path_interpolated = os.path.join(self.output_path, 'frames-interpolated')
frame_path_combined = os.path.join(self.output_path, 'frames-combined')
video_path_output = os.path.join(self.output_path, 'videos-output')
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
if not os.path.exists(frame_path_input):
os.makedirs(frame_path_input)
if not os.path.exists(frame_path_interpolated):
os.makedirs(frame_path_interpolated)
if not os.path.exists(frame_path_combined):
os.makedirs(frame_path_combined)
if not os.path.exists(video_path_output):
os.makedirs(video_path_output)
timestep = self.time_step
num_frames = int(1.0 / timestep) - 1
cap = cv2.VideoCapture(video_path)
fps = cap.get(cv2.CAP_PROP_FPS)
print('Old fps (frame rate): ', fps)
times_interp = int(1.0 / timestep)
r2 = str(int(fps) * times_interp)
print('New fps (frame rate): ', r2)
out_path = video2frames(video_path, frame_path_input)
vidname = video_path.split('/')[-1].split('.')[0]
frames = sorted(glob.glob(os.path.join(out_path, '*.png')))
if self.remove_duplicates:
def dhash(image, hash_size=8):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (hash_size + 1, hash_size))
diff = resized[:, 1:] > resized[:, :-1]
return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])
hashes = {}
max_interp = 9
image_paths = sorted(glob.glob(os.path.join(out_path, '*.png')))
for image_path in image_paths:
image = cv2.imread(image_path)
h = dhash(image)
p = hashes.get(h, [])
p.append(image_path)
hashes[h] = p
for (h, hashed_paths) in hashes.items():
if len(hashed_paths) > 1:
first_index = int(hashed_paths[0].split('/')[-1].split('.')[-2])
last_index = int(hashed_paths[-1].split('/')[-1].split('.')[-2]) + 1
gap = 2 * (last_index - first_index) - 1
if gap > 2 * max_interp:
cut1 = len(hashed_paths) // 3
cut2 = cut1 * 2
for p in hashed_paths[1:cut1 - 1]:
os.remove(p)
for p in hashed_paths[cut1 + 1:cut2]:
os.remove(p)
for p in hashed_paths[cut2 + 1:]:
os.remove(p)
if gap > max_interp:
mid = len(hashed_paths) // 2
for p in hashed_paths[1:mid - 1]:
os.remove(p)
for p in hashed_paths[mid + 1:]:
os.remove(p)
else:
for p in hashed_paths[1:]:
os.remove(p)
frames = sorted(glob.glob(os.path.join(out_path, '*.png')))
frames = frames
img = imread(frames[0])
int_width = img.shape[1]
int_height = img.shape[0]
channel = img.shape[2]
if not channel == 3:
return
if int_width != int_width >> 7 << 7:
int_width_pad = (int_width >> 7) + 1 << 7
padding_left = int((int_width_pad - int_width) / 2)
padding_right = int_width_pad - int_width - padding_left
else:
int_width_pad = int_width
padding_left = 32
padding_right = 32
if int_height != int_height >> 7 << 7:
int_height_pad = (int_height >> 7) + 1 << 7
padding_top = int((int_height_pad - int_height) / 2)
padding_bottom = int_height_pad - int_height - padding_top
else:
int_height_pad = int_height
padding_top = 32
padding_bottom = 32
frame_num = len(frames)
if not os.path.exists(os.path.join(frame_path_interpolated, vidname)):
os.makedirs(os.path.join(frame_path_interpolated, vidname))
if not os.path.exists(os.path.join(frame_path_combined, vidname)):
os.makedirs(os.path.join(frame_path_combined, vidname))
for i in tqdm(range(frame_num - 1)):
first = frames[i]
second = frames[i + 1]
first_index = int(first.split('/')[-1].split('.')[-2])
second_index = int(second.split('/')[-1].split('.')[-2])
img_first = imread(first)
img_second = imread(second)
# -------------- Frame change test ------------------------
# ----------------------------------------------------------
X0 = img_first.astype('float32').transpose((2, 0, 1)) / 255
X1 = img_second.astype('float32').transpose((2, 0, 1)) / 255
assert X0.shape[1] == X1.shape[1]
assert X0.shape[2] == X1.shape[2]
X0 = np.pad(X0, ((0, 0), (padding_top, padding_bottom), (padding_left, padding_right)), mode='edge')
X1 = np.pad(X1, ((0, 0), (padding_top, padding_bottom), (padding_left, padding_right)), mode='edge')
X0 = np.expand_dims(X0, axis=0)
X1 = np.expand_dims(X1, axis=0)
X0 = np.expand_dims(X0, axis=0)
X1 = np.expand_dims(X1, axis=0)
X = np.concatenate((X0, X1), axis=0)
o = self.base_forward(X)
y_ = o[0]
y_ = [np.transpose(255.0 * item.clip(0, 1.0)[0, :, padding_top:padding_top + int_height, padding_left:padding_left + int_width], (1, 2, 0)) for item in y_]
if self.remove_duplicates:
num_frames = times_interp * (second_index - first_index) - 1
time_offsets = [kk * timestep for kk in range(1, 1 + num_frames, 1)]
start = times_interp * first_index + 1
for (item, time_offset) in zip(y_, time_offsets):
out_dir = os.path.join(frame_path_interpolated, vidname, '{:08d}.png'.format(start))
imsave(out_dir, np.round(item).astype(np.uint8))
start = start + 1
else:
time_offsets = [kk * timestep for kk in range(1, 1 + num_frames, 1)]
count = 1
for (item, time_offset) in zip(y_, time_offsets):
out_dir = os.path.join(frame_path_interpolated, vidname, '{:0>6d}_{:0>4d}.png'.format(i, count))
count = count + 1
imsave(out_dir, np.round(item).astype(np.uint8))
input_dir = os.path.join(frame_path_input, vidname)
interpolated_dir = os.path.join(frame_path_interpolated, vidname)
combined_dir = os.path.join(frame_path_combined, vidname)
if self.remove_duplicates:
frames1 = sorted(glob.glob(os.path.join(input_dir, '*.png')))
frames2 = sorted(glob.glob(os.path.join(interpolated_dir, '*.png')))
num1 = len(frames1)
num2 = len(frames2)
for i in range(num1):
src = frames1[i]
index = int(src.split('/')[-1].split('.')[-2])
dst = os.path.join(combined_dir, '{:08d}.png'.format(times_interp * index))
shutil.copy2(src, dst)
for i in range(num2):
src = frames2[i]
imgname = src.split('/')[-1]
dst = os.path.join(combined_dir, imgname)
shutil.copy2(src, dst)
else:
num_frames = int(1.0 / timestep) - 1
frames1 = sorted(glob.glob(os.path.join(input_dir, '*.png')))
frames2 = sorted(glob.glob(os.path.join(interpolated_dir, '*.png')))
num1 = len(frames1)
num2 = len(frames2)
for i in range(num1):
src = frames1[i]
imgname = int(src.split('/')[-1].split('.')[-2])
assert i == imgname
dst = os.path.join(combined_dir, '{:08d}.png'.format(i * (num_frames + 1)))
shutil.copy2(src, dst)
if i < num1 - 1:
try:
for k in range(num_frames):
src = frames2[i * num_frames + k]
dst = os.path.join(combined_dir, '{:08d}.png'.format(i * (num_frames + 1) + k + 1))
shutil.copy2(src, dst)
except Exception as e:
print(e)
frame_pattern_combined = os.path.join(frame_path_combined, vidname, '%08d.png')
video_pattern_output = os.path.join(video_path_output, vidname + '.mp4')
if os.path.exists(video_pattern_output):
os.remove(video_pattern_output)
frames2video(frame_pattern_combined, video_pattern_output, r2)
return (frame_pattern_combined, video_pattern_output)
|
-AI-emmmm
|
positive
|
def load_image(image_path, target_size, intermediate_size=0, crop_percentage=87.5, data_type='uint8', convert_to_bgr=False, audit_test03=False, interpolation_method=cv2.INTER_LINEAR):
out_height = target_size
out_width = target_size
def resize_with_aspectratio(img):
(height, width, _) = img.shape
new_height = int(100.0 * out_height / crop_percentage)
new_width = int(100.0 * out_width / crop_percentage)
if height > width:
w = new_width
h = int(new_height * height / width)
else:
h = new_height
w = int(new_width * width / height)
img = cv2.resize(img, (w, h), interpolation=interpolation_method)
return img
def center_crop(img):
(height, width, _) = img.shape
left = int((width - out_width) / 2)
right = int((width + out_width) / 2)
top = int((height - out_height) / 2)
bottom = int((height + out_height) / 2)
img = img[top:bottom, left:right]
return img
img = cv2.imread(image_path)
if audit_test03:
img[:, :, 0] = 0
if len(img.shape) < 3 or img.shape[2] != 3:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
<DeepExtract>
(height, width, _) = img.shape
new_height = int(100.0 * out_height / crop_percentage)
new_width = int(100.0 * out_width / crop_percentage)
if height > width:
w = new_width
h = int(new_height * height / width)
else:
h = new_height
w = int(new_width * width / height)
img = cv2.resize(img, (w, h), interpolation=interpolation_method)
img = img
</DeepExtract>
<DeepExtract>
(height, width, _) = img.shape
left = int((width - out_width) / 2)
right = int((width + out_width) / 2)
top = int((height - out_height) / 2)
bottom = int((height + out_height) / 2)
img = img[top:bottom, left:right]
img = img
</DeepExtract>
if convert_to_bgr:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
|
def load_image(image_path, target_size, intermediate_size=0, crop_percentage=87.5, data_type='uint8', convert_to_bgr=False, audit_test03=False, interpolation_method=cv2.INTER_LINEAR):
out_height = target_size
out_width = target_size
def resize_with_aspectratio(img):
(height, width, _) = img.shape
new_height = int(100.0 * out_height / crop_percentage)
new_width = int(100.0 * out_width / crop_percentage)
if height > width:
w = new_width
h = int(new_height * height / width)
else:
h = new_height
w = int(new_width * width / height)
img = cv2.resize(img, (w, h), interpolation=interpolation_method)
return img
def center_crop(img):
(height, width, _) = img.shape
left = int((width - out_width) / 2)
right = int((width + out_width) / 2)
top = int((height - out_height) / 2)
bottom = int((height + out_height) / 2)
img = img[top:bottom, left:right]
return img
img = cv2.imread(image_path)
if audit_test03:
img[:, :, 0] = 0
if len(img.shape) < 3 or img.shape[2] != 3:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
(height, width, _) = img.shape
new_height = int(100.0 * out_height / crop_percentage)
new_width = int(100.0 * out_width / crop_percentage)
if height > width:
w = new_width
h = int(new_height * height / width)
else:
h = new_height
w = int(new_width * width / height)
img = cv2.resize(img, (w, h), interpolation=interpolation_method)
img = img
(height, width, _) = img.shape
left = int((width - out_width) / 2)
right = int((width + out_width) / 2)
top = int((height - out_height) / 2)
bottom = int((height + out_height) / 2)
img = img[top:bottom, left:right]
img = img
if convert_to_bgr:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
|
ck-env
|
positive
|
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, pad_mode='reflect', nobias=False, initialW=None, initial_bias=None, **kwargs):
super(Convolution2D, self).__init__(in_channels, out_channels, ksize, stride, 0, nobias, initialW, initial_bias, **kwargs)
<DeepExtract>
if hasattr(pad, '__getitem__'):
    self.pad = pad
else:
    self.pad = [pad] * 2  # ndim == 2 for Convolution2D
</DeepExtract>
self.pad_mode = pad_mode
|
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, pad_mode='reflect', nobias=False, initialW=None, initial_bias=None, **kwargs):
super(Convolution2D, self).__init__(in_channels, out_channels, ksize, stride, 0, nobias, initialW, initial_bias, **kwargs)
if hasattr(pad, '__getitem__'):
    self.pad = pad
else:
    self.pad = [pad] * 2  # ndim == 2 for Convolution2D
self.pad_mode = pad_mode
|
bayesian_unet
|
positive
|
def compute_latent_statistics(self):
"""
Compute latent statistics.
"""
N_class = numpy.max(self.test_codes) + 1
num_attempts = self.perturbations.shape[0]
perturbations = numpy.swapaxes(self.perturbations, 0, 1)
perturbations = perturbations.reshape((perturbations.shape[0] * perturbations.shape[1], perturbations.shape[2]))
success = numpy.swapaxes(self.success, 0, 1)
success = success.reshape(success.shape[0] * success.shape[1])
accuracy = numpy.repeat(self.accuracy, num_attempts, axis=0)
raw_overall_success = numpy.logical_and(success >= 0, accuracy)
if not numpy.any(raw_overall_success):
for n in range(len(self.norms)):
for type in ['raw_success', 'raw_iteration', 'raw_average', 'raw_latent']:
self.results[n][type] = 0
for type in ['raw_class_success', 'raw_class_average', 'raw_class_latent']:
self.results[n][type] = numpy.zeros(N_class)
if self.args.results_file:
utils.write_pickle(self.args.results_file, self.results)
log('[Testing] wrote %s' % self.args.results_file)
log('[Testing] no successful attacks found, no plots')
return
perturbation_images = numpy.repeat(self.perturbation_images, num_attempts, axis=0)
perturbation_codes = numpy.repeat(self.perturbation_codes, num_attempts, axis=0)
log('[Testing] computing nearest neighbor ...')
<DeepExtract>
fit = 100000
if self.pca is None:
self.pca = sklearn.decomposition.IncrementalPCA(n_components=20)
self.pca.fit(self.train_images[:fit])
log('[Testing] fitted PCA')
if self.neighbors is None:
data = self.pca.transform(self.train_images)
self.neighbors = sklearn.neighbors.NearestNeighbors(n_neighbors=10, algorithm='kd_tree')
self.neighbors.fit(data[:fit])
log('[Testing] fitted nearest neighbor')
data = self.pca.transform(perturbation_images)
(_, indices) = self.neighbors.kneighbors(data)
nearest_neighbors_indices = indices
</DeepExtract>
nearest_neighbors = self.train_theta[nearest_neighbors_indices[:, 0]]
<DeepExtract>
fit = 100000
if self.pca is None:
self.pca = sklearn.decomposition.IncrementalPCA(n_components=20)
self.pca.fit(self.train_images[:fit])
log('[Testing] fitted PCA')
if self.neighbors is None:
data = self.pca.transform(self.train_images)
self.neighbors = sklearn.neighbors.NearestNeighbors(n_neighbors=10, algorithm='kd_tree')
self.neighbors.fit(data[:fit])
log('[Testing] fitted nearest neighbor')
data = self.pca.transform(perturbations)
(_, indices) = self.neighbors.kneighbors(data)
perturbation_nearest_neighbor_indices = indices
</DeepExtract>
perturbation_nearest_neighbor = self.train_theta[perturbation_nearest_neighbor_indices[:, 0]]
if self.args.plot_directory and self.args.plot_manifolds and utils.display():
pure_perturbations = perturbations[raw_overall_success] - perturbation_images[raw_overall_success]
pure_perturbations_norm = numpy.linalg.norm(pure_perturbations, ord=2, axis=1)
for k in range(10):
direction = perturbation_images[raw_overall_success] - self.train_images[nearest_neighbors_indices[:, k][raw_overall_success]]
direction_norm = numpy.linalg.norm(direction, ord=2, axis=1)
dot_products = numpy.einsum('ij,ij->i', direction, pure_perturbations)
dot_product_norms = numpy.multiply(pure_perturbations_norm, direction_norm)
dot_product_norms[dot_product_norms == 0] = 1
dot_products /= dot_product_norms
dot_products = numpy.degrees(numpy.arccos(dot_products))
if dot_products.shape[0] > 0 and (not numpy.any(dot_products != dot_products)):
plot_file = os.path.join(self.args.plot_directory, 'dot_products_nn%d' % k)
plot.histogram(plot_file, dot_products, 100, title='Dot Products Between Adversarial Perturbations and Direction to Nearest Neighbor %d' % k, xlabel='Dot Product (Between Normalized Vectors)', ylabel='Count')
log('[Testing] wrote %s' % plot_file)
if self.args.plot_directory and utils.display():
iterations = success[raw_overall_success]
x = numpy.arange(numpy.max(iterations) + 1)
y = numpy.bincount(iterations)
plot_file = os.path.join(self.args.plot_directory, 'iterations')
plot.bar(plot_file, x, y, title='Distribution of Iterations of Successful Attacks', xlabel='Number of Iterations', ylabel='Count')
log('[Testing] wrote %s' % plot_file)
for n in range(len(self.norms)):
norm = self.norms[n]
delta = numpy.linalg.norm(perturbation_images - perturbations, norm, axis=1)
latent_delta = numpy.linalg.norm(nearest_neighbors - perturbation_nearest_neighbor, norm, axis=1)
if self.args.plot_directory and utils.display():
plot_file = os.path.join(self.args.plot_directory, 'distances_l%g' % norm)
plot.histogram(plot_file, delta[raw_overall_success], 50, title='Distribution of $L_{%g}$ Distances of Successful Attacks' % norm, xlabel='Distance', ylabel='Count')
log('[Testing] wrote %s' % plot_file)
N_accuracy = numpy.sum(accuracy)
self.results[n]['raw_success'] = numpy.sum(raw_overall_success) / N_accuracy
self.results[n]['raw_iteration'] = numpy.average(success[raw_overall_success])
self.results[n]['raw_average'] = numpy.average(delta[raw_overall_success]) if numpy.any(raw_overall_success) else 0
self.results[n]['raw_latent'] = numpy.average(latent_delta[raw_overall_success]) if numpy.any(raw_overall_success) else 0
raw_class_success = numpy.zeros((N_class, perturbation_images.shape[0]), bool)
self.results[n]['raw_class_success'] = numpy.zeros(N_class)
self.results[n]['raw_class_average'] = numpy.zeros(N_class)
self.results[n]['raw_class_latent'] = numpy.zeros(N_class)
for c in range(N_class):
N_samples = numpy.sum(numpy.logical_and(accuracy, perturbation_codes == c))
if N_samples <= 0:
continue
raw_class_success[c] = numpy.logical_and(raw_overall_success, perturbation_codes == c)
self.results[n]['raw_class_success'][c] = numpy.sum(raw_class_success[c]) / N_samples
if numpy.any(raw_class_success[c]):
self.results[n]['raw_class_average'][c] = numpy.average(delta[raw_class_success[c].astype(bool)])
if numpy.any(raw_class_success[c]):
self.results[n]['raw_class_latent'][c] = numpy.average(latent_delta[raw_class_success[c].astype(bool)])
if self.args.results_file:
utils.write_pickle(self.args.results_file, self.results)
log('[Testing] wrote %s' % self.args.results_file)
|
def compute_latent_statistics(self):
"""
Compute latent statistics.
"""
N_class = numpy.max(self.test_codes) + 1
num_attempts = self.perturbations.shape[0]
perturbations = numpy.swapaxes(self.perturbations, 0, 1)
perturbations = perturbations.reshape((perturbations.shape[0] * perturbations.shape[1], perturbations.shape[2]))
success = numpy.swapaxes(self.success, 0, 1)
success = success.reshape(success.shape[0] * success.shape[1])
accuracy = numpy.repeat(self.accuracy, num_attempts, axis=0)
raw_overall_success = numpy.logical_and(success >= 0, accuracy)
if not numpy.any(raw_overall_success):
for n in range(len(self.norms)):
for type in ['raw_success', 'raw_iteration', 'raw_average', 'raw_latent']:
self.results[n][type] = 0
for type in ['raw_class_success', 'raw_class_average', 'raw_class_latent']:
self.results[n][type] = numpy.zeros(N_class)
if self.args.results_file:
utils.write_pickle(self.args.results_file, self.results)
log('[Testing] wrote %s' % self.args.results_file)
log('[Testing] no successful attacks found, no plots')
return
perturbation_images = numpy.repeat(self.perturbation_images, num_attempts, axis=0)
perturbation_codes = numpy.repeat(self.perturbation_codes, num_attempts, axis=0)
log('[Testing] computing nearest neighbor ...')
fit = 100000
if self.pca is None:
self.pca = sklearn.decomposition.IncrementalPCA(n_components=20)
self.pca.fit(self.train_images[:fit])
log('[Testing] fitted PCA')
if self.neighbors is None:
data = self.pca.transform(self.train_images)
self.neighbors = sklearn.neighbors.NearestNeighbors(n_neighbors=10, algorithm='kd_tree')
self.neighbors.fit(data[:fit])
log('[Testing] fitted nearest neighbor')
data = self.pca.transform(perturbation_images)
(_, indices) = self.neighbors.kneighbors(data)
nearest_neighbors_indices = indices
nearest_neighbors = self.train_theta[nearest_neighbors_indices[:, 0]]
fit = 100000
if self.pca is None:
self.pca = sklearn.decomposition.IncrementalPCA(n_components=20)
self.pca.fit(self.train_images[:fit])
log('[Testing] fitted PCA')
if self.neighbors is None:
data = self.pca.transform(self.train_images)
self.neighbors = sklearn.neighbors.NearestNeighbors(n_neighbors=10, algorithm='kd_tree')
self.neighbors.fit(data[:fit])
log('[Testing] fitted nearest neighbor')
data = self.pca.transform(perturbations)
(_, indices) = self.neighbors.kneighbors(data)
perturbation_nearest_neighbor_indices = indices
perturbation_nearest_neighbor = self.train_theta[perturbation_nearest_neighbor_indices[:, 0]]
if self.args.plot_directory and self.args.plot_manifolds and utils.display():
pure_perturbations = perturbations[raw_overall_success] - perturbation_images[raw_overall_success]
pure_perturbations_norm = numpy.linalg.norm(pure_perturbations, ord=2, axis=1)
for k in range(10):
direction = perturbation_images[raw_overall_success] - self.train_images[nearest_neighbors_indices[:, k][raw_overall_success]]
direction_norm = numpy.linalg.norm(direction, ord=2, axis=1)
dot_products = numpy.einsum('ij,ij->i', direction, pure_perturbations)
dot_product_norms = numpy.multiply(pure_perturbations_norm, direction_norm)
dot_product_norms[dot_product_norms == 0] = 1
dot_products /= dot_product_norms
dot_products = numpy.degrees(numpy.arccos(dot_products))
if dot_products.shape[0] > 0 and (not numpy.any(dot_products != dot_products)):
plot_file = os.path.join(self.args.plot_directory, 'dot_products_nn%d' % k)
plot.histogram(plot_file, dot_products, 100, title='Dot Products Between Adversarial Perturbations and Direction to Nearest Neighbor %d' % k, xlabel='Dot Product (Between Normalized Vectors)', ylabel='Count')
log('[Testing] wrote %s' % plot_file)
if self.args.plot_directory and utils.display():
iterations = success[raw_overall_success]
x = numpy.arange(numpy.max(iterations) + 1)
y = numpy.bincount(iterations)
plot_file = os.path.join(self.args.plot_directory, 'iterations')
plot.bar(plot_file, x, y, title='Distribution of Iterations of Successful Attacks', xlabel='Number of Iterations', ylabel='Count')
log('[Testing] wrote %s' % plot_file)
for n in range(len(self.norms)):
norm = self.norms[n]
delta = numpy.linalg.norm(perturbation_images - perturbations, norm, axis=1)
latent_delta = numpy.linalg.norm(nearest_neighbors - perturbation_nearest_neighbor, norm, axis=1)
if self.args.plot_directory and utils.display():
plot_file = os.path.join(self.args.plot_directory, 'distances_l%g' % norm)
plot.histogram(plot_file, delta[raw_overall_success], 50, title='Distribution of $L_{%g}$ Distances of Successful Attacks' % norm, xlabel='Distance', ylabel='Count')
log('[Testing] wrote %s' % plot_file)
N_accuracy = numpy.sum(accuracy)
self.results[n]['raw_success'] = numpy.sum(raw_overall_success) / N_accuracy
self.results[n]['raw_iteration'] = numpy.average(success[raw_overall_success])
self.results[n]['raw_average'] = numpy.average(delta[raw_overall_success]) if numpy.any(raw_overall_success) else 0
self.results[n]['raw_latent'] = numpy.average(latent_delta[raw_overall_success]) if numpy.any(raw_overall_success) else 0
raw_class_success = numpy.zeros((N_class, perturbation_images.shape[0]), bool)
self.results[n]['raw_class_success'] = numpy.zeros(N_class)
self.results[n]['raw_class_average'] = numpy.zeros(N_class)
self.results[n]['raw_class_latent'] = numpy.zeros(N_class)
for c in range(N_class):
N_samples = numpy.sum(numpy.logical_and(accuracy, perturbation_codes == c))
if N_samples <= 0:
continue
raw_class_success[c] = numpy.logical_and(raw_overall_success, perturbation_codes == c)
self.results[n]['raw_class_success'][c] = numpy.sum(raw_class_success[c]) / N_samples
if numpy.any(raw_class_success[c]):
self.results[n]['raw_class_average'][c] = numpy.average(delta[raw_class_success[c].astype(bool)])
if numpy.any(raw_class_success[c]):
self.results[n]['raw_class_latent'][c] = numpy.average(latent_delta[raw_class_success[c].astype(bool)])
if self.args.results_file:
utils.write_pickle(self.args.results_file, self.results)
log('[Testing] wrote %s' % self.args.results_file)
|
disentangling-robustness-generalization
|
positive
|
def offload(d):
offloaded = {}
for (k, v) in d.items():
if (k in offload_keys or len(offload_keys) == 0) and isinstance(v, (dict, list)):
<DeepExtract>
key = '{}{}'.format(prefix, uuid4())
logger.info('Offloading data to S3 key %s', key)
s3.Object(bucket, key).put(Body=json.dumps(v, cls=DecimalEncoder))
offloaded[k] = 's3://{}/{}'.format(bucket, key)
</DeepExtract>
elif isinstance(v, dict):
<DeepExtract>
inner = {}
for (k2, v2) in v.items():
    if (k2 in offload_keys or len(offload_keys) == 0) and isinstance(v2, (dict, list)):
        inner[k2] = _offload_value(v2)
    elif isinstance(v2, dict):
        inner[k2] = offload(v2)
    else:
        inner[k2] = v2
offloaded[k] = inner
</DeepExtract>
else:
offloaded[k] = v
return offloaded
|
def offload(d):
offloaded = {}
for (k, v) in d.items():
if (k in offload_keys or len(offload_keys) == 0) and isinstance(v, (dict, list)):
key = '{}{}'.format(prefix, uuid4())
logger.info('Offloading data to S3 key %s', key)
s3.Object(bucket, key).put(Body=json.dumps(v, cls=DecimalEncoder))
offloaded[k] = 's3://{}/{}'.format(bucket, key)
elif isinstance(v, dict):
inner = {}
for (k2, v2) in v.items():
    if (k2 in offload_keys or len(offload_keys) == 0) and isinstance(v2, (dict, list)):
        inner[k2] = _offload_value(v2)
    elif isinstance(v2, dict):
        inner[k2] = offload(v2)
    else:
        inner[k2] = v2
offloaded[k] = inner
else:
offloaded[k] = v
return offloaded
|
amazon-s3-find-and-forget
|
positive
|
@classmethod
def parse_line(cls, line):
role = ''
<DeepExtract>
parts = line.strip().split(maxsplit=1)
while len(parts) < 2:
parts.append('')
(tag, data) = parts
</DeepExtract>
fields = []
if tag.endswith(':'):
role = tag.rstrip(':')
<DeepExtract>
parts = data.split(maxsplit=1)
while len(parts) < 2:
parts.append('')
(tag, data) = parts
</DeepExtract>
decoder = JSONDecoder()
while data:
data = data.lstrip()
try:
(decoded, end) = decoder.raw_decode(data)
except ValueError:
fields.append(data)
data = ''
else:
fields.append(decoded)
data = data[end:]
return (role, tag, fields)
|
@classmethod
def parse_line(cls, line):
role = ''
parts = line.strip().split(maxsplit=1)
while len(parts) < 2:
parts.append('')
(tag, data) = parts
fields = []
if tag.endswith(':'):
role = tag.rstrip(':')
parts = data.split(maxsplit=1)
while len(parts) < 2:
parts.append('')
(tag, data) = parts
decoder = JSONDecoder()
while data:
data = data.lstrip()
try:
(decoded, end) = decoder.raw_decode(data)
except ValueError:
fields.append(data)
data = ''
else:
fields.append(decoded)
data = data[end:]
return (role, tag, fields)
|
boltkit
|
positive
|
def get_config(self):
"""
Describes the specifics of the sensing implementation
"""
val = self.bus.read_word_data(self.device_address, self.REGISTER_CONFIG)
<DeepExtract>
if 16 <= 8:
    val = val
elif 16 <= 16:
    val = (val & 65280) >> 8 | (val & 255) << 8
elif 16 <= 32:
    val = (val & 4278190080) >> 24 | (val & 16711680) >> 8 | (val & 65280) << 8 | (val & 255) << 24
else:
    raise Exception('Cannot swap endianness for length 16')
</DeepExtract>
return val
|
def get_config(self):
"""
Describes the specifics of the sensing implementation
"""
val = self.bus.read_word_data(self.device_address, self.REGISTER_CONFIG)
if 16 <= 8:
    val = val
elif 16 <= 16:
    val = (val & 65280) >> 8 | (val & 255) << 8
elif 16 <= 32:
    val = (val & 4278190080) >> 24 | (val & 16711680) >> 8 | (val & 65280) << 8 | (val & 255) << 24
else:
    raise Exception('Cannot swap endianness for length 16')
return val
|
cyanobyte
|
positive
|
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
slug = kwargs.get(settings.PROFILE_URL_KWARG, None)
<DeepExtract>
redirect_url = _fail_provider(request, organization=slug, strength=WEAK, roledescription=roledescription)
</DeepExtract>
if redirect_url:
return redirect_or_denied(request, redirect_url, redirect_field_name=redirect_field_name, descr=_('%(auth)s is neither a manager of %(organization)s nor a manager of one of %(organization)s providers.') % {'auth': request.user, 'organization': slug})
return view_func(request, *args, **kwargs)
return _wrapped_view
|
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
slug = kwargs.get(settings.PROFILE_URL_KWARG, None)
redirect_url = _fail_provider(request, organization=slug, strength=WEAK, roledescription=roledescription)
if redirect_url:
return redirect_or_denied(request, redirect_url, redirect_field_name=redirect_field_name, descr=_('%(auth)s is neither a manager of %(organization)s nor a manager of one of %(organization)s providers.') % {'auth': request.user, 'organization': slug})
return view_func(request, *args, **kwargs)
return _wrapped_view
|
djaodjin-saas
|
positive
|
def _encode_function_score_query(self, query):
proto = search_pb2.FunctionScoreQuery()
<DeepExtract>
if isinstance(query.query, MatchQuery):
proto.query.type = search_pb2.MATCH_QUERY
proto.query.query = self._encode_match_query(query.query)
elif isinstance(query.query, MatchPhraseQuery):
proto.query.type = search_pb2.MATCH_PHRASE_QUERY
proto.query.query = self._encode_match_phase_query(query.query)
elif isinstance(query.query, TermQuery):
proto.query.type = search_pb2.TERM_QUERY
proto.query.query = self._encode_term_query(query.query)
elif isinstance(query.query, RangeQuery):
proto.query.type = search_pb2.RANGE_QUERY
proto.query.query = self._encode_range_query(query.query)
elif isinstance(query.query, PrefixQuery):
proto.query.type = search_pb2.PREFIX_QUERY
proto.query.query = self._encode_prefix_query(query.query)
elif isinstance(query.query, BoolQuery):
proto.query.type = search_pb2.BOOL_QUERY
proto.query.query = self._encode_bool_query(query.query)
elif isinstance(query.query, NestedQuery):
proto.query.type = search_pb2.NESTED_QUERY
proto.query.query = self._encode_nested_query(query.query)
elif isinstance(query.query, WildcardQuery):
proto.query.type = search_pb2.WILDCARD_QUERY
proto.query.query = self._encode_wildcard_query(query.query)
elif isinstance(query.query, MatchAllQuery):
proto.query.type = search_pb2.MATCH_ALL_QUERY
proto.query.query = self._encode_match_all_query(query.query)
elif isinstance(query.query, GeoBoundingBoxQuery):
proto.query.type = search_pb2.GEO_BOUNDING_BOX_QUERY
proto.query.query = self._encode_geo_bounding_box_query(query.query)
elif isinstance(query.query, GeoDistanceQuery):
proto.query.type = search_pb2.GEO_DISTANCE_QUERY
proto.query.query = self._encode_geo_distance_query(query.query)
elif isinstance(query.query, GeoPolygonQuery):
proto.query.type = search_pb2.GEO_POLYGON_QUERY
proto.query.query = self._encode_geo_polygon_query(query.query)
elif isinstance(query.query, TermsQuery):
proto.query.type = search_pb2.TERMS_QUERY
proto.query.query = self._encode_terms_query(query.query)
elif isinstance(query.query, FunctionScoreQuery):
proto.query.type = search_pb2.FUNCTION_SCORE_QUERY
proto.query.query = self._encode_function_score_query(query.query)
elif isinstance(query.query, ExistsQuery):
proto.query.type = search_pb2.EXISTS_QUERY
proto.query.query = self._encode_exists_query(query.query)
else:
raise OTSClientError('Invalid query type: %s' % query.query.__class__.__name__)
</DeepExtract>
<DeepExtract>
proto.field_value_factor.field_name = self._get_unicode(query.field_value_factor.field_name)
</DeepExtract>
return proto.SerializeToString()
|
def _encode_function_score_query(self, query):
proto = search_pb2.FunctionScoreQuery()
if isinstance(query.query, MatchQuery):
proto.query.type = search_pb2.MATCH_QUERY
proto.query.query = self._encode_match_query(query.query)
elif isinstance(query.query, MatchPhraseQuery):
proto.query.type = search_pb2.MATCH_PHRASE_QUERY
proto.query.query = self._encode_match_phase_query(query.query)
elif isinstance(query.query, TermQuery):
proto.query.type = search_pb2.TERM_QUERY
proto.query.query = self._encode_term_query(query.query)
elif isinstance(query.query, RangeQuery):
proto.query.type = search_pb2.RANGE_QUERY
proto.query.query = self._encode_range_query(query.query)
elif isinstance(query.query, PrefixQuery):
proto.query.type = search_pb2.PREFIX_QUERY
proto.query.query = self._encode_prefix_query(query.query)
elif isinstance(query.query, BoolQuery):
proto.query.type = search_pb2.BOOL_QUERY
proto.query.query = self._encode_bool_query(query.query)
elif isinstance(query.query, NestedQuery):
proto.query.type = search_pb2.NESTED_QUERY
proto.query.query = self._encode_nested_query(query.query)
elif isinstance(query.query, WildcardQuery):
proto.query.type = search_pb2.WILDCARD_QUERY
proto.query.query = self._encode_wildcard_query(query.query)
elif isinstance(query.query, MatchAllQuery):
proto.query.type = search_pb2.MATCH_ALL_QUERY
proto.query.query = self._encode_match_all_query(query.query)
elif isinstance(query.query, GeoBoundingBoxQuery):
proto.query.type = search_pb2.GEO_BOUNDING_BOX_QUERY
proto.query.query = self._encode_geo_bounding_box_query(query.query)
elif isinstance(query.query, GeoDistanceQuery):
proto.query.type = search_pb2.GEO_DISTANCE_QUERY
proto.query.query = self._encode_geo_distance_query(query.query)
elif isinstance(query.query, GeoPolygonQuery):
proto.query.type = search_pb2.GEO_POLYGON_QUERY
proto.query.query = self._encode_geo_polygon_query(query.query)
elif isinstance(query.query, TermsQuery):
proto.query.type = search_pb2.TERMS_QUERY
proto.query.query = self._encode_terms_query(query.query)
elif isinstance(query.query, FunctionScoreQuery):
proto.query.type = search_pb2.FUNCTION_SCORE_QUERY
proto.query.query = self._encode_function_score_query(query.query)
elif isinstance(query.query, ExistsQuery):
proto.query.type = search_pb2.EXISTS_QUERY
proto.query.query = self._encode_exists_query(query.query)
else:
raise OTSClientError('Invalid query type: %s' % query.query.__class__.__name__)
proto.field_value_factor.field_name = self._get_unicode(query.field_value_factor.field_name)
return proto.SerializeToString()
|
aliyun-tablestore-python-sdk
|
positive
|
@curry1
def make_pseudo_beta(protein, prefix=''):
"""Create pseudo-beta (alpha for glycine) position and mask."""
assert prefix in ['', 'template_']
<DeepExtract>
is_gly = tf.equal(protein['template_aatype' if prefix else 'all_atom_aatype'], residue_constants.restype_order['G'])
ca_idx = residue_constants.atom_order['CA']
cb_idx = residue_constants.atom_order['CB']
pseudo_beta = tf.where(tf.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3]), protein[prefix + 'all_atom_positions'][..., ca_idx, :], protein[prefix + 'all_atom_positions'][..., cb_idx, :])
if protein['template_all_atom_masks' if prefix else 'all_atom_mask'] is not None:
pseudo_beta_mask = tf.where(is_gly, protein['template_all_atom_masks' if prefix else 'all_atom_mask'][..., ca_idx], protein['template_all_atom_masks' if prefix else 'all_atom_mask'][..., cb_idx])
pseudo_beta_mask = tf.cast(pseudo_beta_mask, tf.float32)
(protein[prefix + 'pseudo_beta'], protein[prefix + 'pseudo_beta_mask']) = (pseudo_beta, pseudo_beta_mask)
else:
    protein[prefix + 'pseudo_beta'] = pseudo_beta
</DeepExtract>
return protein
|
@curry1
def make_pseudo_beta(protein, prefix=''):
"""Create pseudo-beta (alpha for glycine) position and mask."""
assert prefix in ['', 'template_']
is_gly = tf.equal(protein['template_aatype' if prefix else 'all_atom_aatype'], residue_constants.restype_order['G'])
ca_idx = residue_constants.atom_order['CA']
cb_idx = residue_constants.atom_order['CB']
pseudo_beta = tf.where(tf.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3]), protein[prefix + 'all_atom_positions'][..., ca_idx, :], protein[prefix + 'all_atom_positions'][..., cb_idx, :])
if protein['template_all_atom_masks' if prefix else 'all_atom_mask'] is not None:
pseudo_beta_mask = tf.where(is_gly, protein['template_all_atom_masks' if prefix else 'all_atom_mask'][..., ca_idx], protein['template_all_atom_masks' if prefix else 'all_atom_mask'][..., cb_idx])
pseudo_beta_mask = tf.cast(pseudo_beta_mask, tf.float32)
(protein[prefix + 'pseudo_beta'], protein[prefix + 'pseudo_beta_mask']) = (pseudo_beta, pseudo_beta_mask)
else:
    protein[prefix + 'pseudo_beta'] = pseudo_beta
return protein
|
alphafold
|
positive
|
def _install_custom(p, pkg_to_group=None):
if pkg_to_group is None:
pkg_config = get_config_file(env, 'custom.yaml').base
(packages, pkg_to_group) = _yaml_to_packages(pkg_config, None)
<DeepExtract>
try:
mod_name = pkg_to_group[p] if p in pkg_to_group else p
env.logger.debug('Importing module cloudbio.custom.%s' % mod_name)
mod = __import__('cloudbio.custom.%s' % mod_name, fromlist=['cloudbio', 'custom'])
except ImportError:
raise ImportError('Need to write module cloudbio.custom.%s' % pkg_to_group[p])
replace_chars = ['-']
try:
for to_replace in replace_chars:
p = p.replace(to_replace, '_')
env.logger.debug('Looking for custom install function %s.install_%s' % (mod.__name__, p))
fn = getattr(mod, 'install_%s' % p)
except AttributeError:
raise ImportError('Need to write a install_%s function in custom.%s' % (p, pkg_to_group[p]))
fn = fn
</DeepExtract>
fn(env)
|
def _install_custom(p, pkg_to_group=None):
if pkg_to_group is None:
pkg_config = get_config_file(env, 'custom.yaml').base
(packages, pkg_to_group) = _yaml_to_packages(pkg_config, None)
try:
mod_name = pkg_to_group[p] if p in pkg_to_group else p
env.logger.debug('Importing module cloudbio.custom.%s' % mod_name)
mod = __import__('cloudbio.custom.%s' % mod_name, fromlist=['cloudbio', 'custom'])
except ImportError:
raise ImportError('Need to write module cloudbio.custom.%s' % pkg_to_group[p])
replace_chars = ['-']
try:
for to_replace in replace_chars:
p = p.replace(to_replace, '_')
env.logger.debug('Looking for custom install function %s.install_%s' % (mod.__name__, p))
fn = getattr(mod, 'install_%s' % p)
except AttributeError:
raise ImportError('Need to write a install_%s function in custom.%s' % (p, pkg_to_group[p]))
fn = fn
fn(env)
|
cloudbiolinux
|
positive
|
def _repr_attrs(self, obj, level: int) -> str:
represented_attrs = []
for attr in self._iter_attrs(obj, obj.REPR_SQL_ATTR_SORT_FIRST if hasattr(obj, 'REPR_SQL_ATTR_SORT_FIRST') else []):
<DeepExtract>
(attr_name, attr_value) = attr
if hasattr(attr_value, 'isoformat'):
represented_attr = '%s=%r' % (attr_name, attr_value.isoformat())
elif isinstance(attr_value.__class__, sqlalchemy.ext.declarative.DeclarativeMeta):
    represented_attr = self.repr_Base(attr_value, level)
else:
represented_attr = '%s=%s' % (attr_name, self.repr1(attr_value, level - 1))
</DeepExtract>
represented_attrs.append(represented_attr)
return ', '.join(represented_attrs)
|
def _repr_attrs(self, obj, level: int) -> str:
represented_attrs = []
for attr in self._iter_attrs(obj, obj.REPR_SQL_ATTR_SORT_FIRST if hasattr(obj, 'REPR_SQL_ATTR_SORT_FIRST') else []):
(attr_name, attr_value) = attr
if hasattr(attr_value, 'isoformat'):
represented_attr = '%s=%r' % (attr_name, attr_value.isoformat())
elif isinstance(attr_value.__class__, sqlalchemy.ext.declarative.DeclarativeMeta):
    represented_attr = self.repr_Base(attr_value, level)
else:
represented_attr = '%s=%s' % (attr_name, self.repr1(attr_value, level - 1))
represented_attrs.append(represented_attr)
return ', '.join(represented_attrs)
|
benji
|
positive
|
def _prologue(self):
if self._with_start_end:
(startgcode, endgcode, variables) = conveyor.util.get_start_end_variables(self._profile, self._slicer_settings, self._material, False)
with tempfile.NamedTemporaryFile(suffix='.gcode', delete=False) as startfp:
self._tmp_startpath = startfp.name
for line in startgcode:
print(line, file=startfp)
with tempfile.NamedTemporaryFile(suffix='.gcode', delete=False) as endfp:
self._tmp_endpath = endfp.name
for line in endgcode:
print(line, file=endfp)
with tempfile.NamedTemporaryFile(suffix='.config', delete=False) as configfp:
self._tmp_configpath = configfp.name
if None is self._slicer_settings.path:
<DeepExtract>
config_file = self._getconfig_file()
with open(config_file) as fp:
config = conveyor.json.load(fp)
config['infillDensity'] = self._slicer_settings.infill
config['numberOfShells'] = self._slicer_settings.shells
config['rapidMoveFeedRateXY'] = self._slicer_settings.travel_speed
config['doRaft'] = self._slicer_settings.raft
config['doSupport'] = self._slicer_settings.support
config['doFanCommand'] = 'PLA' == self._material
config['layerHeight'] = self._slicer_settings.layer_height
config['defaultExtruder'] = int(self._slicer_settings.extruder)
config['extrusionProfiles']['insets']['feedrate'] = self._slicer_settings.print_speed
config['extrusionProfiles']['infill']['feedrate'] = self._slicer_settings.print_speed
if self._slicer_settings.raft:
raftLayers = config['raftLayers']
config['fanLayer'] = raftLayers
if self._dualstrusion:
config['doPutModelOnPlatform'] = False
config['startGcode'] = None
config['endGcode'] = None
config = config
</DeepExtract>
s = json.dumps(config)
self._log.debug('miracle grue configuration: %s', s)
with open(self._tmp_configpath, 'w') as configfp:
json.dump(config, configfp, indent=8)
else:
import shutil
shutil.copy2(self._slicer_settings.path, self._tmp_configpath)
self._log.debug('using miracle grue configuration at %s', self._slicer_settings.path)
|
def _prologue(self):
if self._with_start_end:
(startgcode, endgcode, variables) = conveyor.util.get_start_end_variables(self._profile, self._slicer_settings, self._material, False)
with tempfile.NamedTemporaryFile(suffix='.gcode', delete=False) as startfp:
self._tmp_startpath = startfp.name
for line in startgcode:
print(line, file=startfp)
with tempfile.NamedTemporaryFile(suffix='.gcode', delete=False) as endfp:
self._tmp_endpath = endfp.name
for line in endgcode:
print(line, file=endfp)
with tempfile.NamedTemporaryFile(suffix='.config', delete=False) as configfp:
self._tmp_configpath = configfp.name
if None is self._slicer_settings.path:
config_file = self._getconfig_file()
with open(config_file) as fp:
config = conveyor.json.load(fp)
config['infillDensity'] = self._slicer_settings.infill
config['numberOfShells'] = self._slicer_settings.shells
config['rapidMoveFeedRateXY'] = self._slicer_settings.travel_speed
config['doRaft'] = self._slicer_settings.raft
config['doSupport'] = self._slicer_settings.support
config['doFanCommand'] = 'PLA' == self._material
config['layerHeight'] = self._slicer_settings.layer_height
config['defaultExtruder'] = int(self._slicer_settings.extruder)
config['extrusionProfiles']['insets']['feedrate'] = self._slicer_settings.print_speed
config['extrusionProfiles']['infill']['feedrate'] = self._slicer_settings.print_speed
if self._slicer_settings.raft:
raftLayers = config['raftLayers']
config['fanLayer'] = raftLayers
if self._dualstrusion:
config['doPutModelOnPlatform'] = False
config['startGcode'] = None
config['endGcode'] = None
config = config
s = json.dumps(config)
self._log.debug('miracle grue configuration: %s', s)
with open(self._tmp_configpath, 'w') as configfp:
json.dump(config, configfp, indent=8)
else:
import shutil
shutil.copy2(self._slicer_settings.path, self._tmp_configpath)
self._log.debug('using miracle grue configuration at %s', self._slicer_settings.path)
|
conveyor
|
positive
|
def generate_doc(dir_name, vasprun_files, outcar_files):
"""
Adapted from matgendb.creator.generate_doc
"""
try:
fullpath = os.path.abspath(dir_name)
d = jsanitize(self.additional_fields, strict=True)
d['schema'] = {'code': 'atomate', 'version': VaspDrone.__version__}
d['dir_name'] = fullpath
d['calcs_reversed'] = [self.process_vasprun(dir_name, taskname, filename) for (taskname, filename) in vasprun_files.items()]
outcar_data = [Outcar(os.path.join(dir_name, filename)).as_dict() for (taskname, filename) in outcar_files.items()]
run_stats = {}
for (i, d_calc) in enumerate(d['calcs_reversed']):
run_stats[d_calc['task']['name']] = outcar_data[i].pop('run_stats')
if d_calc.get('output'):
d_calc['output'].update({'outcar': outcar_data[i]})
else:
d_calc['output'] = {'outcar': outcar_data[i]}
try:
overall_run_stats = {}
for key in ['Total CPU time used (sec)', 'User time (sec)', 'System time (sec)', 'Elapsed time (sec)']:
overall_run_stats[key] = sum((v[key] for v in run_stats.values()))
run_stats['overall'] = overall_run_stats
except Exception:
logger.error(f'Bad run stats for {fullpath}.')
d['run_stats'] = run_stats
d['calcs_reversed'].reverse()
d_calc_init = d['calcs_reversed'][-1]
d_calc_final = d['calcs_reversed'][0]
d['chemsys'] = '-'.join(sorted(d_calc_final['elements']))
comp = Composition(d_calc_final['composition_unit_cell'])
d['formula_anonymous'] = comp.anonymized_formula
d['formula_reduced_abc'] = comp.reduced_composition.alphabetical_formula
for root_key in ['completed_at', 'nsites', 'composition_unit_cell', 'composition_reduced', 'formula_pretty', 'elements', 'nelements']:
d[root_key] = d_calc_final[root_key]
xc = d_calc_init['input']['incar'].get('GGA')
if xc:
xc = xc.upper()
p = d_calc_init['input']['potcar_type'][0].split('_')
pot_type = p[0]
functional = 'lda' if len(pot_type) == 1 else '_'.join(p[1:])
d['input'] = {'structure': d_calc_init['input']['structure'], 'is_hubbard': d_calc_init.pop('is_hubbard'), 'hubbards': d_calc_init.pop('hubbards'), 'is_lasph': d_calc_init['input']['incar'].get('LASPH', False), 'potcar_spec': d_calc_init['input'].get('potcar_spec'), 'xc_override': xc, 'pseudo_potential': {'functional': functional.lower(), 'pot_type': pot_type.lower(), 'labels': d_calc_init['input']['potcar']}, 'parameters': d_calc_init['input']['parameters'], 'incar': d_calc_init['input']['incar']}
d['output'] = {'structure': d_calc_final['output']['structure'], 'density': d_calc_final.pop('density'), 'energy': d_calc_final['output']['energy'], 'energy_per_atom': d_calc_final['output']['energy_per_atom'], 'forces': d_calc_final['output']['ionic_steps'][-1].get('forces'), 'stress': d_calc_final['output']['ionic_steps'][-1].get('stress')}
if len(d_calc_final['output']['outcar']['magnetization']) != 0:
magmoms = [m['tot'] for m in d_calc_final['output']['outcar']['magnetization']]
s = Structure.from_dict(d['output']['structure'])
s.add_site_property('magmom', magmoms)
d['output']['structure'] = s.as_dict()
calc = d['calcs_reversed'][0]
d['output'].update({'bandgap': calc['output']['bandgap'], 'cbm': calc['output']['cbm'], 'vbm': calc['output']['vbm'], 'is_gap_direct': calc['output']['is_gap_direct']})
try:
d['output'].update({'is_metal': calc['output']['is_metal']})
if not calc['output']['is_gap_direct']:
d['output']['direct_gap'] = calc['output']['direct_gap']
if 'transition' in calc['output']:
d['output']['transition'] = calc['output']['transition']
except Exception:
if self.bandstructure_mode is True:
logger.error(traceback.format_exc())
logger.error('Error in ' + os.path.abspath(dir_name) + '.\n' + traceback.format_exc())
raise
sg = SpacegroupAnalyzer(Structure.from_dict(d_calc_final['output']['structure']), 0.1)
if not sg.get_symmetry_dataset():
sg = SpacegroupAnalyzer(Structure.from_dict(d_calc_final['output']['structure']), 0.001, 1)
d['output']['spacegroup'] = {'source': 'spglib', 'symbol': sg.get_space_group_symbol(), 'number': sg.get_space_group_number(), 'point_group': sg.get_point_group_symbol(), 'crystal_system': sg.get_crystal_system(), 'hall': sg.get_hall()}
if d['input']['parameters'].get('LEPSILON'):
for k in ['epsilon_static', 'epsilon_static_wolfe', 'epsilon_ionic']:
d['output'][k] = d_calc_final['output'][k]
if SymmOp.inversion() not in sg.get_symmetry_operations():
for k in ['piezo_ionic_tensor', 'piezo_tensor']:
d['output'][k] = d_calc_final['output']['outcar'][k]
if d['input']['parameters'].get('LOPTICS'):
for k in ['optical_absorption_coeff', 'dielectric']:
d['output'][k] = d_calc_final['output'][k]
if d['input']['incar'].get('ALGO') == 'CHI':
for k in ['optical_absorption_coeff', 'dielectric']:
d['output'][k] = d_calc_final['output'][k]
d['state'] = 'successful' if d_calc['has_vasp_completed'] else 'unsuccessful'
<DeepExtract>
initial_vol = d['input']['structure']['lattice']['volume']
final_vol = d['output']['structure']['lattice']['volume']
delta_vol = final_vol - initial_vol
percent_delta_vol = 100 * delta_vol / initial_vol
warning_msgs = []
error_msgs = []
if abs(percent_delta_vol) > volume_change_threshold:
warning_msgs.append(f'Volume change > {volume_change_threshold * 100}%')
max_force = None
calc = d['calcs_reversed'][0]
if d['state'] == 'successful':
if 'forces' in calc['output']['ionic_steps'][-1]:
forces = np.array(calc['output']['ionic_steps'][-1]['forces'])
final_structure = Structure.from_dict(calc['output']['structure'])
sdyn = final_structure.site_properties.get('selective_dynamics')
if sdyn:
forces[np.logical_not(sdyn)] = 0
max_force = max(np.linalg.norm(forces, axis=1))
if calc['input']['parameters'].get('NSW', 0) > 0:
drift = calc['output']['outcar'].get('drift', [[0, 0, 0]])
max_drift = max((np.linalg.norm(d) for d in drift))
ediffg = calc['input']['parameters'].get('EDIFFG', None)
if ediffg and float(ediffg) < 0:
desired_force_convergence = -float(ediffg)
else:
desired_force_convergence = np.inf
if max_drift > desired_force_convergence:
warning_msgs.append(f'Drift ({drift}) > desired force convergence ({desired_force_convergence}), structure likely not converged to desired accuracy.')
s = Structure.from_dict(d['output']['structure'])
if not s.is_valid():
error_msgs.append('Bad structure (atoms are too close!)')
d['state'] = 'error'
d['analysis'] = {'delta_volume': delta_vol, 'delta_volume_as_percent': percent_delta_vol, 'max_force': max_force, 'warnings': warning_msgs, 'errors': error_msgs}
</DeepExtract>
d['last_updated'] = datetime.datetime.utcnow()
return d
except Exception:
logger.error(traceback.format_exc())
logger.error('Error in ' + os.path.abspath(dir_name) + '.\n' + traceback.format_exc())
raise
|
def generate_doc(dir_name, vasprun_files, outcar_files):
"""
Adapted from matgendb.creator.generate_doc
"""
try:
fullpath = os.path.abspath(dir_name)
d = jsanitize(self.additional_fields, strict=True)
d['schema'] = {'code': 'atomate', 'version': VaspDrone.__version__}
d['dir_name'] = fullpath
d['calcs_reversed'] = [self.process_vasprun(dir_name, taskname, filename) for (taskname, filename) in vasprun_files.items()]
outcar_data = [Outcar(os.path.join(dir_name, filename)).as_dict() for (taskname, filename) in outcar_files.items()]
run_stats = {}
for (i, d_calc) in enumerate(d['calcs_reversed']):
run_stats[d_calc['task']['name']] = outcar_data[i].pop('run_stats')
if d_calc.get('output'):
d_calc['output'].update({'outcar': outcar_data[i]})
else:
d_calc['output'] = {'outcar': outcar_data[i]}
try:
overall_run_stats = {}
for key in ['Total CPU time used (sec)', 'User time (sec)', 'System time (sec)', 'Elapsed time (sec)']:
overall_run_stats[key] = sum((v[key] for v in run_stats.values()))
run_stats['overall'] = overall_run_stats
except Exception:
logger.error(f'Bad run stats for {fullpath}.')
d['run_stats'] = run_stats
d['calcs_reversed'].reverse()
d_calc_init = d['calcs_reversed'][-1]
d_calc_final = d['calcs_reversed'][0]
d['chemsys'] = '-'.join(sorted(d_calc_final['elements']))
comp = Composition(d_calc_final['composition_unit_cell'])
d['formula_anonymous'] = comp.anonymized_formula
d['formula_reduced_abc'] = comp.reduced_composition.alphabetical_formula
for root_key in ['completed_at', 'nsites', 'composition_unit_cell', 'composition_reduced', 'formula_pretty', 'elements', 'nelements']:
d[root_key] = d_calc_final[root_key]
xc = d_calc_init['input']['incar'].get('GGA')
if xc:
xc = xc.upper()
p = d_calc_init['input']['potcar_type'][0].split('_')
pot_type = p[0]
functional = 'lda' if len(pot_type) == 1 else '_'.join(p[1:])
d['input'] = {'structure': d_calc_init['input']['structure'], 'is_hubbard': d_calc_init.pop('is_hubbard'), 'hubbards': d_calc_init.pop('hubbards'), 'is_lasph': d_calc_init['input']['incar'].get('LASPH', False), 'potcar_spec': d_calc_init['input'].get('potcar_spec'), 'xc_override': xc, 'pseudo_potential': {'functional': functional.lower(), 'pot_type': pot_type.lower(), 'labels': d_calc_init['input']['potcar']}, 'parameters': d_calc_init['input']['parameters'], 'incar': d_calc_init['input']['incar']}
d['output'] = {'structure': d_calc_final['output']['structure'], 'density': d_calc_final.pop('density'), 'energy': d_calc_final['output']['energy'], 'energy_per_atom': d_calc_final['output']['energy_per_atom'], 'forces': d_calc_final['output']['ionic_steps'][-1].get('forces'), 'stress': d_calc_final['output']['ionic_steps'][-1].get('stress')}
if len(d_calc_final['output']['outcar']['magnetization']) != 0:
magmoms = [m['tot'] for m in d_calc_final['output']['outcar']['magnetization']]
s = Structure.from_dict(d['output']['structure'])
s.add_site_property('magmom', magmoms)
d['output']['structure'] = s.as_dict()
calc = d['calcs_reversed'][0]
d['output'].update({'bandgap': calc['output']['bandgap'], 'cbm': calc['output']['cbm'], 'vbm': calc['output']['vbm'], 'is_gap_direct': calc['output']['is_gap_direct']})
try:
d['output'].update({'is_metal': calc['output']['is_metal']})
if not calc['output']['is_gap_direct']:
d['output']['direct_gap'] = calc['output']['direct_gap']
if 'transition' in calc['output']:
d['output']['transition'] = calc['output']['transition']
except Exception:
if self.bandstructure_mode is True:
logger.error(traceback.format_exc())
logger.error('Error in ' + os.path.abspath(dir_name) + '.\n' + traceback.format_exc())
raise
sg = SpacegroupAnalyzer(Structure.from_dict(d_calc_final['output']['structure']), 0.1)
if not sg.get_symmetry_dataset():
sg = SpacegroupAnalyzer(Structure.from_dict(d_calc_final['output']['structure']), 0.001, 1)
d['output']['spacegroup'] = {'source': 'spglib', 'symbol': sg.get_space_group_symbol(), 'number': sg.get_space_group_number(), 'point_group': sg.get_point_group_symbol(), 'crystal_system': sg.get_crystal_system(), 'hall': sg.get_hall()}
if d['input']['parameters'].get('LEPSILON'):
for k in ['epsilon_static', 'epsilon_static_wolfe', 'epsilon_ionic']:
d['output'][k] = d_calc_final['output'][k]
if SymmOp.inversion() not in sg.get_symmetry_operations():
for k in ['piezo_ionic_tensor', 'piezo_tensor']:
d['output'][k] = d_calc_final['output']['outcar'][k]
if d['input']['parameters'].get('LOPTICS'):
for k in ['optical_absorption_coeff', 'dielectric']:
d['output'][k] = d_calc_final['output'][k]
if d['input']['incar'].get('ALGO') == 'CHI':
for k in ['optical_absorption_coeff', 'dielectric']:
d['output'][k] = d_calc_final['output'][k]
d['state'] = 'successful' if d_calc['has_vasp_completed'] else 'unsuccessful'
initial_vol = d['input']['structure']['lattice']['volume']
final_vol = d['output']['structure']['lattice']['volume']
delta_vol = final_vol - initial_vol
percent_delta_vol = 100 * delta_vol / initial_vol
warning_msgs = []
error_msgs = []
if abs(percent_delta_vol) > volume_change_threshold:
warning_msgs.append(f'Volume change > {volume_change_threshold * 100}%')
max_force = None
calc = d['calcs_reversed'][0]
if d['state'] == 'successful':
if 'forces' in calc['output']['ionic_steps'][-1]:
forces = np.array(calc['output']['ionic_steps'][-1]['forces'])
final_structure = Structure.from_dict(calc['output']['structure'])
sdyn = final_structure.site_properties.get('selective_dynamics')
if sdyn:
forces[np.logical_not(sdyn)] = 0
max_force = max(np.linalg.norm(forces, axis=1))
if calc['input']['parameters'].get('NSW', 0) > 0:
drift = calc['output']['outcar'].get('drift', [[0, 0, 0]])
max_drift = max((np.linalg.norm(d) for d in drift))
ediffg = calc['input']['parameters'].get('EDIFFG', None)
if ediffg and float(ediffg) < 0:
desired_force_convergence = -float(ediffg)
else:
desired_force_convergence = np.inf
if max_drift > desired_force_convergence:
warning_msgs.append(f'Drift ({drift}) > desired force convergence ({desired_force_convergence}), structure likely not converged to desired accuracy.')
s = Structure.from_dict(d['output']['structure'])
if not s.is_valid():
error_msgs.append('Bad structure (atoms are too close!)')
d['state'] = 'error'
d['analysis'] = {'delta_volume': delta_vol, 'delta_volume_as_percent': percent_delta_vol, 'max_force': max_force, 'warnings': warning_msgs, 'errors': error_msgs}
d['last_updated'] = datetime.datetime.utcnow()
return d
except Exception:
logger.error(traceback.format_exc())
logger.error('Error in ' + os.path.abspath(dir_name) + '.\n' + traceback.format_exc())
raise
|
atomate
|
positive
|
def test_get_default_security_group_config_should_set_proper_values_to_model():
<DeepExtract>
cluster_model = dict_to_objdict({'kind': 'epiphany-cluster', 'provider': 'aws', 'specification': {'name': 'TestCluster', 'prefix': 'prefix', 'cloud': {'vnet_address_pool': '10.20.0.0/22', 'network': {'use_network_security_groups': True}, 'default_os_image': 'default', 'use_public_ips': True}}})
cluster_model = cluster_model
</DeepExtract>
vpc_config = dict_to_objdict({'specification': {'name': 'prefix-testcluster-vpc'}})
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_default_security_group_config(vpc_config)
assert actual.specification.vpc_name == 'prefix-testcluster-vpc'
|
def test_get_default_security_group_config_should_set_proper_values_to_model():
cluster_model = dict_to_objdict({'kind': 'epiphany-cluster', 'provider': 'aws', 'specification': {'name': 'TestCluster', 'prefix': 'prefix', 'cloud': {'vnet_address_pool': '10.20.0.0/22', 'network': {'use_network_security_groups': True}, 'default_os_image': 'default', 'use_public_ips': True}}})
cluster_model = cluster_model
vpc_config = dict_to_objdict({'specification': {'name': 'prefix-testcluster-vpc'}})
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_default_security_group_config(vpc_config)
assert actual.specification.vpc_name == 'prefix-testcluster-vpc'
|
epiphany
|
positive
|
def test_flops():
from fblib.util.model_resources.flops import compute_gflops
<DeepExtract>
model = ResNeXt(ResNeXtBottleneck, layers=[3, 4, 6, 3], n_classes=21, nInputChannels=nInputChannels, cardinality=cardinality, **kwargs)
if 'imagenet' == 'imagenet':
model_full = resnext_imagenet.resnext50_32x4d(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif 'imagenet' == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('Select imagenet or scratch for pre-training')
net = model
</DeepExtract>
print('GFLOPS: {}'.format(compute_gflops(net, (2, 3, 256, 256))))
|
def test_flops():
from fblib.util.model_resources.flops import compute_gflops
model = ResNeXt(ResNeXtBottleneck, layers=[3, 4, 6, 3], n_classes=21, nInputChannels=nInputChannels, cardinality=cardinality, **kwargs)
if 'imagenet' == 'imagenet':
model_full = resnext_imagenet.resnext50_32x4d(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif 'imagenet' == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('Select imagenet or scratch for pre-training')
net = model
print('GFLOPS: {}'.format(compute_gflops(net, (2, 3, 256, 256))))
|
astmt
|
positive
|
def backup_txs(txs: Sequence[Tuple[Tx, bytes]], is_unspendable: Callable[[bytes], bool]):
undo_info = self.db.read_undo_info(self.height)
if undo_info is None:
raise ChainError(f'no undo information found for height {self.height:,d}')
n = len(undo_info)
put_utxo = self.utxo_cache.__setitem__
spend_utxo = self.spend_utxo
touched = self.touched
undo_entry_len = HASHX_LEN + TXNUM_LEN + 8
for (tx, tx_hash) in reversed(txs):
for (idx, txout) in enumerate(tx.outputs):
if is_unspendable(txout.pk_script):
continue
<DeepExtract>
idx_packed = pack_le_uint32(idx)
cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
if cache_value:
cache_value = cache_value
txnum_padding = bytes(8 - TXNUM_LEN)
prefix = b'h' + tx_hash[:COMP_TXID_LEN] + idx_packed
candidates = {db_key: hashX for (db_key, hashX) in self.db.utxo_db.iterator(prefix=prefix)}
for (hdb_key, hashX) in candidates.items():
tx_num_packed = hdb_key[-TXNUM_LEN:]
if len(candidates) > 1:
(tx_num,) = unpack_le_uint64(tx_num_packed + txnum_padding)
(hash, _height) = self.db.fs_tx_hash(tx_num)
if hash != tx_hash:
assert hash is not None
continue
udb_key = b'u' + hashX + hdb_key[-4 - TXNUM_LEN:]
utxo_value_packed = self.db.utxo_db.get(udb_key)
if utxo_value_packed:
self.db_deletes.append(hdb_key)
self.db_deletes.append(udb_key)
cache_value = hashX + tx_num_packed + utxo_value_packed
raise ChainError(f'UTXO {hash_to_hex_str(tx_hash)} / {idx:,d} not found in "h" table')
</DeepExtract>
hashX = cache_value[:HASHX_LEN]
touched.add(hashX)
for txin in reversed(tx.inputs):
if txin.is_generation():
continue
n -= undo_entry_len
undo_item = undo_info[n:n + undo_entry_len]
put_utxo(txin.prev_hash + pack_le_uint32(txin.prev_idx), undo_item)
hashX = undo_item[:HASHX_LEN]
touched.add(hashX)
assert n == 0
self.tx_count -= len(txs)
|
def backup_txs(txs: Sequence[Tuple[Tx, bytes]], is_unspendable: Callable[[bytes], bool]):
undo_info = self.db.read_undo_info(self.height)
if undo_info is None:
raise ChainError(f'no undo information found for height {self.height:,d}')
n = len(undo_info)
put_utxo = self.utxo_cache.__setitem__
spend_utxo = self.spend_utxo
touched = self.touched
undo_entry_len = HASHX_LEN + TXNUM_LEN + 8
for (tx, tx_hash) in reversed(txs):
for (idx, txout) in enumerate(tx.outputs):
if is_unspendable(txout.pk_script):
continue
idx_packed = pack_le_uint32(idx)
cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
if cache_value:
cache_value = cache_value
txnum_padding = bytes(8 - TXNUM_LEN)
prefix = b'h' + tx_hash[:COMP_TXID_LEN] + idx_packed
candidates = {db_key: hashX for (db_key, hashX) in self.db.utxo_db.iterator(prefix=prefix)}
for (hdb_key, hashX) in candidates.items():
tx_num_packed = hdb_key[-TXNUM_LEN:]
if len(candidates) > 1:
(tx_num,) = unpack_le_uint64(tx_num_packed + txnum_padding)
(hash, _height) = self.db.fs_tx_hash(tx_num)
if hash != tx_hash:
assert hash is not None
continue
udb_key = b'u' + hashX + hdb_key[-4 - TXNUM_LEN:]
utxo_value_packed = self.db.utxo_db.get(udb_key)
if utxo_value_packed:
self.db_deletes.append(hdb_key)
self.db_deletes.append(udb_key)
cache_value = hashX + tx_num_packed + utxo_value_packed
raise ChainError(f'UTXO {hash_to_hex_str(tx_hash)} / {idx:,d} not found in "h" table')
hashX = cache_value[:HASHX_LEN]
touched.add(hashX)
for txin in reversed(tx.inputs):
if txin.is_generation():
continue
n -= undo_entry_len
undo_item = undo_info[n:n + undo_entry_len]
put_utxo(txin.prev_hash + pack_le_uint32(txin.prev_idx), undo_item)
hashX = undo_item[:HASHX_LEN]
touched.add(hashX)
assert n == 0
self.tx_count -= len(txs)
|
electrumx
|
positive
|
def test_fire_alarm_query_matched_string(self):
<DeepExtract>
alarm_id = kwargs.get('id') or uuidutils.generate_uuid()
alarm = models.Alarm(name=kwargs.get('name', alarm_id), type='event', enabled=True, alarm_id=alarm_id, description='desc', state=kwargs.get('state', 'insufficient data'), state_reason='reason', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=kwargs.get('repeat', False), user_id='user', project_id=kwargs.get('project', ''), time_constraints=[], rule=dict(event_type=kwargs.get('event_type', '*'), query=kwargs.get('query', [])))
</DeepExtract>
<DeepExtract>
event = {'message_id': kwargs.get('id') or uuidutils.generate_uuid(), 'event_type': kwargs.get('event_type', 'type0'), 'traits': kwargs.get('traits', [])}
</DeepExtract>
<DeepExtract>
self._setup_alarm_storage([alarm])
self._setup_alarm_notifier()
self.evaluator.evaluate_events([event])
if expect_db_queries is not None:
expected = [mock.call(enabled=True, type='event', project_id=p) for p in expect_db_queries]
self.assertEqual(expected, self.storage_conn.get_alarms.call_args_list)
if {alarm.alarm_id: evaluator.ALARM} is not None:
for (alarm_id, state) in {alarm.alarm_id: evaluator.ALARM}.items():
self.assertEqual(state, self._stored_alarms[alarm_id].state)
if [alarm] is not None:
self.assertEqual(len([alarm]), len(self._update_history))
for (alarm, h) in zip([alarm], self._update_history):
expected = dict(alarm_id=alarm.alarm_id, state=evaluator.ALARM)
self.assertEqual(expected, h)
if [dict(alarm=alarm, event=event)] is not None:
self.assertEqual(len([dict(alarm=alarm, event=event)]), len(self._notification_history))
for (n, h) in zip([dict(alarm=alarm, event=event)], self._notification_history):
alarm = n['alarm']
event = n['event']
previous = n.get('previous', evaluator.UNKNOWN)
reason = 'Event <id=%(e)s,event_type=%(type)s> hits the query <query=%(query)s>.' % {'e': event['message_id'], 'type': event['event_type'], 'query': json.dumps(alarm.rule['query'], sort_keys=True)}
data = {'type': 'event', 'event': event}
expected = dict(alarm_id=alarm.alarm_id, state=evaluator.ALARM, previous=previous, reason=reason, data=data)
self.assertEqual(expected, h)
</DeepExtract>
|
def test_fire_alarm_query_matched_string(self):
alarm_id = kwargs.get('id') or uuidutils.generate_uuid()
alarm = models.Alarm(name=kwargs.get('name', alarm_id), type='event', enabled=True, alarm_id=alarm_id, description='desc', state=kwargs.get('state', 'insufficient data'), state_reason='reason', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=kwargs.get('repeat', False), user_id='user', project_id=kwargs.get('project', ''), time_constraints=[], rule=dict(event_type=kwargs.get('event_type', '*'), query=kwargs.get('query', [])))
event = {'message_id': kwargs.get('id') or uuidutils.generate_uuid(), 'event_type': kwargs.get('event_type', 'type0'), 'traits': kwargs.get('traits', [])}
self._setup_alarm_storage([alarm])
self._setup_alarm_notifier()
self.evaluator.evaluate_events([event])
if expect_db_queries is not None:
expected = [mock.call(enabled=True, type='event', project_id=p) for p in expect_db_queries]
self.assertEqual(expected, self.storage_conn.get_alarms.call_args_list)
if {alarm.alarm_id: evaluator.ALARM} is not None:
for (alarm_id, state) in {alarm.alarm_id: evaluator.ALARM}.items():
self.assertEqual(state, self._stored_alarms[alarm_id].state)
if [alarm] is not None:
self.assertEqual(len([alarm]), len(self._update_history))
for (alarm, h) in zip([alarm], self._update_history):
expected = dict(alarm_id=alarm.alarm_id, state=evaluator.ALARM)
self.assertEqual(expected, h)
if [dict(alarm=alarm, event=event)] is not None:
self.assertEqual(len([dict(alarm=alarm, event=event)]), len(self._notification_history))
for (n, h) in zip([dict(alarm=alarm, event=event)], self._notification_history):
alarm = n['alarm']
event = n['event']
previous = n.get('previous', evaluator.UNKNOWN)
reason = 'Event <id=%(e)s,event_type=%(type)s> hits the query <query=%(query)s>.' % {'e': event['message_id'], 'type': event['event_type'], 'query': json.dumps(alarm.rule['query'], sort_keys=True)}
data = {'type': 'event', 'event': event}
expected = dict(alarm_id=alarm.alarm_id, state=evaluator.ALARM, previous=previous, reason=reason, data=data)
self.assertEqual(expected, h)
|
aodh
|
positive
|
def converged(self):
"""Function that checks the convergence of the min. surrogate model."""
<DeepExtract>
forces_flatten = -np.array([self.list_gradients[-1]])
list_fmax = np.zeros((len(np.array([self.list_gradients[-1]])), 1))
j = 0
for i in forces_flatten:
atoms_forces_i = np.reshape(i, (-1, 3))
list_fmax[j] = np.max(np.sqrt(np.sum(atoms_forces_i ** 2, axis=1)))
j = j + 1
self.list_fmax = list_fmax
</DeepExtract>
self.max_abs_forces = np.max(np.abs(self.list_fmax))
if self.max_abs_forces < self.fmax:
parprint('Congratulations. Structural optimization has converged.')
parprint('All the evaluated structures can be found in:', self.filename)
print_cite_mlmin()
return True
return False
|
def converged(self):
"""Function that checks the convergence of the min. surrogate model."""
forces_flatten = -np.array([self.list_gradients[-1]])
list_fmax = np.zeros((len(np.array([self.list_gradients[-1]])), 1))
j = 0
for i in forces_flatten:
atoms_forces_i = np.reshape(i, (-1, 3))
list_fmax[j] = np.max(np.sqrt(np.sum(atoms_forces_i ** 2, axis=1)))
j = j + 1
self.list_fmax = list_fmax
self.max_abs_forces = np.max(np.abs(self.list_fmax))
if self.max_abs_forces < self.fmax:
parprint('Congratulations. Structural optimization has converged.')
parprint('All the evaluated structures can be found in:', self.filename)
print_cite_mlmin()
return True
return False
|
CatLearn
|
positive
|
def step(self, closure=None):
"""Performs a single optimization step.
In automatic mode also updates SWA running averages.
"""
<DeepExtract>
if self.swa_lr is None:
return
for param_group in self.param_groups:
if param_group['step_counter'] >= self.swa_start:
param_group['lr'] = self.swa_lr
</DeepExtract>
loss = self.optimizer.step(closure)
for group in self.param_groups:
group['step_counter'] += 1
steps = group['step_counter']
if self._auto_mode:
if steps > self.swa_start and steps % self.swa_freq == 0:
<DeepExtract>
for p in group['params']:
param_state = self.state[p]
if 'swa_buffer' not in param_state:
param_state['swa_buffer'] = torch.zeros_like(p.data)
buf = param_state['swa_buffer']
virtual_decay = 1 / float(group['n_avg'] + 1)
diff = (p.data - buf) * virtual_decay
buf.add_(diff)
group['n_avg'] += 1
</DeepExtract>
return loss
|
def step(self, closure=None):
"""Performs a single optimization step.
In automatic mode also updates SWA running averages.
"""
if self.swa_lr is None:
return
for param_group in self.param_groups:
if param_group['step_counter'] >= self.swa_start:
param_group['lr'] = self.swa_lr
loss = self.optimizer.step(closure)
for group in self.param_groups:
group['step_counter'] += 1
steps = group['step_counter']
if self._auto_mode:
if steps > self.swa_start and steps % self.swa_freq == 0:
for p in group['params']:
param_state = self.state[p]
if 'swa_buffer' not in param_state:
param_state['swa_buffer'] = torch.zeros_like(p.data)
buf = param_state['swa_buffer']
virtual_decay = 1 / float(group['n_avg'] + 1)
diff = (p.data - buf) * virtual_decay
buf.add_(diff)
group['n_avg'] += 1
return loss
|
elektronn3
|
positive
|
def _transfer_resnet101(src, dst):
dst.conv1.W.data[:] = src.conv1.W.data
dst.bn1.avg_mean[:] = src.bn_conv1.avg_mean
dst.bn1.avg_var[:] = src.bn_conv1.avg_var
dst.bn1.gamma.data[:] = src.scale_conv1.W.data
dst.bn1.beta.data[:] = src.scale_conv1.bias.b.data
<DeepExtract>
_transfer_bottleneckA(src, dst.res2.a, ['2a', '2b', '2c'][0])
for (i, name) in enumerate(['2a', '2b', '2c'][1:]):
dst_bottleneckB = getattr(dst.res2, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
</DeepExtract>
<DeepExtract>
_transfer_bottleneckA(src, dst.res3.a, ['3a', '3b1', '3b2', '3b3'][0])
for (i, name) in enumerate(['3a', '3b1', '3b2', '3b3'][1:]):
dst_bottleneckB = getattr(dst.res3, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
</DeepExtract>
<DeepExtract>
_transfer_bottleneckA(src, dst.res4.a, ['4a'] + ['4b{}'.format(i) for i in range(1, 23)][0])
for (i, name) in enumerate(['4a'] + ['4b{}'.format(i) for i in range(1, 23)][1:]):
dst_bottleneckB = getattr(dst.res4, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
</DeepExtract>
<DeepExtract>
_transfer_bottleneckA(src, dst.res5.a, ['5a', '5b', '5c'][0])
for (i, name) in enumerate(['5a', '5b', '5c'][1:]):
dst_bottleneckB = getattr(dst.res5, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
</DeepExtract>
dst.fc6.W.data[:] = src.fc1000.W.data
dst.fc6.b.data[:] = src.fc1000.b.data
|
def _transfer_resnet101(src, dst):
dst.conv1.W.data[:] = src.conv1.W.data
dst.bn1.avg_mean[:] = src.bn_conv1.avg_mean
dst.bn1.avg_var[:] = src.bn_conv1.avg_var
dst.bn1.gamma.data[:] = src.scale_conv1.W.data
dst.bn1.beta.data[:] = src.scale_conv1.bias.b.data
_transfer_bottleneckA(src, dst.res2.a, ['2a', '2b', '2c'][0])
for (i, name) in enumerate(['2a', '2b', '2c'][1:]):
dst_bottleneckB = getattr(dst.res2, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
_transfer_bottleneckA(src, dst.res3.a, ['3a', '3b1', '3b2', '3b3'][0])
for (i, name) in enumerate(['3a', '3b1', '3b2', '3b3'][1:]):
dst_bottleneckB = getattr(dst.res3, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
_transfer_bottleneckA(src, dst.res4.a, ['4a'] + ['4b{}'.format(i) for i in range(1, 23)][0])
for (i, name) in enumerate(['4a'] + ['4b{}'.format(i) for i in range(1, 23)][1:]):
dst_bottleneckB = getattr(dst.res4, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
_transfer_bottleneckA(src, dst.res5.a, ['5a', '5b', '5c'][0])
for (i, name) in enumerate(['5a', '5b', '5c'][1:]):
dst_bottleneckB = getattr(dst.res5, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
dst.fc6.W.data[:] = src.fc1000.W.data
dst.fc6.b.data[:] = src.fc1000.b.data
|
caad_18
|
positive
|
def remove(self, key: int) -> None:
"""
Removes the mapping of the specified value key if this map contains a mapping for the key
"""
<DeepExtract>
idx = hash(str(key)) % self.MAXSIZE
</DeepExtract>
leng = len(self.map[idx])
if leng <= 0:
return None
else:
for i in range(leng):
if self.map[idx][i][0] == key:
self.map[idx].remove(self.map[idx][i])
return None
|
def remove(self, key: int) -> None:
"""
Removes the mapping of the specified value key if this map contains a mapping for the key
"""
idx = hash(str(key)) % self.MAXSIZE
leng = len(self.map[idx])
if leng <= 0:
return None
else:
for i in range(leng):
if self.map[idx][i][0] == key:
self.map[idx].remove(self.map[idx][i])
return None
|
Competitive_Programming
|
positive
|
def __init__(self):
self.lig2code = {}
self.code2lig = {}
<DeepExtract>
assert type('') == unicode or not re.search('[\\x80-\\xff]', '')
if not override and self.lig2code.get('') is not None:
raise Exception("character '%s' (%d) already in ligature table" % ('', self.ord('')))
self.lig2code[''] = 0
self.code2lig[0] = unicode('')
</DeepExtract>
<DeepExtract>
assert type('<RHO>') == unicode or not re.search('[\\x80-\\xff]', '<RHO>')
if not override and self.lig2code.get('<RHO>') is not None:
raise Exception("character '%s' (%d) already in ligature table" % ('<RHO>', self.ord('<RHO>')))
self.lig2code['<RHO>'] = 2
self.code2lig[2] = unicode('<RHO>')
</DeepExtract>
<DeepExtract>
assert type('<SIG>') == unicode or not re.search('[\\x80-\\xff]', '<SIG>')
if not override and self.lig2code.get('<SIG>') is not None:
raise Exception("character '%s' (%d) already in ligature table" % ('<SIG>', self.ord('<SIG>')))
self.lig2code['<SIG>'] = 3
self.code2lig[3] = unicode('<SIG>')
</DeepExtract>
<DeepExtract>
assert type('<PHI>') == unicode or not re.search('[\\x80-\\xff]', '<PHI>')
if not override and self.lig2code.get('<PHI>') is not None:
raise Exception("character '%s' (%d) already in ligature table" % ('<PHI>', self.ord('<PHI>')))
self.lig2code['<PHI>'] = 4
self.code2lig[4] = unicode('<PHI>')
</DeepExtract>
for i in range(32, 1024):
<DeepExtract>
assert type(unichr(i)) == unicode or not re.search('[\\x80-\\xff]', unichr(i))
if not override and self.lig2code.get(unichr(i)) is not None:
raise Exception("character '%s' (%d) already in ligature table" % (unichr(i), self.ord(unichr(i))))
self.lig2code[unichr(i)] = i
self.code2lig[i] = unicode(unichr(i))
</DeepExtract>
for c in common_chars:
<DeepExtract>
assert type(c) == unicode or not re.search('[\\x80-\\xff]', c)
if not override and self.lig2code.get(c) is not None:
raise Exception("character '%s' (%d) already in ligature table" % (c, self.ord(c)))
self.lig2code[c] = ord(c)
self.code2lig[ord(c)] = unicode(c)
</DeepExtract>
|
def __init__(self):
self.lig2code = {}
self.code2lig = {}
assert type('') == unicode or not re.search('[\\x80-\\xff]', '')
if not override and self.lig2code.get('') is not None:
raise Exception("character '%s' (%d) already in ligature table" % ('', self.ord('')))
self.lig2code[''] = 0
self.code2lig[0] = unicode('')
assert type('<RHO>') == unicode or not re.search('[\\x80-\\xff]', '<RHO>')
if not override and self.lig2code.get('<RHO>') is not None:
raise Exception("character '%s' (%d) already in ligature table" % ('<RHO>', self.ord('<RHO>')))
self.lig2code['<RHO>'] = 2
self.code2lig[2] = unicode('<RHO>')
assert type('<SIG>') == unicode or not re.search('[\\x80-\\xff]', '<SIG>')
if not override and self.lig2code.get('<SIG>') is not None:
raise Exception("character '%s' (%d) already in ligature table" % ('<SIG>', self.ord('<SIG>')))
self.lig2code['<SIG>'] = 3
self.code2lig[3] = unicode('<SIG>')
assert type('<PHI>') == unicode or not re.search('[\\x80-\\xff]', '<PHI>')
if not override and self.lig2code.get('<PHI>') is not None:
raise Exception("character '%s' (%d) already in ligature table" % ('<PHI>', self.ord('<PHI>')))
self.lig2code['<PHI>'] = 4
self.code2lig[4] = unicode('<PHI>')
for i in range(32, 1024):
assert type(unichr(i)) == unicode or not re.search('[\\x80-\\xff]', unichr(i))
if not override and self.lig2code.get(unichr(i)) is not None:
raise Exception("character '%s' (%d) already in ligature table" % (unichr(i), self.ord(unichr(i))))
self.lig2code[unichr(i)] = i
self.code2lig[i] = unicode(unichr(i))
for c in common_chars:
assert type(c) == unicode or not re.search('[\\x80-\\xff]', c)
if not override and self.lig2code.get(c) is not None:
raise Exception("character '%s' (%d) already in ligature table" % (c, self.ord(c)))
self.lig2code[c] = ord(c)
self.code2lig[ord(c)] = unicode(c)
|
deep_ocr
|
positive
|
def add_hunk(self, hunk_slice):
<DeepExtract>
hunk = LinearHunk(self.diff_buffer, hunk_slice)
</DeepExtract>
self.append(hunk)
return hunk
|
def add_hunk(self, hunk_slice):
hunk = LinearHunk(self.diff_buffer, hunk_slice)
self.append(hunk)
return hunk
|
CodeReview
|
positive
|
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-all', required=False, action='store_true', help='Build all')
ap.add_argument('-usd', required=False, action='store_true', help='Build USD')
ap.add_argument('-hdrpr', required=False, action='store_true', help='Build HdRPR')
ap.add_argument('-bin-dir', required=False, type=str, default='', help='Path to binary directory')
ap.add_argument('-libs', required=False, action='store_true', help='Create libs dir')
ap.add_argument('-mx-classes', required=False, action='store_true', help='Generate MaterialX classes')
ap.add_argument('-addon', required=False, action='store_true', help='Create zip addon')
ap.add_argument('-G', required=False, type=str, help='Compiler for HdRPR and MaterialX in cmake. For example: -G "Visual Studio 16 2019"', default='Visual Studio 16 2019' if OS == 'Windows' else '')
ap.add_argument('-j', required=False, type=int, default=0, help='Number of jobs run in parallel')
ap.add_argument('-build-var', required=False, type=str, default='release', choices=('release', 'relwithdebuginfo'), help='Build variant for USD, HdRPR and dependencies. (default: release)')
ap.add_argument('-clean', required=False, action='store_true', help='Clean build dirs before start USD or HdRPR build')
ap.add_argument('--prman', required=False, action='store_true', help='Build with RenderMan render delegate')
ap.add_argument('--prman-location', required=False, type=str, default='', help='Path to RenderMan directory')
args = ap.parse_args()
bin_dir = Path(args.bin_dir).resolve() if args.bin_dir else repo_dir / 'bin'
bin_dir = bin_dir.absolute()
bin_dir.mkdir(parents=True, exist_ok=True)
if args.all or args.usd:
<DeepExtract>
print_start('Building USD')
import build_usd
args = []
if args.j > 0:
args += ['-j', str(args.j)]
if args.prman:
args += ['--prman']
args += ['--prman-location', args.prman_location]
build_usd.main(bin_dir, args.clean, args.build_var, *args)
</DeepExtract>
if args.all or args.hdrpr:
<DeepExtract>
print_start('Building HdRPR')
hdrpr_dir = repo_dir / 'deps/HdRPR'
usd_dir = bin_dir / 'USD/install'
if args.clean:
rm_dir(hdrpr_dir / 'build')
os.environ['PXR_PLUGINPATH_NAME'] = str(usd_dir / 'lib/usd')
_cmake(hdrpr_dir, args.G, args.j, args.build_var, [f'-Dpxr_DIR={usd_dir}', f"-DCMAKE_INSTALL_PREFIX={bin_dir / 'USD/install'}", '-DRPR_BUILD_AS_HOUDINI_PLUGIN=FALSE', f'-DPYTHON_EXECUTABLE={sys.executable}', f"-DOPENEXR_LOCATION={bin_dir / 'USD/install'}"])
</DeepExtract>
if args.all or args.libs:
<DeepExtract>
print_start('Copying binaries to libs')
import create_libs
create_libs.main(bin_dir, args.build_var)
</DeepExtract>
if args.all or args.mx_classes:
<DeepExtract>
print_start('Generating code for MaterialX classes')
import generate_mx_classes
generate_mx_classes.main()
</DeepExtract>
if args.all or args.addon:
<DeepExtract>
print_start('Creating zip Addon')
import create_zip_addon
create_zip_addon.main()
</DeepExtract>
<DeepExtract>
print(f"\n-------------------------------------------------------------\n{'Finished'}\n-------------------------------------------------------------")
</DeepExtract>
|
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-all', required=False, action='store_true', help='Build all')
ap.add_argument('-usd', required=False, action='store_true', help='Build USD')
ap.add_argument('-hdrpr', required=False, action='store_true', help='Build HdRPR')
ap.add_argument('-bin-dir', required=False, type=str, default='', help='Path to binary directory')
ap.add_argument('-libs', required=False, action='store_true', help='Create libs dir')
ap.add_argument('-mx-classes', required=False, action='store_true', help='Generate MaterialX classes')
ap.add_argument('-addon', required=False, action='store_true', help='Create zip addon')
ap.add_argument('-G', required=False, type=str, help='Compiler for HdRPR and MaterialX in cmake. For example: -G "Visual Studio 16 2019"', default='Visual Studio 16 2019' if OS == 'Windows' else '')
ap.add_argument('-j', required=False, type=int, default=0, help='Number of jobs run in parallel')
ap.add_argument('-build-var', required=False, type=str, default='release', choices=('release', 'relwithdebuginfo'), help='Build variant for USD, HdRPR and dependencies. (default: release)')
ap.add_argument('-clean', required=False, action='store_true', help='Clean build dirs before start USD or HdRPR build')
ap.add_argument('--prman', required=False, action='store_true', help='Build with RenderMan render delegate')
ap.add_argument('--prman-location', required=False, type=str, default='', help='Path to RenderMan directory')
args = ap.parse_args()
bin_dir = Path(args.bin_dir).resolve() if args.bin_dir else repo_dir / 'bin'
bin_dir = bin_dir.absolute()
bin_dir.mkdir(parents=True, exist_ok=True)
if args.all or args.usd:
print_start('Building USD')
import build_usd
args = []
if args.j > 0:
args += ['-j', str(args.j)]
if args.prman:
args += ['--prman']
args += ['--prman-location', args.prman_location]
build_usd.main(bin_dir, args.clean, args.build_var, *args)
if args.all or args.hdrpr:
print_start('Building HdRPR')
hdrpr_dir = repo_dir / 'deps/HdRPR'
usd_dir = bin_dir / 'USD/install'
if args.clean:
rm_dir(hdrpr_dir / 'build')
os.environ['PXR_PLUGINPATH_NAME'] = str(usd_dir / 'lib/usd')
_cmake(hdrpr_dir, args.G, args.j, args.build_var, [f'-Dpxr_DIR={usd_dir}', f"-DCMAKE_INSTALL_PREFIX={bin_dir / 'USD/install'}", '-DRPR_BUILD_AS_HOUDINI_PLUGIN=FALSE', f'-DPYTHON_EXECUTABLE={sys.executable}', f"-DOPENEXR_LOCATION={bin_dir / 'USD/install'}"])
if args.all or args.libs:
print_start('Copying binaries to libs')
import create_libs
create_libs.main(bin_dir, args.build_var)
if args.all or args.mx_classes:
print_start('Generating code for MaterialX classes')
import generate_mx_classes
generate_mx_classes.main()
if args.all or args.addon:
print_start('Creating zip Addon')
import create_zip_addon
create_zip_addon.main()
print(f"\n-------------------------------------------------------------\n{'Finished'}\n-------------------------------------------------------------")
|
BlenderUSDHydraAddon
|
positive
|
def pressure_ashpa(self):
"""
Reads the atmospheric pressure
"""
hpa = None
raw_comp1 = None
raw_comp2 = None
raw_comp3 = None
raw_pressure = None
raw_temperature = None
value_d_p1 = None
value_d_p2 = None
value_d_p3 = None
value_d_p4 = None
value_d_p5 = None
value_d_p6 = None
value_d_p7 = None
value_d_p8 = None
value_d_p9 = None
value_lsb = None
value_msb = None
value_xlsb = None
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_PRESSUREMSB, 1, addrsize=8)
val = 0
val = val << 8 | byte_list[0]
value_msb = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_PRESSURELSB, 1, addrsize=8)
val = 0
val = val << 8 | byte_list[0]
value_lsb = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_PRESSUREXLSB, 1, addrsize=8)
val = 0
val = val << 8 | byte_list[0]
value_xlsb = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP1, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
value_d_p1 = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP2, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p2 = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP3, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p3 = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP4, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p4 = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP5, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p5 = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP6, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p6 = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP7, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p7 = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP8, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p8 = val
</DeepExtract>
<DeepExtract>
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP9, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p9 = val
</DeepExtract>
<DeepExtract>
celsius = None
raw_comp1 = None
raw_comp2 = None
raw_comp3 = None
raw_temp = None
value_d_t1 = None
value_d_t2 = None
value_d_t3 = None
value_lsb = None
value_msb = None
value_xlsb = None
value_msb = self.get_tempmsb()
value_lsb = self.get_templsb()
value_xlsb = self.get_tempxlsb()
value_d_t1 = self.get_digt1()
value_d_t2 = self.get_digt2()
value_d_t3 = self.get_digt3()
raw_temp = (value_msb << 12) + (value_lsb << 4) + (value_xlsb >> 4)
raw_comp1 = (raw_temp / 16384.0 - value_d_t1 / 1024.0) * value_d_t2
raw_comp3 = raw_temp / 131072.0 - value_d_t1 / 8192.0
raw_comp2 = raw_comp3 * raw_comp3 * value_d_t3
celsius = (raw_comp1 + raw_comp2) / 5120.0
raw_temperature = celsius
</DeepExtract>
raw_temperature = raw_temperature * 5120.0
raw_pressure = (value_msb << 12) + (value_lsb << 4) + (value_xlsb >> 4)
raw_comp1 = raw_temperature / 2 - 64000.0
raw_comp2 = raw_comp1 * raw_comp1 * value_d_p6 / 32768.0
raw_comp2 = raw_comp2 + raw_comp1 * value_d_p5 * 2.0
raw_comp2 = raw_comp2 / 4.0 + value_d_p4 * 65536.0
raw_comp3 = value_d_p3 * raw_comp1 * raw_comp1
raw_comp1 = (raw_comp3 / 524288.0 + value_d_p2 * raw_comp1) / 524288.0
raw_comp1 = (1.0 + raw_comp1 / 32768.0) * value_d_p1
hpa = 1048576.0 - raw_pressure
hpa = (hpa - raw_comp2 / 4096.0) * (6250.0 / raw_comp1)
raw_comp1 = value_d_p9 * hpa * hpa / 2147483648.0
raw_comp2 = hpa * value_d_p8 / 32768.0
hpa = hpa + (raw_comp1 + raw_comp2 + value_d_p7) / 16.0
hpa = hpa / 100.0
return hpa
|
def pressure_ashpa(self):
"""
Reads the atmospheric pressure
"""
hpa = None
raw_comp1 = None
raw_comp2 = None
raw_comp3 = None
raw_pressure = None
raw_temperature = None
value_d_p1 = None
value_d_p2 = None
value_d_p3 = None
value_d_p4 = None
value_d_p5 = None
value_d_p6 = None
value_d_p7 = None
value_d_p8 = None
value_d_p9 = None
value_lsb = None
value_msb = None
value_xlsb = None
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_PRESSUREMSB, 1, addrsize=8)
val = 0
val = val << 8 | byte_list[0]
value_msb = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_PRESSURELSB, 1, addrsize=8)
val = 0
val = val << 8 | byte_list[0]
value_lsb = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_PRESSUREXLSB, 1, addrsize=8)
val = 0
val = val << 8 | byte_list[0]
value_xlsb = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP1, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
value_d_p1 = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP2, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p2 = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP3, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p3 = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP4, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p4 = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP5, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p5 = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP6, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p6 = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP7, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p7 = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP8, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p8 = val
byte_list = self.i2c.readfrom_mem(self.device_address, self.REGISTER_DIGP9, 2, addrsize=16)
val = 0
val = val << 8 | byte_list[0]
val = val << 8 | byte_list[1]
val = _sign(val, 16)
value_d_p9 = val
celsius = None
raw_comp1 = None
raw_comp2 = None
raw_comp3 = None
raw_temp = None
value_d_t1 = None
value_d_t2 = None
value_d_t3 = None
value_lsb = None
value_msb = None
value_xlsb = None
value_msb = self.get_tempmsb()
value_lsb = self.get_templsb()
value_xlsb = self.get_tempxlsb()
value_d_t1 = self.get_digt1()
value_d_t2 = self.get_digt2()
value_d_t3 = self.get_digt3()
raw_temp = (value_msb << 12) + (value_lsb << 4) + (value_xlsb >> 4)
raw_comp1 = (raw_temp / 16384.0 - value_d_t1 / 1024.0) * value_d_t2
raw_comp3 = raw_temp / 131072.0 - value_d_t1 / 8192.0
raw_comp2 = raw_comp3 * raw_comp3 * value_d_t3
celsius = (raw_comp1 + raw_comp2) / 5120.0
raw_temperature = celsius
raw_temperature = raw_temperature * 5120.0
raw_pressure = (value_msb << 12) + (value_lsb << 4) + (value_xlsb >> 4)
raw_comp1 = raw_temperature / 2 - 64000.0
raw_comp2 = raw_comp1 * raw_comp1 * value_d_p6 / 32768.0
raw_comp2 = raw_comp2 + raw_comp1 * value_d_p5 * 2.0
raw_comp2 = raw_comp2 / 4.0 + value_d_p4 * 65536.0
raw_comp3 = value_d_p3 * raw_comp1 * raw_comp1
raw_comp1 = (raw_comp3 / 524288.0 + value_d_p2 * raw_comp1) / 524288.0
raw_comp1 = (1.0 + raw_comp1 / 32768.0) * value_d_p1
hpa = 1048576.0 - raw_pressure
hpa = (hpa - raw_comp2 / 4096.0) * (6250.0 / raw_comp1)
raw_comp1 = value_d_p9 * hpa * hpa / 2147483648.0
raw_comp2 = hpa * value_d_p8 / 32768.0
hpa = hpa + (raw_comp1 + raw_comp2 + value_d_p7) / 16.0
hpa = hpa / 100.0
return hpa
|
cyanobyte
|
positive
|
def camPosToQuaternion(cx, cy, cz):
q1a = 0
q1b = 0
q1c = math.sqrt(2) / 2
q1d = math.sqrt(2) / 2
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = cx / camDist
cy = cy / camDist
cz = cz / camDist
t = math.sqrt(cx * cx + cy * cy)
tx = cx / t
ty = cy / t
yaw = math.acos(ty)
if tx > 0:
yaw = 2 * math.pi - yaw
pitch = 0
tmp = min(max(tx * cx + ty * cy, -1), 1)
roll = math.acos(tmp)
if cz < 0:
roll = -roll
<DeepExtract>
c1 = math.cos(yaw / 2.0)
c2 = math.cos(pitch / 2.0)
c3 = math.cos(roll / 2.0)
s1 = math.sin(yaw / 2.0)
s2 = math.sin(pitch / 2.0)
s3 = math.sin(roll / 2.0)
q1 = c1 * c2 * c3 + s1 * s2 * s3
q2 = c1 * c2 * s3 - s1 * s2 * c3
q3 = c1 * s2 * c3 + s1 * c2 * s3
q4 = s1 * c2 * c3 - c1 * s2 * s3
(q2a, q2b, q2c, q2d) = (q1, q2, q3, q4)
</DeepExtract>
q1 = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d
q2 = q1b * q2a + q1a * q2b + q1d * q2c - q1c * q2d
q3 = q1c * q2a - q1d * q2b + q1a * q2c + q1b * q2d
q4 = q1d * q2a + q1c * q2b - q1b * q2c + q1a * q2d
return (q1, q2, q3, q4)
|
def camPosToQuaternion(cx, cy, cz):
q1a = 0
q1b = 0
q1c = math.sqrt(2) / 2
q1d = math.sqrt(2) / 2
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = cx / camDist
cy = cy / camDist
cz = cz / camDist
t = math.sqrt(cx * cx + cy * cy)
tx = cx / t
ty = cy / t
yaw = math.acos(ty)
if tx > 0:
yaw = 2 * math.pi - yaw
pitch = 0
tmp = min(max(tx * cx + ty * cy, -1), 1)
roll = math.acos(tmp)
if cz < 0:
roll = -roll
c1 = math.cos(yaw / 2.0)
c2 = math.cos(pitch / 2.0)
c3 = math.cos(roll / 2.0)
s1 = math.sin(yaw / 2.0)
s2 = math.sin(pitch / 2.0)
s3 = math.sin(roll / 2.0)
q1 = c1 * c2 * c3 + s1 * s2 * s3
q2 = c1 * c2 * s3 - s1 * s2 * c3
q3 = c1 * s2 * c3 + s1 * c2 * s3
q4 = s1 * c2 * c3 - c1 * s2 * s3
(q2a, q2b, q2c, q2d) = (q1, q2, q3, q4)
q1 = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d
q2 = q1b * q2a + q1a * q2b + q1d * q2c - q1c * q2d
q3 = q1c * q2a - q1d * q2b + q1a * q2c + q1b * q2d
q4 = q1d * q2a + q1c * q2b - q1b * q2c + q1a * q2d
return (q1, q2, q3, q4)
|
acsm
|
positive
|
def __init__(self, input_depth, output_depth, expansion, stride, bn_type='bn', kernel=3, width_divisor=1, shuffle_type=None, pw_group=1, se=False, cdw=False, dw_skip_bn=False, dw_skip_relu=False):
super(IRFBlock, self).__init__()
assert kernel in [1, 3, 5, 7], kernel
self.use_res_connect = stride == 1 and input_depth == output_depth
self.output_depth = output_depth
mid_depth = int(input_depth * expansion)
<DeepExtract>
ret = int(mid_depth)
if width_divisor > 0 and mid_depth % width_divisor != 0:
ret = int((_py2_round(mid_depth / width_divisor) or width_divisor) * width_divisor)
mid_depth = ret
</DeepExtract>
self.pw = ConvBNRelu(input_depth, mid_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu='relu', bn_type=bn_type, group=pw_group)
<DeepExtract>
assert stride in [1, 2, 4] or stride in [-1, -2, -4] or (isinstance(stride, tuple) and all((x in [-1, -2, -4] for x in stride)))
scales = stride
ret = None
if isinstance(stride, tuple) or stride < 0:
scales = [-x for x in stride] if isinstance(stride, tuple) else -stride
stride = 1
ret = Upsample(scale_factor=scales, mode='nearest', align_corners=None)
(self.upscale, stride) = (ret, stride)
</DeepExtract>
if kernel == 1:
self.dw = nn.Sequential()
elif cdw:
dw1 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu', bn_type=bn_type)
dw2 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=1, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None)
self.dw = nn.Sequential(OrderedDict([('dw1', dw1), ('dw2', dw2)]))
else:
self.dw = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None)
self.pwl = ConvBNRelu(mid_depth, output_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu=None, bn_type=bn_type, group=pw_group)
self.shuffle_type = shuffle_type
if shuffle_type is not None:
self.shuffle = ChannelShuffle(pw_group)
self.se4 = SEModule(output_depth) if se else nn.Sequential()
self.output_depth = output_depth
|
def __init__(self, input_depth, output_depth, expansion, stride, bn_type='bn', kernel=3, width_divisor=1, shuffle_type=None, pw_group=1, se=False, cdw=False, dw_skip_bn=False, dw_skip_relu=False):
super(IRFBlock, self).__init__()
assert kernel in [1, 3, 5, 7], kernel
self.use_res_connect = stride == 1 and input_depth == output_depth
self.output_depth = output_depth
mid_depth = int(input_depth * expansion)
ret = int(mid_depth)
if width_divisor > 0 and mid_depth % width_divisor != 0:
ret = int((_py2_round(mid_depth / width_divisor) or width_divisor) * width_divisor)
mid_depth = ret
self.pw = ConvBNRelu(input_depth, mid_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu='relu', bn_type=bn_type, group=pw_group)
assert stride in [1, 2, 4] or stride in [-1, -2, -4] or (isinstance(stride, tuple) and all((x in [-1, -2, -4] for x in stride)))
scales = stride
ret = None
if isinstance(stride, tuple) or stride < 0:
scales = [-x for x in stride] if isinstance(stride, tuple) else -stride
stride = 1
ret = Upsample(scale_factor=scales, mode='nearest', align_corners=None)
(self.upscale, stride) = (ret, stride)
if kernel == 1:
self.dw = nn.Sequential()
elif cdw:
dw1 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu', bn_type=bn_type)
dw2 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=1, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None)
self.dw = nn.Sequential(OrderedDict([('dw1', dw1), ('dw2', dw2)]))
else:
self.dw = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None)
self.pwl = ConvBNRelu(mid_depth, output_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu=None, bn_type=bn_type, group=pw_group)
self.shuffle_type = shuffle_type
if shuffle_type is not None:
self.shuffle = ChannelShuffle(pw_group)
self.se4 = SEModule(output_depth) if se else nn.Sequential()
self.output_depth = output_depth
|
DetNAS
|
positive
|
def get_search_field_values(contentitem):
"""
Extract the search fields from the model.
"""
plugin = contentitem.plugin
values = []
for field_name in plugin.search_fields:
value = getattr(contentitem, field_name)
if value and isinstance(value, str):
<DeepExtract>
value = strip_tags(force_str(value))
</DeepExtract>
values.append(value)
return values
|
def get_search_field_values(contentitem):
"""
Extract the search fields from the model.
"""
plugin = contentitem.plugin
values = []
for field_name in plugin.search_fields:
value = getattr(contentitem, field_name)
if value and isinstance(value, str):
value = strip_tags(force_str(value))
values.append(value)
return values
|
django-fluent-contents
|
positive
|