| before (string, length 0–955k) | after (string, length 0–877k) | repo (string, length 1–74) | type (string, 1 class) |
|---|---|---|---|
def show_dconv_offset(im, all_offset, step=[2, 2], filter_size=3, dilation=2, pad=2, plot_area=2, plot_level=3):
vis_attr = {'filter_size': filter_size, 'dilation': dilation, 'pad': pad, 'plot_area': plot_area, 'plot_level': plot_level}
map_h = all_offset[0].shape[2]
map_w = all_offset[0].shape[3]
step_h = step[0]
step_w = step[1]
start_h = np.round(step_h / 2)
start_w = np.round(step_w / 2)
plt.figure()
for im_h in range(start_h, map_h, step_h):
for im_w in range(start_w, map_w, step_w):
target_point = np.array([im_h, im_w])
source_y = np.round(target_point[0] * im.shape[0] / map_h)
source_x = np.round(target_point[1] * im.shape[1] / map_w)
if source_y < plot_area or source_x < plot_area or source_y >= im.shape[0] - plot_area or (source_x >= im.shape[1] - plot_area):
continue
cur_im = np.copy(im)
<DeepExtract>
map_h = all_offset[0].shape[2]
map_w = all_offset[0].shape[3]
for level in range(vis_attr['plot_level']):
source_points = []
for (idx, cur_top_point) in enumerate([target_point]):
cur_top_point = np.round(cur_top_point)
if cur_top_point[0] < 0 or cur_top_point[1] < 0 or cur_top_point[0] > map_h - 1 or (cur_top_point[1] > map_w - 1):
continue
cur_source_point = kernel_inv_map(vis_attr, cur_top_point, map_h, map_w)
cur_offset = np.squeeze(all_offset[level][:, :, int(cur_top_point[0]), int(cur_top_point[1])])
cur_source_point = offset_inv_map(cur_source_point, cur_offset)
source_points = source_points + cur_source_point
[target_point] = source_points
source_points = source_points
</DeepExtract>
<DeepExtract>
plot_area = vis_attr['plot_area']
for (idx, cur_source_point) in enumerate(source_points):
y = np.round((cur_source_point[0] + 0.5) * cur_im.shape[0] / map_h).astype('i')
x = np.round((cur_source_point[1] + 0.5) * cur_im.shape[1] / map_w).astype('i')
if x < 0 or y < 0 or x > cur_im.shape[1] - 1 or (y > cur_im.shape[0] - 1):
continue
y = min(y, cur_im.shape[0] - vis_attr['plot_area'] - 1)
x = min(x, cur_im.shape[1] - vis_attr['plot_area'] - 1)
y = max(y, vis_attr['plot_area'])
x = max(x, vis_attr['plot_area'])
cur_im[y - plot_area:y + plot_area + 1, x - plot_area:x + plot_area + 1, :] = np.tile(np.reshape(color, (1, 1, 3)), (2 * plot_area + 1, 2 * plot_area + 1, 1))
cur_im = cur_im
</DeepExtract>
cur_im[source_y - plot_area:source_y + plot_area + 1, source_x - plot_area:source_x + plot_area + 1, :] = np.tile(np.reshape([0, 255, 0], (1, 1, 3)), (2 * plot_area + 1, 2 * plot_area + 1, 1))
plt.axis('off')
plt.imshow(cur_im)
plt.show(block=False)
plt.pause(0.01)
plt.clf()
|
def show_dconv_offset(im, all_offset, step=[2, 2], filter_size=3, dilation=2, pad=2, plot_area=2, plot_level=3):
vis_attr = {'filter_size': filter_size, 'dilation': dilation, 'pad': pad, 'plot_area': plot_area, 'plot_level': plot_level}
map_h = all_offset[0].shape[2]
map_w = all_offset[0].shape[3]
step_h = step[0]
step_w = step[1]
start_h = np.round(step_h / 2)
start_w = np.round(step_w / 2)
plt.figure()
for im_h in range(start_h, map_h, step_h):
for im_w in range(start_w, map_w, step_w):
target_point = np.array([im_h, im_w])
source_y = np.round(target_point[0] * im.shape[0] / map_h)
source_x = np.round(target_point[1] * im.shape[1] / map_w)
if source_y < plot_area or source_x < plot_area or source_y >= im.shape[0] - plot_area or (source_x >= im.shape[1] - plot_area):
continue
cur_im = np.copy(im)
map_h = all_offset[0].shape[2]
map_w = all_offset[0].shape[3]
for level in range(vis_attr['plot_level']):
source_points = []
for (idx, cur_top_point) in enumerate([target_point]):
cur_top_point = np.round(cur_top_point)
if cur_top_point[0] < 0 or cur_top_point[1] < 0 or cur_top_point[0] > map_h - 1 or (cur_top_point[1] > map_w - 1):
continue
cur_source_point = kernel_inv_map(vis_attr, cur_top_point, map_h, map_w)
cur_offset = np.squeeze(all_offset[level][:, :, int(cur_top_point[0]), int(cur_top_point[1])])
cur_source_point = offset_inv_map(cur_source_point, cur_offset)
source_points = source_points + cur_source_point
[target_point] = source_points
source_points = source_points
plot_area = vis_attr['plot_area']
for (idx, cur_source_point) in enumerate(source_points):
y = np.round((cur_source_point[0] + 0.5) * cur_im.shape[0] / map_h).astype('i')
x = np.round((cur_source_point[1] + 0.5) * cur_im.shape[1] / map_w).astype('i')
if x < 0 or y < 0 or x > cur_im.shape[1] - 1 or (y > cur_im.shape[0] - 1):
continue
y = min(y, cur_im.shape[0] - vis_attr['plot_area'] - 1)
x = min(x, cur_im.shape[1] - vis_attr['plot_area'] - 1)
y = max(y, vis_attr['plot_area'])
x = max(x, vis_attr['plot_area'])
cur_im[y - plot_area:y + plot_area + 1, x - plot_area:x + plot_area + 1, :] = np.tile(np.reshape(color, (1, 1, 3)), (2 * plot_area + 1, 2 * plot_area + 1, 1))
cur_im = cur_im
cur_im[source_y - plot_area:source_y + plot_area + 1, source_x - plot_area:source_x + plot_area + 1, :] = np.tile(np.reshape([0, 255, 0], (1, 1, 3)), (2 * plot_area + 1, 2 * plot_area + 1, 1))
plt.axis('off')
plt.imshow(cur_im)
plt.show(block=False)
plt.pause(0.01)
plt.clf()
|
Deformable-ConvNets
|
positive
|
def test_narma10_prediction_esn_ridge10(self):
"""
Test NARMA-10 prediction with default hyper-parameters (Nx=100, SP=0.99)
"""
<DeepExtract>
use_cuda = torch.cuda.is_available() if use_cuda else False
echotorch.utils.manual_seed(1)
narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10)
narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10)
trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
w_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, spectral_radius=spectral_radius, dtype=dtype)
win_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, scale=input_scaling, apply_spectral_radius=False, dtype=dtype)
wbias_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, scale=bias_scaling, apply_spectral_radius=False, dtype=dtype)
esn = etrs.LiESN(input_dim=1, hidden_dim=reservoir_size, output_dim=1, spectral_radius=spectral_radius, leaky_rate=leaky_rate, learning_algo='inv', w_generator=w_matrix_generator, win_generator=win_matrix_generator, wbias_generator=wbias_matrix_generator, input_scaling=input_scaling, bias_scaling=bias_scaling, ridge_param=10, dtype=dtype)
if use_cuda:
esn.cuda()
for data in trainloader:
(inputs, targets) = data
if dtype == torch.float64:
(inputs, targets) = (inputs.double(), targets.double())
(inputs, targets) = (Variable(inputs), Variable(targets))
if use_cuda:
(inputs, targets) = (inputs.cuda(), targets.cuda())
esn(inputs, targets)
esn.finalize()
dataiter = iter(trainloader)
(train_u, train_y) = dataiter.next()
if dtype == torch.float64:
(train_u, train_y) = (train_u.double(), train_y.double())
(train_u, train_y) = (Variable(train_u), Variable(train_y))
if use_cuda:
(train_u, train_y) = (train_u.cuda(), train_y.cuda())
y_train_predicted = esn(train_u)
dataiter = iter(testloader)
(test_u, test_y) = dataiter.next()
if dtype == torch.float64:
(test_u, test_y) = (test_u.double(), test_y.double())
(test_u, test_y) = (Variable(test_u), Variable(test_y))
if use_cuda:
(test_u, test_y) = (test_u.cuda(), test_y.cuda())
y_test_predicted = esn(test_u)
(train_mse, train_nrmse, test_mse, test_nrmse) = (echotorch.utils.mse(y_train_predicted.data, train_y.data), echotorch.utils.nrmse(y_train_predicted.data, train_y.data), echotorch.utils.mse(y_test_predicted.data, test_y.data), echotorch.utils.nrmse(y_test_predicted.data, test_y.data))
</DeepExtract>
<DeepExtract>
use_cuda = torch.cuda.is_available() if use_cuda else False
echotorch.utils.manual_seed(1)
narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10)
narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10)
trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
w_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, spectral_radius=spectral_radius, dtype=torch.float32)
win_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, scale=input_scaling, apply_spectral_radius=False, dtype=torch.float32)
wbias_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, scale=bias_scaling, apply_spectral_radius=False, dtype=torch.float32)
esn = etrs.LiESN(input_dim=1, hidden_dim=reservoir_size, output_dim=1, spectral_radius=spectral_radius, leaky_rate=leaky_rate, learning_algo='inv', w_generator=w_matrix_generator, win_generator=win_matrix_generator, wbias_generator=wbias_matrix_generator, input_scaling=input_scaling, bias_scaling=bias_scaling, ridge_param=10.0, dtype=torch.float32)
if use_cuda:
esn.cuda()
for data in trainloader:
(inputs, targets) = data
if torch.float32 == torch.float64:
(inputs, targets) = (inputs.double(), targets.double())
(inputs, targets) = (Variable(inputs), Variable(targets))
if use_cuda:
(inputs, targets) = (inputs.cuda(), targets.cuda())
esn(inputs, targets)
esn.finalize()
dataiter = iter(trainloader)
(train_u, train_y) = dataiter.next()
if torch.float32 == torch.float64:
(train_u, train_y) = (train_u.double(), train_y.double())
(train_u, train_y) = (Variable(train_u), Variable(train_y))
if use_cuda:
(train_u, train_y) = (train_u.cuda(), train_y.cuda())
y_train_predicted = esn(train_u)
dataiter = iter(testloader)
(test_u, test_y) = dataiter.next()
if torch.float32 == torch.float64:
(test_u, test_y) = (test_u.double(), test_y.double())
(test_u, test_y) = (Variable(test_u), Variable(test_y))
if use_cuda:
(test_u, test_y) = (test_u.cuda(), test_y.cuda())
y_test_predicted = esn(test_u)
(train_mse32, train_nrmse32, test_mse32, test_nrmse32) = (echotorch.utils.mse(y_train_predicted.data, train_y.data), echotorch.utils.nrmse(y_train_predicted.data, train_y.data), echotorch.utils.mse(y_test_predicted.data, test_y.data), echotorch.utils.nrmse(y_test_predicted.data, test_y.data))
</DeepExtract>
self.assertLessEqual(train_mse, 0.1)
self.assertLessEqual(train_nrmse, 3.0)
self.assertLessEqual(test_mse, 0.1)
self.assertLessEqual(test_nrmse, 3.0)
self.assertLessEqual(train_mse32, 0.1)
self.assertLessEqual(train_nrmse32, 3.0)
self.assertLessEqual(test_mse32, 0.2)
self.assertLessEqual(test_nrmse32, 3.0)
|
def test_narma10_prediction_esn_ridge10(self):
"""
Test NARMA-10 prediction with default hyper-parameters (Nx=100, SP=0.99)
"""
use_cuda = torch.cuda.is_available() if use_cuda else False
echotorch.utils.manual_seed(1)
narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10)
narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10)
trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
w_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, spectral_radius=spectral_radius, dtype=dtype)
win_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, scale=input_scaling, apply_spectral_radius=False, dtype=dtype)
wbias_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, scale=bias_scaling, apply_spectral_radius=False, dtype=dtype)
esn = etrs.LiESN(input_dim=1, hidden_dim=reservoir_size, output_dim=1, spectral_radius=spectral_radius, leaky_rate=leaky_rate, learning_algo='inv', w_generator=w_matrix_generator, win_generator=win_matrix_generator, wbias_generator=wbias_matrix_generator, input_scaling=input_scaling, bias_scaling=bias_scaling, ridge_param=10, dtype=dtype)
if use_cuda:
esn.cuda()
for data in trainloader:
(inputs, targets) = data
if dtype == torch.float64:
(inputs, targets) = (inputs.double(), targets.double())
(inputs, targets) = (Variable(inputs), Variable(targets))
if use_cuda:
(inputs, targets) = (inputs.cuda(), targets.cuda())
esn(inputs, targets)
esn.finalize()
dataiter = iter(trainloader)
(train_u, train_y) = dataiter.next()
if dtype == torch.float64:
(train_u, train_y) = (train_u.double(), train_y.double())
(train_u, train_y) = (Variable(train_u), Variable(train_y))
if use_cuda:
(train_u, train_y) = (train_u.cuda(), train_y.cuda())
y_train_predicted = esn(train_u)
dataiter = iter(testloader)
(test_u, test_y) = dataiter.next()
if dtype == torch.float64:
(test_u, test_y) = (test_u.double(), test_y.double())
(test_u, test_y) = (Variable(test_u), Variable(test_y))
if use_cuda:
(test_u, test_y) = (test_u.cuda(), test_y.cuda())
y_test_predicted = esn(test_u)
(train_mse, train_nrmse, test_mse, test_nrmse) = (echotorch.utils.mse(y_train_predicted.data, train_y.data), echotorch.utils.nrmse(y_train_predicted.data, train_y.data), echotorch.utils.mse(y_test_predicted.data, test_y.data), echotorch.utils.nrmse(y_test_predicted.data, test_y.data))
use_cuda = torch.cuda.is_available() if use_cuda else False
echotorch.utils.manual_seed(1)
narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10)
narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10)
trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
w_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, spectral_radius=spectral_radius, dtype=torch.float32)
win_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, scale=input_scaling, apply_spectral_radius=False, dtype=torch.float32)
wbias_matrix_generator = mg.matrix_factory.get_generator(name='normal', connectivity=connectivity, scale=bias_scaling, apply_spectral_radius=False, dtype=torch.float32)
esn = etrs.LiESN(input_dim=1, hidden_dim=reservoir_size, output_dim=1, spectral_radius=spectral_radius, leaky_rate=leaky_rate, learning_algo='inv', w_generator=w_matrix_generator, win_generator=win_matrix_generator, wbias_generator=wbias_matrix_generator, input_scaling=input_scaling, bias_scaling=bias_scaling, ridge_param=10.0, dtype=torch.float32)
if use_cuda:
esn.cuda()
for data in trainloader:
(inputs, targets) = data
if torch.float32 == torch.float64:
(inputs, targets) = (inputs.double(), targets.double())
(inputs, targets) = (Variable(inputs), Variable(targets))
if use_cuda:
(inputs, targets) = (inputs.cuda(), targets.cuda())
esn(inputs, targets)
esn.finalize()
dataiter = iter(trainloader)
(train_u, train_y) = dataiter.next()
if torch.float32 == torch.float64:
(train_u, train_y) = (train_u.double(), train_y.double())
(train_u, train_y) = (Variable(train_u), Variable(train_y))
if use_cuda:
(train_u, train_y) = (train_u.cuda(), train_y.cuda())
y_train_predicted = esn(train_u)
dataiter = iter(testloader)
(test_u, test_y) = dataiter.next()
if torch.float32 == torch.float64:
(test_u, test_y) = (test_u.double(), test_y.double())
(test_u, test_y) = (Variable(test_u), Variable(test_y))
if use_cuda:
(test_u, test_y) = (test_u.cuda(), test_y.cuda())
y_test_predicted = esn(test_u)
(train_mse32, train_nrmse32, test_mse32, test_nrmse32) = (echotorch.utils.mse(y_train_predicted.data, train_y.data), echotorch.utils.nrmse(y_train_predicted.data, train_y.data), echotorch.utils.mse(y_test_predicted.data, test_y.data), echotorch.utils.nrmse(y_test_predicted.data, test_y.data))
self.assertLessEqual(train_mse, 0.1)
self.assertLessEqual(train_nrmse, 3.0)
self.assertLessEqual(test_mse, 0.1)
self.assertLessEqual(test_nrmse, 3.0)
self.assertLessEqual(train_mse32, 0.1)
self.assertLessEqual(train_nrmse32, 3.0)
self.assertLessEqual(test_mse32, 0.2)
self.assertLessEqual(test_nrmse32, 3.0)
|
EchoTorch
|
positive
|
def get(self, key):
<DeepExtract>
if self.root is None:
self.root = None
if key < self.root.key:
self.root = self._get(self.root.left, key, val)
elif key > self.root.key:
self.root = self._get(self.root.right, key, val)
else:
self.root = self.root
</DeepExtract>
if x is None:
return None
else:
return x.val
|
def get(self, key):
if self.root is None:
self.root = None
if key < self.root.key:
self.root = self._get(self.root.left, key, val)
elif key > self.root.key:
self.root = self._get(self.root.right, key, val)
else:
self.root = self.root
if x is None:
return None
else:
return x.val
|
CtCI-6th-Edition
|
positive
|
@tools.ormcache('templates', 'current_fp_taxes')
def find_fp_tax_by_templates(self, templates, current_fp_taxes):
result = []
for tpl in templates:
<DeepExtract>
fp_model = self.env['account.fiscal.position']
for matching in self.fp_matching_ids.sorted('sequence'):
if matching.matching_value == 'xml_id':
real = self.env['account.fiscal.position']
for template in tpl.position_id:
try:
real |= self.env.ref(self._get_real_xml_name(template))
except BaseException:
_logger.info('Is not real xml Name')
if not real:
continue
criteria = ('id', 'in', real.ids)
else:
field_name = matching.matching_value
field_values = tpl.position_id.mapped(field_name)
if not field_values:
continue
criteria = (field_name, 'in', field_values)
result = fp_model.search([criteria, ('company_id', '=', self.company_id.id)], limit=1)
if result:
pos_id = result.id
pos_id = False
</DeepExtract>
<DeepExtract>
tax_model = self.env['account.tax'].with_context(active_test=False)
for template in tpl.tax_src_id:
for matching in self.tax_matching_ids.sorted('sequence'):
if matching.matching_value == 'xml_id':
real = self.env.ref(self._get_real_xml_name(template), raise_if_not_found=False)
if not real:
continue
criteria = ('id', '=', real.id)
else:
field_name = matching.matching_value
if not template[field_name]:
continue
criteria = (field_name, '=', template[field_name])
result = tax_model.search([criteria, ('company_id', '=', self.company_id.id), ('type_tax_use', '=', template.type_tax_use)], limit=1)
if result:
src_id = result.id
src_id = False
</DeepExtract>
<DeepExtract>
tax_model = self.env['account.tax'].with_context(active_test=False)
for template in tpl.tax_dest_id:
for matching in self.tax_matching_ids.sorted('sequence'):
if matching.matching_value == 'xml_id':
real = self.env.ref(self._get_real_xml_name(template), raise_if_not_found=False)
if not real:
continue
criteria = ('id', '=', real.id)
else:
field_name = matching.matching_value
if not template[field_name]:
continue
criteria = (field_name, '=', template[field_name])
result = tax_model.search([criteria, ('company_id', '=', self.company_id.id), ('type_tax_use', '=', template.type_tax_use)], limit=1)
if result:
dest_id = result.id
dest_id = False
</DeepExtract>
existing = self.env['account.fiscal.position.tax'].search([('position_id', '=', pos_id), ('tax_src_id', '=', src_id), ('tax_dest_id', '=', dest_id)])
if not existing:
result.append((0, 0, {'position_id': pos_id, 'tax_src_id': src_id, 'tax_dest_id': dest_id}))
else:
current_fp_taxes -= existing
if current_fp_taxes:
result += [(2, x.id) for x in current_fp_taxes]
return result
|
@tools.ormcache('templates', 'current_fp_taxes')
def find_fp_tax_by_templates(self, templates, current_fp_taxes):
result = []
for tpl in templates:
fp_model = self.env['account.fiscal.position']
for matching in self.fp_matching_ids.sorted('sequence'):
if matching.matching_value == 'xml_id':
real = self.env['account.fiscal.position']
for template in tpl.position_id:
try:
real |= self.env.ref(self._get_real_xml_name(template))
except BaseException:
_logger.info('Is not real xml Name')
if not real:
continue
criteria = ('id', 'in', real.ids)
else:
field_name = matching.matching_value
field_values = tpl.position_id.mapped(field_name)
if not field_values:
continue
criteria = (field_name, 'in', field_values)
result = fp_model.search([criteria, ('company_id', '=', self.company_id.id)], limit=1)
if result:
pos_id = result.id
pos_id = False
tax_model = self.env['account.tax'].with_context(active_test=False)
for template in tpl.tax_src_id:
for matching in self.tax_matching_ids.sorted('sequence'):
if matching.matching_value == 'xml_id':
real = self.env.ref(self._get_real_xml_name(template), raise_if_not_found=False)
if not real:
continue
criteria = ('id', '=', real.id)
else:
field_name = matching.matching_value
if not template[field_name]:
continue
criteria = (field_name, '=', template[field_name])
result = tax_model.search([criteria, ('company_id', '=', self.company_id.id), ('type_tax_use', '=', template.type_tax_use)], limit=1)
if result:
src_id = result.id
src_id = False
tax_model = self.env['account.tax'].with_context(active_test=False)
for template in tpl.tax_dest_id:
for matching in self.tax_matching_ids.sorted('sequence'):
if matching.matching_value == 'xml_id':
real = self.env.ref(self._get_real_xml_name(template), raise_if_not_found=False)
if not real:
continue
criteria = ('id', '=', real.id)
else:
field_name = matching.matching_value
if not template[field_name]:
continue
criteria = (field_name, '=', template[field_name])
result = tax_model.search([criteria, ('company_id', '=', self.company_id.id), ('type_tax_use', '=', template.type_tax_use)], limit=1)
if result:
dest_id = result.id
dest_id = False
existing = self.env['account.fiscal.position.tax'].search([('position_id', '=', pos_id), ('tax_src_id', '=', src_id), ('tax_dest_id', '=', dest_id)])
if not existing:
result.append((0, 0, {'position_id': pos_id, 'tax_src_id': src_id, 'tax_dest_id': dest_id}))
else:
current_fp_taxes -= existing
if current_fp_taxes:
result += [(2, x.id) for x in current_fp_taxes]
return result
|
account-financial-tools
|
positive
|
@mock.patch('dlrn.api.api_logging.get_config')
@mock.patch('dlrn.api.api_logging.get_config')
@mock.patch('dlrn.api.api_logging.create_rotating_file_handler_dict', side_effect=mock_handler_dict)
@mock.patch('dlrn.api.api_logging.create_logger_dict', side_effect=mock_logger_dict)
def test_setup_complex_dict_config(self, mocked_logged_dict, mocked_handler_dict, mocked_retr_debug, mocked_retr_path):
mocked_retr_debug.return_value = self.debug_bool
mocked_retr_path.return_value = self.log_path_var
<DeepExtract>
result_dlrn_handler_dict = {self.dlrn_handler_name: {'class': 'logging.handlers.RotatingFileHandler', 'filename': self.file_path, 'backupCount': 3, 'maxBytes': 15728640, 'formatter': 'default'}}
</DeepExtract>
<DeepExtract>
result_dlrn_logger_dict = {self.dlrn_logger_name: {'level': self.log_level, 'handlers': [self.dlrn_handler_name], 'propagate': False}}
</DeepExtract>
<DeepExtract>
result_auth_handler_dict = {self.auth_handler_name: {'class': 'logging.handlers.RotatingFileHandler', 'filename': self.file_path, 'backupCount': 3, 'maxBytes': 15728640, 'formatter': 'default'}}
</DeepExtract>
<DeepExtract>
result_auth_logger_dict = {self.auth_logger_name: {'level': self.log_level, 'handlers': [self.auth_handler_name], 'propagate': False}}
</DeepExtract>
return_dict = setup_dict_config({})
return_loggers = return_dict['loggers']
return_handlers = return_dict['handlers']
self.assertDictEqual(return_loggers[self.dlrn_logger_name], result_dlrn_logger_dict[self.dlrn_logger_name])
self.assertDictEqual(return_handlers[self.dlrn_handler_name], result_dlrn_handler_dict[self.dlrn_handler_name])
self.assertDictEqual(return_loggers[self.auth_logger_name], result_auth_logger_dict[self.auth_logger_name])
self.assertDictEqual(return_handlers[self.auth_handler_name], result_auth_handler_dict[self.auth_handler_name])
|
@mock.patch('dlrn.api.api_logging.get_config')
@mock.patch('dlrn.api.api_logging.get_config')
@mock.patch('dlrn.api.api_logging.create_rotating_file_handler_dict', side_effect=mock_handler_dict)
@mock.patch('dlrn.api.api_logging.create_logger_dict', side_effect=mock_logger_dict)
def test_setup_complex_dict_config(self, mocked_logged_dict, mocked_handler_dict, mocked_retr_debug, mocked_retr_path):
mocked_retr_debug.return_value = self.debug_bool
mocked_retr_path.return_value = self.log_path_var
result_dlrn_handler_dict = {self.dlrn_handler_name: {'class': 'logging.handlers.RotatingFileHandler', 'filename': self.file_path, 'backupCount': 3, 'maxBytes': 15728640, 'formatter': 'default'}}
result_dlrn_logger_dict = {self.dlrn_logger_name: {'level': self.log_level, 'handlers': [self.dlrn_handler_name], 'propagate': False}}
result_auth_handler_dict = {self.auth_handler_name: {'class': 'logging.handlers.RotatingFileHandler', 'filename': self.file_path, 'backupCount': 3, 'maxBytes': 15728640, 'formatter': 'default'}}
result_auth_logger_dict = {self.auth_logger_name: {'level': self.log_level, 'handlers': [self.auth_handler_name], 'propagate': False}}
return_dict = setup_dict_config({})
return_loggers = return_dict['loggers']
return_handlers = return_dict['handlers']
self.assertDictEqual(return_loggers[self.dlrn_logger_name], result_dlrn_logger_dict[self.dlrn_logger_name])
self.assertDictEqual(return_handlers[self.dlrn_handler_name], result_dlrn_handler_dict[self.dlrn_handler_name])
self.assertDictEqual(return_loggers[self.auth_logger_name], result_auth_logger_dict[self.auth_logger_name])
self.assertDictEqual(return_handlers[self.auth_handler_name], result_auth_handler_dict[self.auth_handler_name])
|
DLRN
|
positive
|
@pytest.mark.scalecleanup
def test_cleanup_scale(mom, min_index, max_index, service_id_list) -> None:
"""
Args:
mom: Marathon on Marathon instance name
min_index: minimum index to begin jenkins suffixes at
max_index: maximum index to end jenkins suffixes at
service_id_list: list of service ids to delete
Blanket clean-up of jenkins instances on a DC/OS cluster.
1. Delete list of service ids if specified
2. Delete range between max and min if specified
1. Queries MoM for all apps matching "jenkins" prefix
2. Delete all jobs on running Jenkins instances
3. Uninstall all found Jenkins installs
"""
service_ids = list()
if service_id_list != '':
service_ids = service_id_list.split(',')
elif min_index != -1 and max_index != -1:
service_ids = ['jenkins/jenkins{}'.format(index) for index in range(min_index, max_index)]
else:
r = sdk_marathon.filter_apps_by_id('jenkins', mom)
jenkins_apps = r.json()['apps']
jenkins_ids = [x['id'] for x in jenkins_apps]
for service_id in jenkins_ids:
if service_id.startswith('/'):
service_id = service_id[1:]
if service_id == 'jenkins':
continue
service_ids.append(service_id)
<DeepExtract>
thread_list = list()
for service_name in service_ids:
t = ResultThread(target=_cleanup_jenkins_install, daemon=False, name=service_name, args=(service_name,), kwargs=kwargs)
t.event = event
thread_list.append(t)
t.start()
cleanup_threads = thread_list
</DeepExtract>
<DeepExtract>
timeout_failures = _wait_on_threads(cleanup_threads, **kwargs)
timeout_names = [x.name for x in timeout_failures]
if timeout_names:
log.warning('The following {:d} Jenkins instance(s) failed to complete in {:d} minutes: {}'.format(len(timeout_names), DEPLOY_TIMEOUT // 60, ', '.join(timeout_names)))
run_failures = [x for x in cleanup_threads if not x.result]
run_fail_names = [x.name for x in run_failures]
if run_fail_names:
log.warning('The following {:d} Jenkins instance(s) encountered an error: {}'.format(len(run_fail_names), ', '.join(run_fail_names)))
failure_list = timeout_names + run_fail_names
failure_set = set(failure_list)
return failure_set
</DeepExtract>
|
@pytest.mark.scalecleanup
def test_cleanup_scale(mom, min_index, max_index, service_id_list) -> None:
"""
Args:
mom: Marathon on Marathon instance name
min_index: minimum index to begin jenkins suffixes at
max_index: maximum index to end jenkins suffixes at
service_id_list: list of service ids to delete
Blanket clean-up of jenkins instances on a DC/OS cluster.
1. Delete list of service ids if specified
2. Delete range between max and min if specified
1. Queries MoM for all apps matching "jenkins" prefix
2. Delete all jobs on running Jenkins instances
3. Uninstall all found Jenkins installs
"""
service_ids = list()
if service_id_list != '':
service_ids = service_id_list.split(',')
elif min_index != -1 and max_index != -1:
service_ids = ['jenkins/jenkins{}'.format(index) for index in range(min_index, max_index)]
else:
r = sdk_marathon.filter_apps_by_id('jenkins', mom)
jenkins_apps = r.json()['apps']
jenkins_ids = [x['id'] for x in jenkins_apps]
for service_id in jenkins_ids:
if service_id.startswith('/'):
service_id = service_id[1:]
if service_id == 'jenkins':
continue
service_ids.append(service_id)
thread_list = list()
for service_name in service_ids:
t = ResultThread(target=_cleanup_jenkins_install, daemon=False, name=service_name, args=(service_name,), kwargs=kwargs)
t.event = event
thread_list.append(t)
t.start()
cleanup_threads = thread_list
timeout_failures = _wait_on_threads(cleanup_threads, **kwargs)
timeout_names = [x.name for x in timeout_failures]
if timeout_names:
log.warning('The following {:d} Jenkins instance(s) failed to complete in {:d} minutes: {}'.format(len(timeout_names), DEPLOY_TIMEOUT // 60, ', '.join(timeout_names)))
run_failures = [x for x in cleanup_threads if not x.result]
run_fail_names = [x.name for x in run_failures]
if run_fail_names:
log.warning('The following {:d} Jenkins instance(s) encountered an error: {}'.format(len(run_fail_names), ', '.join(run_fail_names)))
failure_list = timeout_names + run_fail_names
failure_set = set(failure_list)
return failure_set
|
dcos-jenkins-service
|
positive
|
def _read_sections(self):
while not self._doc.eof():
<DeepExtract>
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and (not self._doc.eof()):
if not self._doc.peek(-1).strip():
section += ['']
section += self._doc.read_to_next_empty_line()
data = section
</DeepExtract>
name = data[0].strip()
if name.startswith('..'):
yield (name, data[1:])
elif len(data) < 2:
yield StopIteration
else:
yield (name, self._strip(data[2:]))
|
def _read_sections(self):
while not self._doc.eof():
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and (not self._doc.eof()):
if not self._doc.peek(-1).strip():
section += ['']
section += self._doc.read_to_next_empty_line()
data = section
name = data[0].strip()
if name.startswith('..'):
yield (name, data[1:])
elif len(data) < 2:
yield StopIteration
else:
yield (name, self._strip(data[2:]))
|
astroML
|
positive
|
def dump_set(self, key):
members = self.keyspace.smembers(key)
length = len(members)
<DeepExtract>
if length < 1 << 6:
result = struct.pack('>B', length & 255 | RDB_6BITLEN << 6)
elif length < 1 << 14:
result = struct.pack('>BB', length >> 8 & 255 | RDB_14BITLEN << 6, length & 255)
else:
result = struct.pack('>BL', RDB_32BITLEN << 6, length)
</DeepExtract>
for member in members:
result += self._dump_string(member)
return result
|
def dump_set(self, key):
members = self.keyspace.smembers(key)
length = len(members)
if length < 1 << 6:
result = struct.pack('>B', length & 255 | RDB_6BITLEN << 6)
elif length < 1 << 14:
result = struct.pack('>BB', length >> 8 & 255 | RDB_14BITLEN << 6, length & 255)
else:
result = struct.pack('>BL', RDB_32BITLEN << 6, length)
for member in members:
result += self._dump_string(member)
return result
|
dredis
|
positive
|
def marginal_expectation(prediction_method: Callable[[np.ndarray], np.ndarray], feature_samples: np.ndarray, baseline_samples: np.ndarray, baseline_feature_indices: List[int], return_averaged_results: bool=True, feature_perturbation: str='randomize_columns_jointly', max_batch_size: int=-1) -> np.ndarray:
"""Estimates the marginal expectation for samples in baseline_noise_samples when randomizing features that are not
part of baseline_feature_indices. That is, this function estimates
y^i = E[Y | do(x^i_s)] := \\int_x_s' E[Y | x^i_s, x_s'] p(x_s') d x_s',
where x^i_s is the i-th sample from baseline_noise_samples, s denotes the baseline_feature_indices and
x_s' ~ X_s' denotes the randomized features that are not in s. For an approximation of the integral, the given
prediction_method is evaluated multiple times for the same x^i_s, but different x_s' ~ X_s'.
:param prediction_method: Prediction method of interest. This should expect a numpy array as input for making
predictions.
:param feature_samples: Samples from the joint distribution. These are used for randomizing the features that are not in
baseline_feature_indices.
:param baseline_samples: Samples for which the marginal expectation should be estimated.
:param baseline_feature_indices: Column indices of the features in s. The values of these features remain constant
when estimating the expectation.
:param return_averaged_results: If set to True, the expectation over all evaluated samples for the i-th
baseline_noise_samples is returned. If set to False, all corresponding results for the i-th sample are returned.
:param feature_perturbation: Type of feature permutation:
'randomize_columns_independently': Each feature not in s is randomly permuted separately.
'randomize_columns_jointly': All features not in s are jointly permuted. Note that this still represents an
interventional distribution.
:param max_batch_size: Maximum batch size for estimating the predictions. This has a significant influence on the
overall memory usage. If set to -1, all samples are used in one batch.
:return: If return_averaged_results is True, a numpy array where the i-th entry is the marginal expectation
of x^i_s when randomizing the remaining features.
If return_averaged_results is False, a two dimensional numpy array where the i-th entry contains all
predictions for x^i_s when randomizing the remaining features.
"""
(feature_samples, baseline_samples) = shape_into_2d(feature_samples, baseline_samples)
batch_size = baseline_samples.shape[0] if max_batch_size == -1 else max_batch_size
result = [np.nan] * baseline_samples.shape[0]
feature_samples = np.array(feature_samples)
features_to_randomize = np.delete(np.arange(0, feature_samples.shape[1]), baseline_feature_indices)
if feature_perturbation == 'randomize_columns_independently':
<DeepExtract>
feature_samples = np.array(feature_samples)
if False:
feature_samples[:, features_to_randomize] = feature_samples[np.random.choice(feature_samples.shape[0], feature_samples.shape[0], replace=False)][:, features_to_randomize]
else:
for feature in features_to_randomize:
np.random.shuffle(feature_samples[:, feature])
feature_samples = feature_samples
</DeepExtract>
elif feature_perturbation == 'randomize_columns_jointly':
<DeepExtract>
feature_samples = np.array(feature_samples)
if True:
feature_samples[:, features_to_randomize] = feature_samples[np.random.choice(feature_samples.shape[0], feature_samples.shape[0], replace=False)][:, features_to_randomize]
else:
for feature in features_to_randomize:
np.random.shuffle(feature_samples[:, feature])
feature_samples = feature_samples
</DeepExtract>
else:
raise ValueError('Unknown argument %s as feature_perturbation type!' % feature_perturbation)
inputs = repmat(feature_samples, batch_size, 1)
for offset in range(0, baseline_samples.shape[0], batch_size):
if offset + batch_size > baseline_samples.shape[0]:
adjusted_batch_size = baseline_samples.shape[0] - offset
inputs = inputs[:adjusted_batch_size * feature_samples.shape[0]]
else:
adjusted_batch_size = batch_size
for index in range(adjusted_batch_size):
inputs[index * feature_samples.shape[0]:(index + 1) * feature_samples.shape[0], baseline_feature_indices] = baseline_samples[offset + index, baseline_feature_indices]
predictions = np.array(prediction_method(inputs))
for index in range(adjusted_batch_size):
if return_averaged_results:
result[offset + index] = np.mean(predictions[index * feature_samples.shape[0]:(index + 1) * feature_samples.shape[0]], axis=0)
else:
result[offset + index] = predictions[index * feature_samples.shape[0]:(index + 1) * feature_samples.shape[0]]
return np.array(result)
|
def marginal_expectation(prediction_method: Callable[[np.ndarray], np.ndarray], feature_samples: np.ndarray, baseline_samples: np.ndarray, baseline_feature_indices: List[int], return_averaged_results: bool=True, feature_perturbation: str='randomize_columns_jointly', max_batch_size: int=-1) -> np.ndarray:
"""Estimates the marginal expectation for samples in baseline_noise_samples when randomizing features that are not
part of baseline_feature_indices. That is, this function estimates
y^i = E[Y | do(x^i_s)] := \\int_x_s' E[Y | x^i_s, x_s'] p(x_s') d x_s',
where x^i_s is the i-th sample from baseline_noise_samples, s denotes the baseline_feature_indices and
x_s' ~ X_s' denotes the randomized features that are not in s. For an approximation of the integral, the given
prediction_method is evaluated multiple times for the same x^i_s, but different x_s' ~ X_s'.
:param prediction_method: Prediction method of interest. This should expect a numpy array as input for making
predictions.
:param feature_samples: Samples from the joint distribution. These are used for randomizing the features that are not in
baseline_feature_indices.
:param baseline_samples: Samples for which the marginal expectation should be estimated.
:param baseline_feature_indices: Column indices of the features in s. The values of these features remain constant
when estimating the expectation.
:param return_averaged_results: If set to True, the expectation over all evaluated samples for the i-th
baseline_noise_samples is returned. If set to False, all corresponding results for the i-th sample are returned.
:param feature_perturbation: Type of feature permutation:
'randomize_columns_independently': Each feature not in s is randomly permuted separately.
'randomize_columns_jointly': All features not in s are jointly permuted. Note that this still represents an
interventional distribution.
:param max_batch_size: Maximum batch size for estimating the predictions. This has a significant influence on the
overall memory usage. If set to -1, all samples are used in one batch.
:return: If return_averaged_results is True, a numpy array where the i-th entry is the marginal expectation
of x^i_s when randomizing the remaining features.
If return_averaged_results is False, a two dimensional numpy array where the i-th entry contains all
predictions for x^i_s when randomizing the remaining features.
"""
(feature_samples, baseline_samples) = shape_into_2d(feature_samples, baseline_samples)
batch_size = baseline_samples.shape[0] if max_batch_size == -1 else max_batch_size
result = [np.nan] * baseline_samples.shape[0]
feature_samples = np.array(feature_samples)
features_to_randomize = np.delete(np.arange(0, feature_samples.shape[1]), baseline_feature_indices)
if feature_perturbation == 'randomize_columns_independently':
feature_samples = np.array(feature_samples)
if False:
feature_samples[:, features_to_randomize] = feature_samples[np.random.choice(feature_samples.shape[0], feature_samples.shape[0], replace=False)][:, features_to_randomize]
else:
for feature in features_to_randomize:
np.random.shuffle(feature_samples[:, feature])
feature_samples = feature_samples
elif feature_perturbation == 'randomize_columns_jointly':
feature_samples = np.array(feature_samples)
if True:
feature_samples[:, features_to_randomize] = feature_samples[np.random.choice(feature_samples.shape[0], feature_samples.shape[0], replace=False)][:, features_to_randomize]
else:
for feature in features_to_randomize:
np.random.shuffle(feature_samples[:, feature])
feature_samples = feature_samples
else:
raise ValueError('Unknown argument %s as feature_perturbation type!' % feature_perturbation)
inputs = repmat(feature_samples, batch_size, 1)
for offset in range(0, baseline_samples.shape[0], batch_size):
if offset + batch_size > baseline_samples.shape[0]:
adjusted_batch_size = baseline_samples.shape[0] - offset
inputs = inputs[:adjusted_batch_size * feature_samples.shape[0]]
else:
adjusted_batch_size = batch_size
for index in range(adjusted_batch_size):
inputs[index * feature_samples.shape[0]:(index + 1) * feature_samples.shape[0], baseline_feature_indices] = baseline_samples[offset + index, baseline_feature_indices]
predictions = np.array(prediction_method(inputs))
for index in range(adjusted_batch_size):
if return_averaged_results:
result[offset + index] = np.mean(predictions[index * feature_samples.shape[0]:(index + 1) * feature_samples.shape[0]], axis=0)
else:
result[offset + index] = predictions[index * feature_samples.shape[0]:(index + 1) * feature_samples.shape[0]]
return np.array(result)
|
dowhy
|
positive
|
def _publish_json(**kwargs):
<DeepExtract>
prod_folder = _get_request_param('prod_folder', prod_folder)
with temporary_directory() as workdir:
input_folder = workdir / 'input'
output_folder = workdir / 'output'
input_folder.mkdir(parents=True, exist_ok=True)
output_folder.mkdir(parents=True, exist_ok=True)
forbid_tokens = ('location/', 'main.')
filter_func = lambda x: x.suffix == '.csv' and all((token not in str(x) for token in forbid_tokens))
download_folder(GCS_BUCKET_PROD, prod_folder, input_folder, filter_func)
logger.log_info(f"Downloaded {sum((1 for _ in input_folder.glob('**/*.csv')))} CSV files")
convert_tables_to_json(input_folder, output_folder)
logger.log_info('CSV files converted to JSON')
upload_folder(GCS_BUCKET_PROD, prod_folder, output_folder)
return Response('OK', status=200)
</DeepExtract>
<DeepExtract>
prod_folder = _get_request_param('prod_folder', prod_folder)
location_key_from = _get_request_param('location_key_from', location_key_from)
location_key_until = _get_request_param('location_key_until', location_key_until)
with temporary_directory() as workdir:
input_folder = workdir / 'input'
output_folder = workdir / 'output'
input_folder.mkdir(parents=True, exist_ok=True)
output_folder.mkdir(parents=True, exist_ok=True)
location_keys = list(table_read_column(SRC / 'data' / 'metadata.csv', 'key'))
if location_key_from is not None:
location_keys = [key for key in location_keys if key >= location_key_from]
if location_key_until is not None:
location_keys = [key for key in location_keys if key <= location_key_until]
logger.log_info(f'Converting {len(location_keys)} location subsets to JSON from {location_keys[0]} until {location_keys[-1]}')
def match_path(table_path: Path) -> bool:
try:
if prod_folder == 'v2':
(location_key, table_name) = str(table_path).split('/', 1)
return table_name == 'main.csv' and location_key in location_keys
elif prod_folder == 'v3':
(location_path, location_key) = (table_path.parent.name, table_path.stem)
return location_path == 'location' and location_key in location_keys
except:
return False
download_folder(GCS_BUCKET_PROD, prod_folder, input_folder, match_path)
logger.log_info(f"Downloaded {sum((1 for _ in input_folder.glob('**/*.csv')))} CSV files")
convert_tables_to_json(input_folder, output_folder)
converted_count = sum((1 for _ in output_folder.glob('**/*.json')))
logger.log_info(f'Converted {converted_count} files to JSON')
upload_folder(GCS_BUCKET_PROD, prod_folder, output_folder)
return Response('OK', status=200)
</DeepExtract>
|
def _publish_json(**kwargs):
prod_folder = _get_request_param('prod_folder', prod_folder)
with temporary_directory() as workdir:
input_folder = workdir / 'input'
output_folder = workdir / 'output'
input_folder.mkdir(parents=True, exist_ok=True)
output_folder.mkdir(parents=True, exist_ok=True)
forbid_tokens = ('location/', 'main.')
filter_func = lambda x: x.suffix == '.csv' and all((token not in str(x) for token in forbid_tokens))
download_folder(GCS_BUCKET_PROD, prod_folder, input_folder, filter_func)
logger.log_info(f"Downloaded {sum((1 for _ in input_folder.glob('**/*.csv')))} CSV files")
convert_tables_to_json(input_folder, output_folder)
logger.log_info('CSV files converted to JSON')
upload_folder(GCS_BUCKET_PROD, prod_folder, output_folder)
return Response('OK', status=200)
prod_folder = _get_request_param('prod_folder', prod_folder)
location_key_from = _get_request_param('location_key_from', location_key_from)
location_key_until = _get_request_param('location_key_until', location_key_until)
with temporary_directory() as workdir:
input_folder = workdir / 'input'
output_folder = workdir / 'output'
input_folder.mkdir(parents=True, exist_ok=True)
output_folder.mkdir(parents=True, exist_ok=True)
location_keys = list(table_read_column(SRC / 'data' / 'metadata.csv', 'key'))
if location_key_from is not None:
location_keys = [key for key in location_keys if key >= location_key_from]
if location_key_until is not None:
location_keys = [key for key in location_keys if key <= location_key_until]
logger.log_info(f'Converting {len(location_keys)} location subsets to JSON from {location_keys[0]} until {location_keys[-1]}')
def match_path(table_path: Path) -> bool:
try:
if prod_folder == 'v2':
(location_key, table_name) = str(table_path).split('/', 1)
return table_name == 'main.csv' and location_key in location_keys
elif prod_folder == 'v3':
(location_path, location_key) = (table_path.parent.name, table_path.stem)
return location_path == 'location' and location_key in location_keys
except:
return False
download_folder(GCS_BUCKET_PROD, prod_folder, input_folder, match_path)
logger.log_info(f"Downloaded {sum((1 for _ in input_folder.glob('**/*.csv')))} CSV files")
convert_tables_to_json(input_folder, output_folder)
converted_count = sum((1 for _ in output_folder.glob('**/*.json')))
logger.log_info(f'Converted {converted_count} files to JSON')
upload_folder(GCS_BUCKET_PROD, prod_folder, output_folder)
return Response('OK', status=200)
|
covid-19-open-data
|
positive
|
def iter_releases(self) -> List['PatcherRelease']:
"""Generate releases available in the storage, latest first"""
<DeepExtract>
base = self.fspath(f'cdtb/releases/{self.patchline}')
</DeepExtract>
if not os.path.isdir(base):
return
versions = []
for version in os.listdir(base):
try:
versions.append(int(version))
except ValueError:
continue
for version in sorted(versions, reverse=True):
if os.path.isfile(f'{base}/{version}/release.json'):
yield PatcherRelease(self, version)
|
def iter_releases(self) -> List['PatcherRelease']:
"""Generate releases available in the storage, latest first"""
base = self.fspath(f'cdtb/releases/{self.patchline}')
if not os.path.isdir(base):
return
versions = []
for version in os.listdir(base):
try:
versions.append(int(version))
except ValueError:
continue
for version in sorted(versions, reverse=True):
if os.path.isfile(f'{base}/{version}/release.json'):
yield PatcherRelease(self, version)
|
CDTB
|
positive
|
def __setattr__(self, key, value):
if key in ('_config', '_prefix'):
self.__dict__[key] = value
return
<DeepExtract>
warnings.warn('Attribute assignment is deprecated.', DeprecationWarning, stacklevel=3)
</DeepExtract>
if hasattr(DictMixin, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], self.__class__):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
|
def __setattr__(self, key, value):
if key in ('_config', '_prefix'):
self.__dict__[key] = value
return
warnings.warn('Attribute assignment is deprecated.', DeprecationWarning, stacklevel=3)
if hasattr(DictMixin, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], self.__class__):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
|
aws-servicebroker
|
positive
|
def likelihood_ratio_filter(node_pairs, modified_adjacency, original_adjacency, d_min, threshold=0.004, undirected=True):
"""
Filter the input node pairs based on the likelihood ratio test proposed by Zügner et al. 2018, see
https://dl.acm.org/citation.cfm?id=3220078. In essence, for each node pair return 1 if adding/removing the edge
between the two nodes does not violate the unnoticeability constraint, and return 0 otherwise. Assumes unweighted
and undirected graphs.
"""
N = int(modified_adjacency.shape[0])
original_degree_sequence = original_adjacency.sum(0)
current_degree_sequence = modified_adjacency.sum(0)
concat_degree_sequence = torch.cat((current_degree_sequence, original_degree_sequence))
<DeepExtract>
D_G = original_degree_sequence[original_degree_sequence >= d_min.item()]
try:
sum_log_degrees = torch.log(D_G).sum()
except:
sum_log_degrees = np.log(D_G).sum()
n = len(D_G)
alpha = compute_alpha(n, sum_log_degrees, d_min)
ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
(ll_orig, alpha_orig, n_orig, sum_log_degrees_original) = (ll, alpha, n, sum_log_degrees)
</DeepExtract>
<DeepExtract>
D_G = current_degree_sequence[current_degree_sequence >= d_min.item()]
try:
sum_log_degrees = torch.log(D_G).sum()
except:
sum_log_degrees = np.log(D_G).sum()
n = len(D_G)
alpha = compute_alpha(n, sum_log_degrees, d_min)
ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
(ll_current, alpha_current, n_current, sum_log_degrees_current) = (ll, alpha, n, sum_log_degrees)
</DeepExtract>
<DeepExtract>
D_G = concat_degree_sequence[concat_degree_sequence >= d_min.item()]
try:
sum_log_degrees = torch.log(D_G).sum()
except:
sum_log_degrees = np.log(D_G).sum()
n = len(D_G)
alpha = compute_alpha(n, sum_log_degrees, d_min)
ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
(ll_comb, alpha_comb, n_comb, sum_log_degrees_combined) = (ll, alpha, n, sum_log_degrees)
</DeepExtract>
current_ratio = -2 * ll_comb + 2 * (ll_orig + ll_current)
<DeepExtract>
edge_entries_before = modified_adjacency[node_pairs.T]
degree_sequence = modified_adjacency.sum(1)
D_G = degree_sequence[degree_sequence >= d_min.item()]
sum_log_degrees = torch.log(D_G).sum()
n = len(D_G)
deltas = -2 * edge_entries_before + 1
d_edges_before = degree_sequence[node_pairs]
d_edges_after = degree_sequence[node_pairs] + deltas[:, None]
(sum_log_degrees_after, new_n) = update_sum_log_degrees(sum_log_degrees, n, d_edges_before, d_edges_after, d_min)
new_alpha = compute_alpha(new_n, sum_log_degrees_after, d_min)
new_ll = compute_log_likelihood(new_n, new_alpha, sum_log_degrees_after, d_min)
(new_lls, new_alphas, new_ns, new_sum_log_degrees) = (new_ll, new_alpha, new_n, sum_log_degrees_after)
</DeepExtract>
n_combined = n_orig + new_ns
new_sum_log_degrees_combined = sum_log_degrees_original + new_sum_log_degrees
<DeepExtract>
try:
alpha = 1 + n_combined / (new_sum_log_degrees_combined - n_combined * torch.log(d_min - 0.5))
except:
alpha = 1 + n_combined / (new_sum_log_degrees_combined - n_combined * np.log(d_min - 0.5))
alpha_combined = alpha
</DeepExtract>
<DeepExtract>
try:
ll = n_combined * torch.log(alpha_combined) + n_combined * alpha_combined * torch.log(d_min) + (alpha_combined + 1) * new_sum_log_degrees_combined
except:
ll = n_combined * np.log(alpha_combined) + n_combined * alpha_combined * np.log(d_min) + (alpha_combined + 1) * new_sum_log_degrees_combined
new_ll_combined = ll
</DeepExtract>
new_ratios = -2 * new_ll_combined + 2 * (new_lls + ll_orig)
allowed_edges = new_ratios < threshold
if allowed_edges.is_cuda:
filtered_edges = node_pairs[allowed_edges.cpu().numpy().astype(np.bool)]
else:
filtered_edges = node_pairs[allowed_edges.numpy().astype(np.bool)]
allowed_mask = torch.zeros(modified_adjacency.shape)
allowed_mask[filtered_edges.T] = 1
if undirected:
allowed_mask += allowed_mask.t()
return (allowed_mask, current_ratio)
|
def likelihood_ratio_filter(node_pairs, modified_adjacency, original_adjacency, d_min, threshold=0.004, undirected=True):
"""
Filter the input node pairs based on the likelihood ratio test proposed by Zügner et al. 2018, see
https://dl.acm.org/citation.cfm?id=3220078. In essence, for each node pair return 1 if adding/removing the edge
between the two nodes does not violate the unnoticeability constraint, and return 0 otherwise. Assumes unweighted
and undirected graphs.
"""
N = int(modified_adjacency.shape[0])
original_degree_sequence = original_adjacency.sum(0)
current_degree_sequence = modified_adjacency.sum(0)
concat_degree_sequence = torch.cat((current_degree_sequence, original_degree_sequence))
D_G = original_degree_sequence[original_degree_sequence >= d_min.item()]
try:
sum_log_degrees = torch.log(D_G).sum()
except:
sum_log_degrees = np.log(D_G).sum()
n = len(D_G)
alpha = compute_alpha(n, sum_log_degrees, d_min)
ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
(ll_orig, alpha_orig, n_orig, sum_log_degrees_original) = (ll, alpha, n, sum_log_degrees)
D_G = current_degree_sequence[current_degree_sequence >= d_min.item()]
try:
sum_log_degrees = torch.log(D_G).sum()
except:
sum_log_degrees = np.log(D_G).sum()
n = len(D_G)
alpha = compute_alpha(n, sum_log_degrees, d_min)
ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
(ll_current, alpha_current, n_current, sum_log_degrees_current) = (ll, alpha, n, sum_log_degrees)
D_G = concat_degree_sequence[concat_degree_sequence >= d_min.item()]
try:
sum_log_degrees = torch.log(D_G).sum()
except:
sum_log_degrees = np.log(D_G).sum()
n = len(D_G)
alpha = compute_alpha(n, sum_log_degrees, d_min)
ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
(ll_comb, alpha_comb, n_comb, sum_log_degrees_combined) = (ll, alpha, n, sum_log_degrees)
current_ratio = -2 * ll_comb + 2 * (ll_orig + ll_current)
edge_entries_before = modified_adjacency[node_pairs.T]
degree_sequence = modified_adjacency.sum(1)
D_G = degree_sequence[degree_sequence >= d_min.item()]
sum_log_degrees = torch.log(D_G).sum()
n = len(D_G)
deltas = -2 * edge_entries_before + 1
d_edges_before = degree_sequence[node_pairs]
d_edges_after = degree_sequence[node_pairs] + deltas[:, None]
(sum_log_degrees_after, new_n) = update_sum_log_degrees(sum_log_degrees, n, d_edges_before, d_edges_after, d_min)
new_alpha = compute_alpha(new_n, sum_log_degrees_after, d_min)
new_ll = compute_log_likelihood(new_n, new_alpha, sum_log_degrees_after, d_min)
(new_lls, new_alphas, new_ns, new_sum_log_degrees) = (new_ll, new_alpha, new_n, sum_log_degrees_after)
n_combined = n_orig + new_ns
new_sum_log_degrees_combined = sum_log_degrees_original + new_sum_log_degrees
try:
alpha = 1 + n_combined / (new_sum_log_degrees_combined - n_combined * torch.log(d_min - 0.5))
except:
alpha = 1 + n_combined / (new_sum_log_degrees_combined - n_combined * np.log(d_min - 0.5))
alpha_combined = alpha
try:
ll = n_combined * torch.log(alpha_combined) + n_combined * alpha_combined * torch.log(d_min) + (alpha_combined + 1) * new_sum_log_degrees_combined
except:
ll = n_combined * np.log(alpha_combined) + n_combined * alpha_combined * np.log(d_min) + (alpha_combined + 1) * new_sum_log_degrees_combined
new_ll_combined = ll
new_ratios = -2 * new_ll_combined + 2 * (new_lls + ll_orig)
allowed_edges = new_ratios < threshold
if allowed_edges.is_cuda:
filtered_edges = node_pairs[allowed_edges.cpu().numpy().astype(np.bool)]
else:
filtered_edges = node_pairs[allowed_edges.numpy().astype(np.bool)]
allowed_mask = torch.zeros(modified_adjacency.shape)
allowed_mask[filtered_edges.T] = 1
if undirected:
allowed_mask += allowed_mask.t()
return (allowed_mask, current_ratio)
|
DeepRobust
|
positive
|
def __call__(self, loc, score, anchor, img_size, scale=1.0):
if self.parent_model.training:
n_pre_nms = self.n_train_pre_nms
n_post_nms = self.n_train_post_nms
else:
n_pre_nms = self.n_test_pre_nms
n_post_nms = self.n_test_post_nms
<DeepExtract>
if anchor.shape[0] == 0:
roi = np.zeros((0, 4), dtype=loc.dtype)
anchor = anchor.astype(anchor.dtype, copy=False)
src_height = anchor[:, 2] - anchor[:, 0]
src_width = anchor[:, 3] - anchor[:, 1]
src_ctr_y = anchor[:, 0] + 0.5 * src_height
src_ctr_x = anchor[:, 1] + 0.5 * src_width
dy = loc[:, 0::4]
dx = loc[:, 1::4]
dh = loc[:, 2::4]
dw = loc[:, 3::4]
ctr_y = dy * src_height[:, np.newaxis] + src_ctr_y[:, np.newaxis]
ctr_x = dx * src_width[:, np.newaxis] + src_ctr_x[:, np.newaxis]
h = np.exp(dh) * src_height[:, np.newaxis]
w = np.exp(dw) * src_width[:, np.newaxis]
dst_bbox = np.zeros(loc.shape, dtype=loc.dtype)
dst_bbox[:, 0::4] = ctr_y - 0.5 * h
dst_bbox[:, 1::4] = ctr_x - 0.5 * w
dst_bbox[:, 2::4] = ctr_y + 0.5 * h
dst_bbox[:, 3::4] = ctr_x + 0.5 * w
roi = dst_bbox
</DeepExtract>
roi[:, slice(0, 4, 2)] = np.clip(roi[:, slice(0, 4, 2)], 0, img_size[0])
roi[:, slice(1, 4, 2)] = np.clip(roi[:, slice(1, 4, 2)], 0, img_size[1])
min_size = self.min_size * scale
hs = roi[:, 2] - roi[:, 0]
ws = roi[:, 3] - roi[:, 1]
keep = np.where((hs >= min_size) & (ws >= min_size))
roi = roi[keep, :]
score = score[keep]
order = score.ravel().argsort()[::-1]
if n_pre_nms > 0:
order = order[:n_pre_nms]
roi = roi[order, :]
keep = non_maximum_supression(cp.ascontiguousarray(cp.asarray(roi)), thresh=self.nms_thresh)
if n_post_nms > 0:
keep = keep[:n_post_nms]
roi = roi[keep]
return roi
|
def __call__(self, loc, score, anchor, img_size, scale=1.0):
if self.parent_model.training:
n_pre_nms = self.n_train_pre_nms
n_post_nms = self.n_train_post_nms
else:
n_pre_nms = self.n_test_pre_nms
n_post_nms = self.n_test_post_nms
if anchor.shape[0] == 0:
roi = np.zeros((0, 4), dtype=loc.dtype)
anchor = anchor.astype(anchor.dtype, copy=False)
src_height = anchor[:, 2] - anchor[:, 0]
src_width = anchor[:, 3] - anchor[:, 1]
src_ctr_y = anchor[:, 0] + 0.5 * src_height
src_ctr_x = anchor[:, 1] + 0.5 * src_width
dy = loc[:, 0::4]
dx = loc[:, 1::4]
dh = loc[:, 2::4]
dw = loc[:, 3::4]
ctr_y = dy * src_height[:, np.newaxis] + src_ctr_y[:, np.newaxis]
ctr_x = dx * src_width[:, np.newaxis] + src_ctr_x[:, np.newaxis]
h = np.exp(dh) * src_height[:, np.newaxis]
w = np.exp(dw) * src_width[:, np.newaxis]
dst_bbox = np.zeros(loc.shape, dtype=loc.dtype)
dst_bbox[:, 0::4] = ctr_y - 0.5 * h
dst_bbox[:, 1::4] = ctr_x - 0.5 * w
dst_bbox[:, 2::4] = ctr_y + 0.5 * h
dst_bbox[:, 3::4] = ctr_x + 0.5 * w
roi = dst_bbox
roi[:, slice(0, 4, 2)] = np.clip(roi[:, slice(0, 4, 2)], 0, img_size[0])
roi[:, slice(1, 4, 2)] = np.clip(roi[:, slice(1, 4, 2)], 0, img_size[1])
min_size = self.min_size * scale
hs = roi[:, 2] - roi[:, 0]
ws = roi[:, 3] - roi[:, 1]
keep = np.where((hs >= min_size) & (ws >= min_size))
roi = roi[keep, :]
score = score[keep]
order = score.ravel().argsort()[::-1]
if n_pre_nms > 0:
order = order[:n_pre_nms]
roi = roi[order, :]
keep = non_maximum_supression(cp.ascontiguousarray(cp.asarray(roi)), thresh=self.nms_thresh)
if n_post_nms > 0:
keep = keep[:n_post_nms]
roi = roi[keep]
return roi
|
DMMN
|
positive
|
def test_get_aws_security_credentials_get_ecs_from_option_url(mocker):
<DeepExtract>
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
if add_test_profile:
config.add_section(AWSPROFILE)
config = config
</DeepExtract>
response = json.dumps({'AccessKeyId': ACCESS_KEY_ID_VAL, 'Expiration': 'EXPIRATION_DATE', 'RoleArn': 'TASK_ROLE_ARN', 'SecretAccessKey': SECRET_ACCESS_KEY_VAL, 'Token': SESSION_TOKEN_VAL})
mocker.patch('mount_efs.urlopen', return_value=MockUrlLibResponse(data=response))
(credentials, credentials_source) = mount_efs.get_aws_security_credentials(config, True, 'us-east-1', None, AWSCREDSURI)
assert credentials['AccessKeyId'] == ACCESS_KEY_ID_VAL
assert credentials['SecretAccessKey'] == SECRET_ACCESS_KEY_VAL
assert credentials['Token'] == SESSION_TOKEN_VAL
assert credentials_source == 'ecs:' + AWSCREDSURI
|
def test_get_aws_security_credentials_get_ecs_from_option_url(mocker):
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
if add_test_profile:
config.add_section(AWSPROFILE)
config = config
response = json.dumps({'AccessKeyId': ACCESS_KEY_ID_VAL, 'Expiration': 'EXPIRATION_DATE', 'RoleArn': 'TASK_ROLE_ARN', 'SecretAccessKey': SECRET_ACCESS_KEY_VAL, 'Token': SESSION_TOKEN_VAL})
mocker.patch('mount_efs.urlopen', return_value=MockUrlLibResponse(data=response))
(credentials, credentials_source) = mount_efs.get_aws_security_credentials(config, True, 'us-east-1', None, AWSCREDSURI)
assert credentials['AccessKeyId'] == ACCESS_KEY_ID_VAL
assert credentials['SecretAccessKey'] == SECRET_ACCESS_KEY_VAL
assert credentials['Token'] == SESSION_TOKEN_VAL
assert credentials_source == 'ecs:' + AWSCREDSURI
|
efs-utils
|
positive
|
def accumulate(self, state, reward):
W = self.net.layers.get_weights(unfold=True)
if self.rewards > self.bestreward:
self.bestreward = self.rewards
self.shadow_net = W
self.net.layers.set_weights(W + np.random.randn(*W.shape) * 0.1)
<DeepExtract>
self.rewards = 0
</DeepExtract>
self.bestreward *= 0.1
return self.bestreward
|
def accumulate(self, state, reward):
W = self.net.layers.get_weights(unfold=True)
if self.rewards > self.bestreward:
self.bestreward = self.rewards
self.shadow_net = W
self.net.layers.set_weights(W + np.random.randn(*W.shape) * 0.1)
self.rewards = 0
self.bestreward *= 0.1
return self.bestreward
|
brainforge
|
positive
|
def create_s3_signal(self, signal):
<DeepExtract>
session = boto3.session.Session()
s3_client = session.client('s3', region_name=self.aws_region)
</DeepExtract>
s3_client.upload_fileobj(io.BytesIO(b''), self.s3_bucket, self._get_s3_key(signal))
|
def create_s3_signal(self, signal):
session = boto3.session.Session()
s3_client = session.client('s3', region_name=self.aws_region)
s3_client.upload_fileobj(io.BytesIO(b''), self.s3_bucket, self._get_s3_key(signal))
|
deepracer-local
|
positive
|
def filter_string_values(self, obj, ignored=None, seen=None):
"""
Remove any value from the dictionary which match the key filters
"""
if not ignored:
<DeepExtract>
        ignored = set()
</DeepExtract>
if seen is None:
seen = []
if type(ignored) is list:
<DeepExtract>
        ignored = set(ignored)
</DeepExtract>
if id(obj) in ignored:
return self.recursive_value
if isinstance(obj, dict):
ignored.add(id(obj))
seen.append(obj)
clean_dict = {}
for (key, value) in obj.items():
if self._should_filter(key):
clean_dict[key] = self.filtered_value
else:
<DeepExtract>
                if not ignored:
                    ignored = set()
                if seen is None:
                    seen = []
                if type(ignored) is list:
                    ignored = set(ignored)
                if id(value) in ignored:
                    clean_dict[key] = self.recursive_value
                elif isinstance(value, dict):
                    ignored.add(id(value))
                    seen.append(value)
                    nested_clean = {}
                    for (nested_key, nested_value) in value.items():
                        if self._should_filter(nested_key):
                            nested_clean[nested_key] = self.filtered_value
                        else:
                            nested_clean[nested_key] = self.filter_string_values(nested_value, ignored, seen)
                    clean_dict[key] = nested_clean
                else:
                    clean_dict[key] = value
</DeepExtract>
return clean_dict
return obj
|
def filter_string_values(self, obj, ignored=None, seen=None):
"""
Remove any value from the dictionary which match the key filters
"""
if not ignored:
        ignored = set()
if seen is None:
seen = []
if type(ignored) is list:
        ignored = set(ignored)
if id(obj) in ignored:
return self.recursive_value
if isinstance(obj, dict):
ignored.add(id(obj))
seen.append(obj)
clean_dict = {}
for (key, value) in obj.items():
if self._should_filter(key):
clean_dict[key] = self.filtered_value
else:
                if not ignored:
                    ignored = set()
                if seen is None:
                    seen = []
                if type(ignored) is list:
                    ignored = set(ignored)
                if id(value) in ignored:
                    clean_dict[key] = self.recursive_value
                elif isinstance(value, dict):
                    ignored.add(id(value))
                    seen.append(value)
                    nested_clean = {}
                    for (nested_key, nested_value) in value.items():
                        if self._should_filter(nested_key):
                            nested_clean[nested_key] = self.filtered_value
                        else:
                            nested_clean[nested_key] = self.filter_string_values(nested_value, ignored, seen)
                    clean_dict[key] = nested_clean
                else:
                    clean_dict[key] = value
return clean_dict
return obj
|
bugsnag-python
|
positive
|
def main(destination_schema, batch_size, start_id, upper_lim_override):
Database.DATABASE_NAME = destination_schema
db = Database()
db.connect()
if use_autocommit:
db._connection.autocommit = True
if upper_lim_override:
upper_lim = upper_lim_override
else:
db._cursor.execute('SELECT MAX(id) FROM epidata.covidcast;')
for (max_id,) in db._cursor:
upper_lim = 1 + max_id
print(f"migrating data to schema '{destination_schema}', with batch size {batch_size} and {start_id} <= ids < {upper_lim}")
if start_id == 0:
print('this WILL truncate any existing v4 tables')
print()
if input("type 'yes' to continue: ") != 'yes':
sys.exit('operation cancelled!')
print(f"starting run at: {time.strftime('%c')}")
if start_id == 0:
print('truncating tables...')
for table in 'epimetric_load epimetric_latest epimetric_full geo_dim signal_dim'.split():
db._cursor.execute(f'TRUNCATE TABLE {table}')
db.commit()
start_id = 1
<DeepExtract>
batch_lower = start_id
while batch_lower < upper_lim:
batch_upper = min(batch_lower + batch_size, upper_lim)
batch_sql = f'\n INSERT INTO epimetric_load (\n `issue`, `source`, `signal`, geo_type, geo_value, time_type, time_value, `value`, stderr, sample_size, `lag`, value_updated_timestamp, is_latest_issue, missing_value, missing_stderr, missing_sample_size\n ) SELECT\n `issue`, `source`, `signal`, geo_type, geo_value, time_type, time_value, `value`, stderr, sample_size, `lag`, value_updated_timestamp, is_latest_issue, missing_value, missing_stderr, missing_sample_size\n FROM epidata.covidcast AS cc\n USE INDEX(`PRIMARY`)\n WHERE {batch_lower} <= cc.id AND cc.id < {batch_upper}; '
if use_transaction_wrappers:
start_tx(db._cursor)
print(f'-=-=-=-=-=-=-=- RUNNING BATCH STARTING AT {batch_lower} -=-=-=-=-=-=-=-')
print(f"-=-=-=-=-=-=-=- RUNNING ''INSERT INTO SELECT FROM''... ", end='')
t = time.time()
db._cursor.execute(batch_sql)
print(f'elapsed: {time.time() - t} sec, rows: {db._cursor.rowcount} -=-=-=-=-=-=-=-')
t = time.time()
db.run_dbjobs()
print(f'-=-=-=-=-=-=-=- RAN db_jobs()... elapsed: {time.time() - t} sec -=-=-=-=-=-=-=-')
print('-=-=-=-=-=-=-=- RUNNING commit()... ', end='')
t = time.time()
db.commit()
if use_transaction_wrappers:
finish_tx(db._cursor)
print(f'elapsed: {time.time() - t} sec -=-=-=-=-=-=-=-')
print('\n\n')
batch_lower = batch_upper
</DeepExtract>
print('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
db._cursor.execute(f'SELECT MAX(epimetric_id) FROM epimetric_full;')
for (max_id,) in db._cursor:
print(f'epimetric_full: {max_id}')
db._cursor.execute(f'SELECT MAX(epimetric_id) FROM epimetric_latest;')
for (max_id,) in db._cursor:
print(f'epimetric_latest: {max_id} (this should be <= the number above)')
db._cursor.execute(f'SELECT COUNT(signal_key_id), MAX(signal_key_id) FROM signal_dim;')
for (count_id, max_id) in db._cursor:
print(f'signal_dim: count {count_id} / max {max_id}')
db._cursor.execute(f'SELECT COUNT(geo_key_id), MAX(geo_key_id) FROM geo_dim;')
for (count_id, max_id) in db._cursor:
print(f'geo_dim: count {count_id} / max {max_id}')
return upper_lim
|
def main(destination_schema, batch_size, start_id, upper_lim_override):
Database.DATABASE_NAME = destination_schema
db = Database()
db.connect()
if use_autocommit:
db._connection.autocommit = True
if upper_lim_override:
upper_lim = upper_lim_override
else:
db._cursor.execute('SELECT MAX(id) FROM epidata.covidcast;')
for (max_id,) in db._cursor:
upper_lim = 1 + max_id
print(f"migrating data to schema '{destination_schema}', with batch size {batch_size} and {start_id} <= ids < {upper_lim}")
if start_id == 0:
print('this WILL truncate any existing v4 tables')
print()
if input("type 'yes' to continue: ") != 'yes':
sys.exit('operation cancelled!')
print(f"starting run at: {time.strftime('%c')}")
if start_id == 0:
print('truncating tables...')
for table in 'epimetric_load epimetric_latest epimetric_full geo_dim signal_dim'.split():
db._cursor.execute(f'TRUNCATE TABLE {table}')
db.commit()
start_id = 1
batch_lower = start_id
while batch_lower < upper_lim:
batch_upper = min(batch_lower + batch_size, upper_lim)
batch_sql = f'\n INSERT INTO epimetric_load (\n `issue`, `source`, `signal`, geo_type, geo_value, time_type, time_value, `value`, stderr, sample_size, `lag`, value_updated_timestamp, is_latest_issue, missing_value, missing_stderr, missing_sample_size\n ) SELECT\n `issue`, `source`, `signal`, geo_type, geo_value, time_type, time_value, `value`, stderr, sample_size, `lag`, value_updated_timestamp, is_latest_issue, missing_value, missing_stderr, missing_sample_size\n FROM epidata.covidcast AS cc\n USE INDEX(`PRIMARY`)\n WHERE {batch_lower} <= cc.id AND cc.id < {batch_upper}; '
if use_transaction_wrappers:
start_tx(db._cursor)
print(f'-=-=-=-=-=-=-=- RUNNING BATCH STARTING AT {batch_lower} -=-=-=-=-=-=-=-')
print(f"-=-=-=-=-=-=-=- RUNNING ''INSERT INTO SELECT FROM''... ", end='')
t = time.time()
db._cursor.execute(batch_sql)
print(f'elapsed: {time.time() - t} sec, rows: {db._cursor.rowcount} -=-=-=-=-=-=-=-')
t = time.time()
db.run_dbjobs()
print(f'-=-=-=-=-=-=-=- RAN db_jobs()... elapsed: {time.time() - t} sec -=-=-=-=-=-=-=-')
print('-=-=-=-=-=-=-=- RUNNING commit()... ', end='')
t = time.time()
db.commit()
if use_transaction_wrappers:
finish_tx(db._cursor)
print(f'elapsed: {time.time() - t} sec -=-=-=-=-=-=-=-')
print('\n\n')
batch_lower = batch_upper
print('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
db._cursor.execute(f'SELECT MAX(epimetric_id) FROM epimetric_full;')
for (max_id,) in db._cursor:
print(f'epimetric_full: {max_id}')
db._cursor.execute(f'SELECT MAX(epimetric_id) FROM epimetric_latest;')
for (max_id,) in db._cursor:
print(f'epimetric_latest: {max_id} (this should be <= the number above)')
db._cursor.execute(f'SELECT COUNT(signal_key_id), MAX(signal_key_id) FROM signal_dim;')
for (count_id, max_id) in db._cursor:
print(f'signal_dim: count {count_id} / max {max_id}')
db._cursor.execute(f'SELECT COUNT(geo_key_id), MAX(geo_key_id) FROM geo_dim;')
for (count_id, max_id) in db._cursor:
print(f'geo_dim: count {count_id} / max {max_id}')
return upper_lim
|
delphi-epidata
|
positive
|
def get_next_object(opt, site, objects, ctime):
"""Not too efficent but works for now."""
rise_list = {}
for obj in objects:
obj_id = obj
obj_info = objects[obj]
if opt.debug:
print('# object ', obj_id, ' @ ', ctime)
print('# obj_info', obj_info)
site_name = site['site']['name']
site_tag = site['site']['tag']
obs_lat = site['site']['latitude']
obs_long = site['site']['longitude']
obs_elev = float(site['site']['elevation'])
obj_name = obj_info['name']
obj_tle1 = obj_info['tle1'][1:-1]
obj_tle2 = obj_info['tle2'][1:-1]
obj_freqs = np.array(string.split(obj_info['frequencies'], ','), np.float32)
c_dtime = datetime.datetime.utcfromtimestamp(ctime)
c_ephem_time = ephem.Date(c_dtime)
<DeepExtract>
obsLoc = ephem.Observer()
obsLoc.lat = obs_lat
obsLoc.long = obs_long
obsLoc.elev = obs_elev
obsLoc.date = c_ephem_time
if opt.debug:
print('dbg location: ', obsLoc)
print('dbg tle1: ', obj_tle1)
print('dbg tle2: ', obj_tle2)
satObj = ephem.readtle(obj_name, obj_tle1, obj_tle2)
if opt.debug:
print('dbg object: ', satObj)
satObj.compute(obsLoc)
pinfo = obsLoc.next_pass(satObj)
(sat_rise, sat_transit, sat_set) = (pinfo[0], pinfo[2], pinfo[4])
</DeepExtract>
if sat_set <= sat_rise or sat_transit <= sat_rise or sat_set <= sat_transit:
continue
rise_list[sat_rise] = obj
if opt.debug:
print(' rise list : ', rise_list)
keys = list(rise_list.keys())
if opt.debug:
print(' rise keys : ', keys)
keys.sort()
if opt.debug:
print(' sorted : ', keys)
print(' selected : ', rise_list[keys[0]])
return rise_list[keys[0]]
|
def get_next_object(opt, site, objects, ctime):
"""Not too efficent but works for now."""
rise_list = {}
for obj in objects:
obj_id = obj
obj_info = objects[obj]
if opt.debug:
print('# object ', obj_id, ' @ ', ctime)
print('# obj_info', obj_info)
site_name = site['site']['name']
site_tag = site['site']['tag']
obs_lat = site['site']['latitude']
obs_long = site['site']['longitude']
obs_elev = float(site['site']['elevation'])
obj_name = obj_info['name']
obj_tle1 = obj_info['tle1'][1:-1]
obj_tle2 = obj_info['tle2'][1:-1]
obj_freqs = np.array(string.split(obj_info['frequencies'], ','), np.float32)
c_dtime = datetime.datetime.utcfromtimestamp(ctime)
c_ephem_time = ephem.Date(c_dtime)
obsLoc = ephem.Observer()
obsLoc.lat = obs_lat
obsLoc.long = obs_long
obsLoc.elev = obs_elev
obsLoc.date = c_ephem_time
if opt.debug:
print('dbg location: ', obsLoc)
print('dbg tle1: ', obj_tle1)
print('dbg tle2: ', obj_tle2)
satObj = ephem.readtle(obj_name, obj_tle1, obj_tle2)
if opt.debug:
print('dbg object: ', satObj)
satObj.compute(obsLoc)
pinfo = obsLoc.next_pass(satObj)
(sat_rise, sat_transit, sat_set) = (pinfo[0], pinfo[2], pinfo[4])
if sat_set <= sat_rise or sat_transit <= sat_rise or sat_set <= sat_transit:
continue
rise_list[sat_rise] = obj
if opt.debug:
print(' rise list : ', rise_list)
keys = list(rise_list.keys())
if opt.debug:
print(' rise keys : ', keys)
keys.sort()
if opt.debug:
print(' sorted : ', keys)
print(' selected : ', rise_list[keys[0]])
return rise_list[keys[0]]
|
digital_rf
|
positive
|
def update_trades_table_and_activity_monitor(self, trade: dict, caller):
"""
Updates trade table and activity based on caller and sends message to Telegram if live bot is trading and
Telegram feature is enabled.
:param trade: Trade information to add to activity monitor and trades table.
:param caller: Caller object that will rule which tables get updated.
"""
table = self.interface_dictionary[caller]['mainInterface']['historyTable']
<DeepExtract>
if caller == SIMULATION:
trader = self.simulation_trader
elif caller == LIVE:
trader = self.trader
elif caller == BACKTEST:
trader = self.backtester
else:
raise TypeError('Invalid type of caller specified.')
</DeepExtract>
if not trader:
return
trade_data = [trade['orderID'], trade['pair'], trade['price'], trade['percentage'], trade['profit'], trade['method'], trade['action']]
add_to_table(table, trade_data)
<DeepExtract>
if caller == SIMULATION:
self.add_to_simulation_activity_monitor(trade['action'])
elif caller == LIVE:
self.add_to_live_activity_monitor(trade['action'])
elif caller == BACKTEST:
self.add_to_backtest_monitor(trade['action'])
else:
raise TypeError('Invalid type of caller specified.')
</DeepExtract>
if caller == LIVE and self.telegram_bot and self.configuration.enableTelegramSendMessage.isChecked():
<DeepExtract>
try:
if self.telegram_bot is None:
api_key = self.configuration.telegramApiKey.text()
self.telegram_bot = TelegramBot(gui=self, token=api_key)
chat_id = self.configuration.telegramChatID.text()
if self.configuration.chat_pass:
self.telegram_bot.send_message(chat_id, trade['action'])
if stop_bot:
self.telegram_bot.stop()
self.telegram_bot = None
except Exception as e:
self.logger.exception(str(e))
</DeepExtract>
<DeepExtract>
if caller == LIVE:
monitor = self.activityMonitor
elif caller == SIMULATION:
monitor = self.simulationActivityMonitor
elif caller == BACKTEST:
monitor = self.backtestTable
else:
raise ValueError('Invalid type of caller specified.')
</DeepExtract>
monitor.scrollToBottom()
table.scrollToBottom()
|
def update_trades_table_and_activity_monitor(self, trade: dict, caller):
"""
Updates trade table and activity based on caller and sends message to Telegram if live bot is trading and
Telegram feature is enabled.
:param trade: Trade information to add to activity monitor and trades table.
:param caller: Caller object that will rule which tables get updated.
"""
table = self.interface_dictionary[caller]['mainInterface']['historyTable']
if caller == SIMULATION:
trader = self.simulation_trader
elif caller == LIVE:
trader = self.trader
elif caller == BACKTEST:
trader = self.backtester
else:
raise TypeError('Invalid type of caller specified.')
if not trader:
return
trade_data = [trade['orderID'], trade['pair'], trade['price'], trade['percentage'], trade['profit'], trade['method'], trade['action']]
add_to_table(table, trade_data)
if caller == SIMULATION:
self.add_to_simulation_activity_monitor(trade['action'])
elif caller == LIVE:
self.add_to_live_activity_monitor(trade['action'])
elif caller == BACKTEST:
self.add_to_backtest_monitor(trade['action'])
else:
raise TypeError('Invalid type of caller specified.')
if caller == LIVE and self.telegram_bot and self.configuration.enableTelegramSendMessage.isChecked():
try:
if self.telegram_bot is None:
api_key = self.configuration.telegramApiKey.text()
self.telegram_bot = TelegramBot(gui=self, token=api_key)
chat_id = self.configuration.telegramChatID.text()
if self.configuration.chat_pass:
self.telegram_bot.send_message(chat_id, trade['action'])
if stop_bot:
self.telegram_bot.stop()
self.telegram_bot = None
except Exception as e:
self.logger.exception(str(e))
if caller == LIVE:
monitor = self.activityMonitor
elif caller == SIMULATION:
monitor = self.simulationActivityMonitor
elif caller == BACKTEST:
monitor = self.backtestTable
else:
raise ValueError('Invalid type of caller specified.')
monitor.scrollToBottom()
table.scrollToBottom()
|
algobot
|
positive
|
def with_roles_available(self, roles: Union[list, str], entity_slug, user_model, coa_slug: Optional[str]) -> AccountModelQuerySet:
"""
Convenience method to pull only available and unlocked AccountModels for a specific EntityModel and for a
specific list of roles.
Parameters
----------
entity_slug: EntityModel or str
The EntityModel or EntityModel slug to pull accounts from. If slug is passed and coa_slug is None will
result in an additional Database query to determine the default code of accounts.
coa_slug: str
Explicitly specify which chart of accounts to use. If None, will pull default Chart of Accounts.
Discussed in detail in the CoA Model CoA slug, basically helps in identifying the complete Chart of
Accounts for a particular EntityModel.
user_model:
The Django User Model making the request to check for permissions.
roles: list or str
        Function accepts a single str instance of a role or a list of roles. For a list of roles, refer to io.roles.py.
Returns
-------
AccountModelQuerySet
A QuerySet of all requested EntityModel Chart of Accounts.
"""
if isinstance(roles, str):
roles = [roles]
roles = validate_roles(roles)
<DeepExtract>
qs = self.for_entity(user_model=user_model, entity_slug=entity_slug, coa_slug=coa_slug)
qs = qs.filter(active=True, locked=False)
</DeepExtract>
return qs.filter(role__in=roles)
|
def with_roles_available(self, roles: Union[list, str], entity_slug, user_model, coa_slug: Optional[str]) -> AccountModelQuerySet:
"""
Convenience method to pull only available and unlocked AccountModels for a specific EntityModel and for a
specific list of roles.
Parameters
----------
entity_slug: EntityModel or str
The EntityModel or EntityModel slug to pull accounts from. If slug is passed and coa_slug is None will
result in an additional Database query to determine the default code of accounts.
coa_slug: str
Explicitly specify which chart of accounts to use. If None, will pull default Chart of Accounts.
Discussed in detail in the CoA Model CoA slug, basically helps in identifying the complete Chart of
Accounts for a particular EntityModel.
user_model:
The Django User Model making the request to check for permissions.
roles: list or str
        Function accepts a single str instance of a role or a list of roles. For a list of roles, refer to io.roles.py.
Returns
-------
AccountModelQuerySet
A QuerySet of all requested EntityModel Chart of Accounts.
"""
if isinstance(roles, str):
roles = [roles]
roles = validate_roles(roles)
qs = self.for_entity(user_model=user_model, entity_slug=entity_slug, coa_slug=coa_slug)
qs = qs.filter(active=True, locked=False)
return qs.filter(role__in=roles)
|
django-ledger
|
positive
|
def eval(t, dset_loader, use_gpu=True):
with torch.no_grad():
total_loss = 0
total_acc = 0
total_num = 0
task = torch.autograd.Variable(torch.LongTensor([t]).cuda(), requires_grad=False)
self.model.eval()
total_reg = 0
for data in dset_loader:
(images, targets) = data
bs = images.shape[0]
images = images.cuda() if use_gpu else images
targets = targets.cuda() if use_gpu else targets
(images, targets) = (Variable(images, requires_grad=False), Variable(targets, requires_grad=False))
(logits, masks) = self.model.forward(task, images, s=self.smax)
<DeepExtract>
reg = 0
count = 0
if self.mask_pre is not None:
for (m, mp) in zip(masks, self.mask_pre):
aux = 1 - mp
reg += (m * aux).sum()
count += aux.sum()
else:
for m in masks:
reg += m.sum()
count += np.prod(m.size()).item()
reg /= count
(loss, reg) = (self.ce(logits, targets) + self.lamb * reg, self.lamb * reg)
</DeepExtract>
(_, pred) = logits.max(1)
hits = (pred == targets).float()
total_loss += loss.data.cpu().numpy().item() * bs
total_acc += hits.sum().data.cpu().numpy().item()
total_num += bs
total_reg += reg.data.cpu().numpy().item() * bs
print('<reg={:.6f}/ce={:.6f}>'.format(total_reg / total_num, (total_loss - total_reg) / total_num), end='')
return (total_loss / total_num, total_acc / total_num)
|
def eval(t, dset_loader, use_gpu=True):
with torch.no_grad():
total_loss = 0
total_acc = 0
total_num = 0
task = torch.autograd.Variable(torch.LongTensor([t]).cuda(), requires_grad=False)
self.model.eval()
total_reg = 0
for data in dset_loader:
(images, targets) = data
bs = images.shape[0]
images = images.cuda() if use_gpu else images
targets = targets.cuda() if use_gpu else targets
(images, targets) = (Variable(images, requires_grad=False), Variable(targets, requires_grad=False))
(logits, masks) = self.model.forward(task, images, s=self.smax)
reg = 0
count = 0
if self.mask_pre is not None:
for (m, mp) in zip(masks, self.mask_pre):
aux = 1 - mp
reg += (m * aux).sum()
count += aux.sum()
else:
for m in masks:
reg += m.sum()
count += np.prod(m.size()).item()
reg /= count
(loss, reg) = (self.ce(logits, targets) + self.lamb * reg, self.lamb * reg)
(_, pred) = logits.max(1)
hits = (pred == targets).float()
total_loss += loss.data.cpu().numpy().item() * bs
total_acc += hits.sum().data.cpu().numpy().item()
total_num += bs
total_reg += reg.data.cpu().numpy().item() * bs
print('<reg={:.6f}/ce={:.6f}>'.format(total_reg / total_num, (total_loss - total_reg) / total_num), end='')
return (total_loss / total_num, total_acc / total_num)
|
CLsurvey
|
positive
|
def test_standby_failure(self):
"""
That the failure of a standby-replay daemon happens cleanly
and doesn't interrupt anything else.
"""
use_daemons = sorted(self.mds_cluster.mds_ids[0:2])
(mds_a, mds_b) = use_daemons
log.info('Using MDS daemons: {0}'.format(use_daemons))
<DeepExtract>
self.set_conf('mds.{0}'.format(mds_b), 'mds_standby_for_name', mds_a)
if True:
self.set_conf('mds.{0}'.format(mds_b), 'mds_standby_replay', 'true')
</DeepExtract>
<DeepExtract>
self.set_conf('mds.{0}'.format(mds_a), 'mds_standby_for_name', mds_b)
if False:
self.set_conf('mds.{0}'.format(mds_a), 'mds_standby_replay', 'true')
</DeepExtract>
fs_a = self.mds_cluster.newfs('alpha')
self.mds_cluster.mds_restart(mds_a)
fs_a.wait_for_daemons()
self.assertEqual(fs_a.get_active_names(), [mds_a])
self.mds_cluster.mds_restart(mds_b)
self.wait_for_daemon_start([mds_b])
<DeepExtract>
status = self.mds_cluster.status()
info = status.get_mds(mds_b)
if info is None:
log.warn(str(status))
raise RuntimeError("MDS '{0}' not found".format(mds_b))
else:
info_b = info
</DeepExtract>
self.assertEqual(info_b['state'], 'up:standby-replay')
self.assertEqual(info_b['standby_for_name'], mds_a)
self.assertEqual(info_b['rank'], 0)
self.mds_cluster.mds_stop(mds_b)
self.mds_cluster.mds_fail(mds_b)
self.assertEqual(fs_a.get_active_names(), [mds_a])
mds_map = fs_a.get_mds_map()
self.assertEqual(len(mds_map['info']), 1)
self.assertEqual(mds_map['failed'], [])
self.assertEqual(mds_map['damaged'], [])
self.assertEqual(mds_map['stopped'], [])
|
def test_standby_failure(self):
"""
That the failure of a standby-replay daemon happens cleanly
and doesn't interrupt anything else.
"""
use_daemons = sorted(self.mds_cluster.mds_ids[0:2])
(mds_a, mds_b) = use_daemons
log.info('Using MDS daemons: {0}'.format(use_daemons))
self.set_conf('mds.{0}'.format(mds_b), 'mds_standby_for_name', mds_a)
if True:
self.set_conf('mds.{0}'.format(mds_b), 'mds_standby_replay', 'true')
self.set_conf('mds.{0}'.format(mds_a), 'mds_standby_for_name', mds_b)
if False:
self.set_conf('mds.{0}'.format(mds_a), 'mds_standby_replay', 'true')
fs_a = self.mds_cluster.newfs('alpha')
self.mds_cluster.mds_restart(mds_a)
fs_a.wait_for_daemons()
self.assertEqual(fs_a.get_active_names(), [mds_a])
self.mds_cluster.mds_restart(mds_b)
self.wait_for_daemon_start([mds_b])
status = self.mds_cluster.status()
info = status.get_mds(mds_b)
if info is None:
log.warn(str(status))
raise RuntimeError("MDS '{0}' not found".format(mds_b))
else:
info_b = info
self.assertEqual(info_b['state'], 'up:standby-replay')
self.assertEqual(info_b['standby_for_name'], mds_a)
self.assertEqual(info_b['rank'], 0)
self.mds_cluster.mds_stop(mds_b)
self.mds_cluster.mds_fail(mds_b)
self.assertEqual(fs_a.get_active_names(), [mds_a])
mds_map = fs_a.get_mds_map()
self.assertEqual(len(mds_map['info']), 1)
self.assertEqual(mds_map['failed'], [])
self.assertEqual(mds_map['damaged'], [])
self.assertEqual(mds_map['stopped'], [])
|
ceph-qa-suite
|
positive
|
def open(self, port, param):
errorcode = ReturnCodes.Good
try:
<DeepExtract>
        cli = None
        for client in self.clients:
            if port == client['port']:
                cli = client
                break
</DeepExtract>
if cli is None:
<DeepExtract>
com_id = port[4:]
com_param = [9600, 8, 'N', 1, 0, 0, 0, 0]
if param is not None:
s = param.split('-')
com_param = [int(s[0]), int(s[1]), s[2], int(s[3]), int(s[4]), int(s[5]), int(s[6]), int(s[7])]
ser_client = serial.Serial(baudrate=com_param[0], bytesize=com_param[1], parity=com_param[2], timeout=com_param[4], stopbits=com_param[3], dsrdtr=com_param[5], xonxoff=com_param[6], rtscts=com_param[7])
ser_client.setPort(com_id)
client_param = {'port': port, 'client': ser_client, 'state': 'closed'}
self.clients.append(client_param)
ser_client = ser_client
</DeepExtract>
<DeepExtract>
            cli = None
            for client in self.clients:
                if port == client['port']:
                    cli = client
                    break
</DeepExtract>
else:
ser_client = cli['client']
if not ser_client.isOpen():
ser_client.open()
cli['state'] = 'opened'
result = 'serial port %s is opened' % port
except BaseException:
logger.exception('failed to open serial port %s' % port)
result = 'failed to open serial port %s' % port
errorcode = ReturnCodes.SCPI_SerialOpenFail
return (errorcode, result)
|
def open(self, port, param):
errorcode = ReturnCodes.Good
try:
        cli = None
        for client in self.clients:
            if port == client['port']:
                cli = client
                break
if cli is None:
com_id = port[4:]
com_param = [9600, 8, 'N', 1, 0, 0, 0, 0]
if param is not None:
s = param.split('-')
com_param = [int(s[0]), int(s[1]), s[2], int(s[3]), int(s[4]), int(s[5]), int(s[6]), int(s[7])]
ser_client = serial.Serial(baudrate=com_param[0], bytesize=com_param[1], parity=com_param[2], timeout=com_param[4], stopbits=com_param[3], dsrdtr=com_param[5], xonxoff=com_param[6], rtscts=com_param[7])
ser_client.setPort(com_id)
client_param = {'port': port, 'client': ser_client, 'state': 'closed'}
self.clients.append(client_param)
ser_client = ser_client
            cli = None
            for client in self.clients:
                if port == client['port']:
                    cli = client
                    break
else:
ser_client = cli['client']
if not ser_client.isOpen():
ser_client.open()
cli['state'] = 'opened'
result = 'serial port %s is opened' % port
except BaseException:
logger.exception('failed to open serial port %s' % port)
result = 'failed to open serial port %s' % port
errorcode = ReturnCodes.SCPI_SerialOpenFail
return (errorcode, result)
|
Converter-for-OPCUA
|
positive
|
def make_enc_kem(name, value, sig_key, alg, enc_key, enc):
<DeepExtract>
header = {'kid': sig_key.key_id, 'alg': alg}
claims = {'sub': name, 'exp': int(time.time() + 5 * 60)}
if value is not None:
claims['value'] = value
jwt = JWT(header, claims)
jwt.make_signed_token(sig_key)
plaintext = jwt.serialize(compact=True)
</DeepExtract>
eprot = {'kid': enc_key.key_id, 'alg': enc[0], 'enc': enc[1]}
jwe = JWE(plaintext, json_encode(eprot))
jwe.add_recipient(enc_key)
return jwe.serialize(compact=True)
|
def make_enc_kem(name, value, sig_key, alg, enc_key, enc):
header = {'kid': sig_key.key_id, 'alg': alg}
claims = {'sub': name, 'exp': int(time.time() + 5 * 60)}
if value is not None:
claims['value'] = value
jwt = JWT(header, claims)
jwt.make_signed_token(sig_key)
plaintext = jwt.serialize(compact=True)
eprot = {'kid': enc_key.key_id, 'alg': enc[0], 'enc': enc[1]}
jwe = JWE(plaintext, json_encode(eprot))
jwe.add_recipient(enc_key)
return jwe.serialize(compact=True)
|
custodia
|
positive
|
def phi_rotamers(traj, buffer_width=15):
hard_boundaries = [0, 180, 360]
<DeepExtract>
valid_dihedral_types = ['phi', 'psi', 'chi1', 'chi2', 'chi3', 'chi4']
if 'phi' not in valid_dihedral_types:
        (angles, atom_inds) = (None, None)
f = getattr(md, 'compute_%s' % 'phi')
(atom_inds, angles) = f(traj)
angles = np.rad2deg(angles)
angles[np.where(angles < 0)] += 360
n_angles = angles.shape[1]
ref_atom_inds = np.zeros(n_angles)
for i in range(n_angles):
atom = traj.topology.atom(atom_inds[i, 2])
ref_atom_inds[i] = int(atom.index)
(angles, atom_inds) = (angles, atom_inds)
</DeepExtract>
(n_frames, n_angles) = angles.shape
rotamers = np.zeros((n_frames, n_angles), dtype='int16')
for i in range(n_angles):
<DeepExtract>
        n_basins = len(hard_boundaries) - 1
        if buffer_width < 0 or buffer_width >= 360.0 / n_basins:
            raise DataInvalid('Buffer width (got %s) must be between 0 and 360 degrees.' % buffer_width)
        if hard_boundaries[0] != 0 or hard_boundaries[-1] != 360:
            raise DataInvalid('hard_boundaries list must start with 0 and end with 360, list was %s.' % hard_boundaries)
        angle_rotamers = -1 * np.ones(n_frames, dtype='int16')
        for basin in range(n_basins):
            if angles[0, i] < hard_boundaries[basin + 1]:
                angle_rotamers[0] = basin
                break
        cur_state = angle_rotamers[0]
        for frame in range(1, n_frames):
            new_angle = angles[frame, i]
            if is_buffered_transition(cur_state, new_angle, hard_boundaries, buffer_width):
                cur_state = np.digitize(new_angle, hard_boundaries) - 1
            angle_rotamers[frame] = cur_state
        rotamers[:, i] = angle_rotamers
</DeepExtract>
n_states = 2 * np.ones(n_angles, dtype='int16')
return (rotamers, atom_inds, n_states)
|
def phi_rotamers(traj, buffer_width=15):
hard_boundaries = [0, 180, 360]
valid_dihedral_types = ['phi', 'psi', 'chi1', 'chi2', 'chi3', 'chi4']
if 'phi' not in valid_dihedral_types:
        (angles, atom_inds) = (None, None)
f = getattr(md, 'compute_%s' % 'phi')
(atom_inds, angles) = f(traj)
angles = np.rad2deg(angles)
angles[np.where(angles < 0)] += 360
n_angles = angles.shape[1]
ref_atom_inds = np.zeros(n_angles)
for i in range(n_angles):
atom = traj.topology.atom(atom_inds[i, 2])
ref_atom_inds[i] = int(atom.index)
(angles, atom_inds) = (angles, atom_inds)
(n_frames, n_angles) = angles.shape
rotamers = np.zeros((n_frames, n_angles), dtype='int16')
for i in range(n_angles):
        n_basins = len(hard_boundaries) - 1
        if buffer_width < 0 or buffer_width >= 360.0 / n_basins:
            raise DataInvalid('Buffer width (got %s) must be between 0 and 360 degrees.' % buffer_width)
        if hard_boundaries[0] != 0 or hard_boundaries[-1] != 360:
            raise DataInvalid('hard_boundaries list must start with 0 and end with 360, list was %s.' % hard_boundaries)
        angle_rotamers = -1 * np.ones(n_frames, dtype='int16')
        for basin in range(n_basins):
            if angles[0, i] < hard_boundaries[basin + 1]:
                angle_rotamers[0] = basin
                break
        cur_state = angle_rotamers[0]
        for frame in range(1, n_frames):
            new_angle = angles[frame, i]
            if is_buffered_transition(cur_state, new_angle, hard_boundaries, buffer_width):
                cur_state = np.digitize(new_angle, hard_boundaries) - 1
            angle_rotamers[frame] = cur_state
        rotamers[:, i] = angle_rotamers
n_states = 2 * np.ones(n_angles, dtype='int16')
return (rotamers, atom_inds, n_states)
|
enspara
|
positive
|
def clean(names, queue=False, exchange=False):
"""Clean any queues or exchanges created by the test.
:param names: Queue/Exchange names.
:param queue: Remove queues.
:param exchange: Remove exchanges.
:return:
"""
if not queue and (not exchange):
return
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD, timeout=10)
if queue:
<DeepExtract>
for name in names:
try:
api.queue.delete(name)
except exception.ApiError:
pass
</DeepExtract>
if exchange:
<DeepExtract>
for name in names:
try:
api.exchange.delete(name)
except exception.ApiError:
pass
</DeepExtract>
|
def clean(names, queue=False, exchange=False):
"""Clean any queues or exchanges created by the test.
:param names: Queue/Exchange names.
:param queue: Remove queues.
:param exchange: Remove exchanges.
:return:
"""
if not queue and (not exchange):
return
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD, timeout=10)
if queue:
for name in names:
try:
api.queue.delete(name)
except exception.ApiError:
pass
if exchange:
for name in names:
try:
api.exchange.delete(name)
except exception.ApiError:
pass
|
amqpstorm
|
positive
|
@pytest.mark.parametrize('src,result', (('{}', {}), ("{'a': 'b', 1: 2}", {'a': 'b', 1: 2}), ('{None: 42}', {None: 42}), ('\n d = {"a": "b"}\n {1: 2, **d}\n ', {'a': 'b', 1: 2}), ("\n d1 = {'a': 'b'}\n d2 = {'c': 'd'}\n {**d1, **d2}\n ", {'a': 'b', 'c': 'd'})))
def test_dictionary(src, result):
<DeepExtract>
tree = collect(dedent(src), minimal=True)
loc = ScanLocation(location='<unknown>')
v = Visitor.run_stages(location=loc, stages=('convert', 'rewrite'), ast_tree=tree)
if single:
tree = v.tree[-1]
else:
tree = v.tree
</DeepExtract>
assert isinstance(tree, Dictionary)
src_dict = tree.as_native()
assert src_dict == result, src_dict
|
@pytest.mark.parametrize('src,result', (('{}', {}), ("{'a': 'b', 1: 2}", {'a': 'b', 1: 2}), ('{None: 42}', {None: 42}), ('\n d = {"a": "b"}\n {1: 2, **d}\n ', {'a': 'b', 1: 2}), ("\n d1 = {'a': 'b'}\n d2 = {'c': 'd'}\n {**d1, **d2}\n ", {'a': 'b', 'c': 'd'})))
def test_dictionary(src, result):
tree = collect(dedent(src), minimal=True)
loc = ScanLocation(location='<unknown>')
v = Visitor.run_stages(location=loc, stages=('convert', 'rewrite'), ast_tree=tree)
if single:
tree = v.tree[-1]
else:
tree = v.tree
assert isinstance(tree, Dictionary)
src_dict = tree.as_native()
assert src_dict == result, src_dict
|
aura
|
positive
|
@methodtrace(_logger)
def get_configuration_descriptor(self, dev, config):
desc = _usb_config_desc()
<DeepExtract>
    ret = _lib.openusb_parse_config_desc(_ctx.handle, dev, None, 0, config, byref(desc))
    if hasattr(ret, 'value'):
        ret = ret.value
    if ret != 0:
        raise USBError(_lib.openusb_strerror(ret), ret, _openusb_errno[ret])
</DeepExtract>
desc.extra_descriptors = None
return desc
|
@methodtrace(_logger)
def get_configuration_descriptor(self, dev, config):
desc = _usb_config_desc()
    ret = _lib.openusb_parse_config_desc(_ctx.handle, dev, None, 0, config, byref(desc))
    if hasattr(ret, 'value'):
        ret = ret.value
    if ret != 0:
        raise USBError(_lib.openusb_strerror(ret), ret, _openusb_errno[ret])
desc.extra_descriptors = None
return desc
|
CyKit
|
positive
|
def main():
if not JAR.exists():
print(f'Downloading {JAR_NAME}...')
urllib.request.urlretrieve(JAR_URL, str(JAR))
print('Expanding lexer...')
<DeepExtract>
MODE_P = re.compile('mode ([A-Z_]+)\\;')
TOKEN_P = re.compile('([A-Z_]+) ?\\:.*')
with open(LEXER.parent.parent / (LEXER.name + '.jinja'), 'r') as fh:
content = fh.read()
template = jinja2.Environment(loader=jinja2.BaseLoader).from_string(content)
current_mode = 'DEFAULT_MODE'
tokens = collections.defaultdict(list)
for line in content.split('\n'):
mode_g = MODE_P.match(line)
if mode_g:
current_mode = mode_g.group(1)
token_g = TOKEN_P.match(line)
if token_g:
tokens[current_mode].append(token_g.group(1))
new_content = template.render(tokens=tokens)
with open(str(LEXER), 'w') as fh:
fh.write(new_content)
</DeepExtract>
print('Compiling lexer...')
subprocess.run(['java', '-jar', str(JAR), '-Dlanguage=Python3', str(LEXER), '-o', 'parser'], check=True)
print('Compiling parser...')
subprocess.run(['java', '-jar', str(JAR), '-Dlanguage=Python3', '-no-listener', '-visitor', str(PARSER), '-o', 'parser'], check=True)
print('Done.')
|
def main():
if not JAR.exists():
print(f'Downloading {JAR_NAME}...')
urllib.request.urlretrieve(JAR_URL, str(JAR))
print('Expanding lexer...')
MODE_P = re.compile('mode ([A-Z_]+)\\;')
TOKEN_P = re.compile('([A-Z_]+) ?\\:.*')
with open(LEXER.parent.parent / (LEXER.name + '.jinja'), 'r') as fh:
content = fh.read()
template = jinja2.Environment(loader=jinja2.BaseLoader).from_string(content)
current_mode = 'DEFAULT_MODE'
tokens = collections.defaultdict(list)
for line in content.split('\n'):
mode_g = MODE_P.match(line)
if mode_g:
current_mode = mode_g.group(1)
token_g = TOKEN_P.match(line)
if token_g:
tokens[current_mode].append(token_g.group(1))
new_content = template.render(tokens=tokens)
with open(str(LEXER), 'w') as fh:
fh.write(new_content)
print('Compiling lexer...')
subprocess.run(['java', '-jar', str(JAR), '-Dlanguage=Python3', str(LEXER), '-o', 'parser'], check=True)
print('Compiling parser...')
subprocess.run(['java', '-jar', str(JAR), '-Dlanguage=Python3', '-no-listener', '-visitor', str(PARSER), '-o', 'parser'], check=True)
print('Done.')
|
cf-units
|
positive
|
def sharpen(pix, width, intensity):
A = pix[..., 3]
<DeepExtract>
res = np.zeros(pix.shape, dtype=np.float32)
gcr = gauss_curve(width)
for i in range(-width, width + 1):
if i != 0:
res[:-i, ...] += pix[i:, ...] * gcr[i + width]
res[-i:, ...] += pix[:i, ...] * gcr[i + width]
else:
res += pix * gcr[width]
pix2 = res.copy()
res *= 0.0
for i in range(-width, width + 1):
if i != 0:
res[:, :-i, :] += pix2[:, i:, :] * gcr[i + width]
res[:, -i:, :] += pix2[:, :i, :] * gcr[i + width]
else:
res += pix2 * gcr[width]
gas = res
</DeepExtract>
pix += (pix - gas) * intensity
pix[..., 3] = A
return pix
|
def sharpen(pix, width, intensity):
A = pix[..., 3]
res = np.zeros(pix.shape, dtype=np.float32)
gcr = gauss_curve(width)
for i in range(-width, width + 1):
if i != 0:
res[:-i, ...] += pix[i:, ...] * gcr[i + width]
res[-i:, ...] += pix[:i, ...] * gcr[i + width]
else:
res += pix * gcr[width]
pix2 = res.copy()
res *= 0.0
for i in range(-width, width + 1):
if i != 0:
res[:, :-i, :] += pix2[:, i:, :] * gcr[i + width]
res[:, -i:, :] += pix2[:, :i, :] * gcr[i + width]
else:
res += pix2 * gcr[width]
gas = res
pix += (pix - gas) * intensity
pix[..., 3] = A
return pix
|
blender-texture-tools
|
positive
|
def normals_to_height(image, grid_steps, iterations=2000, intensity=1.0):
(ih, iw) = (image.shape[0], image.shape[1])
u = np.ones((ih, iw), dtype=np.float32) * 0.5
<DeepExtract>
vectors = np.empty((image.shape[0], image.shape[1], 3), dtype=np.float32)
vectors[..., 0] = image[..., 0] - 0.5
vectors[..., 1] = image[..., 1] - 0.5
vectors[..., 2] = image[..., 2] - 0.5
vectors *= 2.0
vectors = vectors
</DeepExtract>
vectors *= intensity
t = np.empty_like(u, dtype=np.float32)
for k in range(grid_steps, -1, -1):
k = 2 ** k
print('grid step:', k)
n = np.roll(vectors[..., 0], k, axis=1)
n -= np.roll(vectors[..., 0], -k, axis=1)
n += np.roll(vectors[..., 1], k, axis=0)
n -= np.roll(vectors[..., 1], -k, axis=0)
n *= 0.125
for ic in range(iterations):
if ic % 100 == 0:
print(ic)
t[:-k, :] = u[k:, :]
t[-k:, :] = u[:k, :]
t[k:, :] += u[:-k, :]
t[:k, :] += u[-k:, :]
t[:, :-k] += u[:, k:]
t[:, -k:] += u[:, :k]
t[:, k:] += u[:, :-k]
t[:, :k] += u[:, -k:]
t *= 0.25
u = t + n
u = -u
u -= np.min(u)
u /= np.max(u)
return np.dstack([u, u, u, image[..., 3]])
|
def normals_to_height(image, grid_steps, iterations=2000, intensity=1.0):
(ih, iw) = (image.shape[0], image.shape[1])
u = np.ones((ih, iw), dtype=np.float32) * 0.5
vectors = np.empty((image.shape[0], image.shape[1], 3), dtype=np.float32)
vectors[..., 0] = image[..., 0] - 0.5
vectors[..., 1] = image[..., 1] - 0.5
vectors[..., 2] = image[..., 2] - 0.5
vectors *= 2.0
vectors = vectors
vectors *= intensity
t = np.empty_like(u, dtype=np.float32)
for k in range(grid_steps, -1, -1):
k = 2 ** k
print('grid step:', k)
n = np.roll(vectors[..., 0], k, axis=1)
n -= np.roll(vectors[..., 0], -k, axis=1)
n += np.roll(vectors[..., 1], k, axis=0)
n -= np.roll(vectors[..., 1], -k, axis=0)
n *= 0.125
for ic in range(iterations):
if ic % 100 == 0:
print(ic)
t[:-k, :] = u[k:, :]
t[-k:, :] = u[:k, :]
t[k:, :] += u[:-k, :]
t[:k, :] += u[-k:, :]
t[:, :-k] += u[:, k:]
t[:, -k:] += u[:, :k]
t[:, k:] += u[:, :-k]
t[:, :k] += u[:, -k:]
t *= 0.25
u = t + n
u = -u
u -= np.min(u)
u /= np.max(u)
return np.dstack([u, u, u, image[..., 3]])
|
blender-texture-tools
|
positive
|
def checklicensewarning(p_apikey, p_orglist, p_timethreshold, p_modeincludeempty=False):
filterlist = []
i = 0
for org in p_orglist:
print('INFO: Checking org %s "%s" ' % (org.id, org.name))
<DeepExtract>
merakirequestthrottler()
try:
r = requests.get('https://%s/api/v0/organizations/%s/licenseState' % (org.shardhost, org.id), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
except:
print('ERROR 03: Unable to contact Meraki cloud')
sys.exit(2)
returnvalue = []
        if r.status_code != requests.codes.ok:
            licensestate = None
        else:
            rjson = r.json()
            licensestate = rjson
</DeepExtract>
if not licensestate is None:
if licensestate['expirationDate'] == 'N/A':
if p_modeincludeempty:
timeremaining = 0
elif licensestate['status'] != 'License Required':
timeremaining = p_timethreshold + 1
else:
timeremaining = 0
else:
<DeepExtract>
mdate = datetime.date(datetime.strptime(licensestate['expirationDate'], '%b %d, %Y UTC'))
today = date.today()
retvalue = int(str(mdate - today).split(' ')[0])
timeremaining = retvalue
</DeepExtract>
if licensestate['status'] != 'OK' or timeremaining <= p_timethreshold:
if licensestate['status'] != 'N/A' or p_modeincludeempty:
filterlist.append(c_organizationdata())
filterlist[i].id = org.id
filterlist[i].name = org.name
filterlist[i].shardhost = org.shardhost
filterlist[i].licensestate = licensestate['status']
filterlist[i].timeremaining = timeremaining
i += 1
else:
print('WARNING: Unable to fetch license state')
return filterlist
|
def checklicensewarning(p_apikey, p_orglist, p_timethreshold, p_modeincludeempty=False):
filterlist = []
i = 0
for org in p_orglist:
print('INFO: Checking org %s "%s" ' % (org.id, org.name))
merakirequestthrottler()
try:
r = requests.get('https://%s/api/v0/organizations/%s/licenseState' % (org.shardhost, org.id), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
except:
print('ERROR 03: Unable to contact Meraki cloud')
sys.exit(2)
returnvalue = []
        if r.status_code != requests.codes.ok:
            licensestate = None
        else:
            rjson = r.json()
            licensestate = rjson
if not licensestate is None:
if licensestate['expirationDate'] == 'N/A':
if p_modeincludeempty:
timeremaining = 0
elif licensestate['status'] != 'License Required':
timeremaining = p_timethreshold + 1
else:
timeremaining = 0
else:
mdate = datetime.date(datetime.strptime(licensestate['expirationDate'], '%b %d, %Y UTC'))
today = date.today()
retvalue = int(str(mdate - today).split(' ')[0])
timeremaining = retvalue
if licensestate['status'] != 'OK' or timeremaining <= p_timethreshold:
if licensestate['status'] != 'N/A' or p_modeincludeempty:
filterlist.append(c_organizationdata())
filterlist[i].id = org.id
filterlist[i].name = org.name
filterlist[i].shardhost = org.shardhost
filterlist[i].licensestate = licensestate['status']
filterlist[i].timeremaining = timeremaining
i += 1
else:
print('WARNING: Unable to fetch license state')
return filterlist
|
automation-scripts
|
positive
|
@property
@provide_session
def previous_ti(self, session=None):
""" The task instance for the task that ran before this task instance """
dag = self.task.dag
if dag:
<DeepExtract>
dr = session.query(DagRun).filter(DagRun.dag_id == self.dag_id, DagRun.execution_date == self.execution_date).first()
dr = dr
</DeepExtract>
if not dr:
previous_scheduled_date = dag.previous_schedule(self.execution_date)
if not previous_scheduled_date:
return None
return TaskInstance(task=self.task, execution_date=previous_scheduled_date)
dr.dag = dag
if dag.catchup:
last_dagrun = dr.get_previous_scheduled_dagrun(session=session)
else:
last_dagrun = dr.get_previous_dagrun(session=session)
if last_dagrun:
return last_dagrun.get_task_instance(self.task_id, session=session)
return None
|
@property
@provide_session
def previous_ti(self, session=None):
""" The task instance for the task that ran before this task instance """
dag = self.task.dag
if dag:
dr = session.query(DagRun).filter(DagRun.dag_id == self.dag_id, DagRun.execution_date == self.execution_date).first()
dr = dr
if not dr:
previous_scheduled_date = dag.previous_schedule(self.execution_date)
if not previous_scheduled_date:
return None
return TaskInstance(task=self.task, execution_date=previous_scheduled_date)
dr.dag = dag
if dag.catchup:
last_dagrun = dr.get_previous_scheduled_dagrun(session=session)
else:
last_dagrun = dr.get_previous_dagrun(session=session)
if last_dagrun:
return last_dagrun.get_task_instance(self.task_id, session=session)
return None
|
docker-airflow
|
positive
|
def get_design_matrices(df, dependent_variable, independent_variables, interactions=[]):
<DeepExtract>
lhs_var = dependent_variable
rhs_vars = independent_variables
if 'name' in dependent_variable:
lhs_var = dependent_variable['name']
if 'name' in independent_variables[0]:
new_rhs_vars = []
for iv in independent_variables:
if type(iv) is list:
new_rhs_vars.append([x['name'] for x in iv])
elif 'name' in iv:
new_rhs_vars.append(iv['name'])
else:
new_rhs_vars.append(iv)
rhs_vars = new_rhs_vars
if interactions:
first_interaction = interactions[0]
if 'name' in first_interaction:
new_interactions = []
for interaction in interactions:
new_interactions.append([term['name'] for term in interaction])
rhs_interactions = new_interactions
else:
rhs_interactions = interactions
lhs = [Term([LookupFactor(lhs_var)])]
rhs = [Term([])]
for rhs_var in rhs_vars:
if type(rhs_var) is list:
rhs += [Term([LookupFactor(term) for term in rhs_var])]
elif rhs_var in transformations:
transformation = transformations[rhs_var]
if transformation == 'square':
rhs += [Term([LookupFactor(rhs_var)])]
format_string = transformation_to_format_string[transformation]
rhs += [Term([EvalFactor(format_string.format(rhs_var))])]
else:
rhs += [Term([LookupFactor(rhs_var)])]
if interactions:
rhs += [Term([LookupFactor(term) for term in interaction]) for interaction in rhs_interactions]
model = ModelDesc(lhs, rhs)
patsy_model = model
</DeepExtract>
(y, X) = dmatrices(patsy_model, df, return_type='dataframe')
return (y, X)
|
def get_design_matrices(df, dependent_variable, independent_variables, interactions=[]):
lhs_var = dependent_variable
rhs_vars = independent_variables
if 'name' in dependent_variable:
lhs_var = dependent_variable['name']
if 'name' in independent_variables[0]:
new_rhs_vars = []
for iv in independent_variables:
if type(iv) is list:
new_rhs_vars.append([x['name'] for x in iv])
elif 'name' in iv:
new_rhs_vars.append(iv['name'])
else:
new_rhs_vars.append(iv)
rhs_vars = new_rhs_vars
if interactions:
first_interaction = interactions[0]
if 'name' in first_interaction:
new_interactions = []
for interaction in interactions:
new_interactions.append([term['name'] for term in interaction])
rhs_interactions = new_interactions
else:
rhs_interactions = interactions
lhs = [Term([LookupFactor(lhs_var)])]
rhs = [Term([])]
for rhs_var in rhs_vars:
if type(rhs_var) is list:
rhs += [Term([LookupFactor(term) for term in rhs_var])]
elif rhs_var in transformations:
transformation = transformations[rhs_var]
if transformation == 'square':
rhs += [Term([LookupFactor(rhs_var)])]
format_string = transformation_to_format_string[transformation]
rhs += [Term([EvalFactor(format_string.format(rhs_var))])]
else:
rhs += [Term([LookupFactor(rhs_var)])]
if interactions:
rhs += [Term([LookupFactor(term) for term in interaction]) for interaction in rhs_interactions]
model = ModelDesc(lhs, rhs)
patsy_model = model
(y, X) = dmatrices(patsy_model, df, return_type='dataframe')
return (y, X)
|
DIVE-backend
|
positive
|
def sync(self, depsgraph):
scene = depsgraph.scene
settings = scene.hdusd.final
view_layer = depsgraph.view_layer
self.render_layer_name = view_layer.name
self.status_title = f'{scene.name}: {self.render_layer_name}'
<DeepExtract>
self.render_engine.update_progress(0.0)
self.render_engine.update_stats(self.status_title, 'Start syncing')
log(f"Status [{0.0:.2}]: {'Start syncing'}")
</DeepExtract>
time_begin = time.perf_counter()
border = ((0, 0), (1, 1)) if not scene.render.use_border else ((scene.render.border_min_x, scene.render.border_min_y), (scene.render.border_max_x - scene.render.border_min_x, scene.render.border_max_y - scene.render.border_min_y))
screen_width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
screen_height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)
self.width = int(screen_width * border[1][0])
self.height = int(screen_height * border[1][1])
<DeepExtract>
pass
</DeepExtract>
usd_utils.set_delegate_variant_stage(self.stage, settings.delegate_name)
if self.render_engine.test_break():
log.warn('Syncing stopped by user termination')
return
self.render_engine.bl_use_gpu_context = settings.is_gl_delegate
log.info('Scene synchronization time:', time_str(time.perf_counter() - time_begin))
<DeepExtract>
self.render_engine.update_progress(0.0)
self.render_engine.update_stats(self.status_title, 'Start render')
log(f"Status [{0.0:.2}]: {'Start render'}")
</DeepExtract>
|
def sync(self, depsgraph):
scene = depsgraph.scene
settings = scene.hdusd.final
view_layer = depsgraph.view_layer
self.render_layer_name = view_layer.name
self.status_title = f'{scene.name}: {self.render_layer_name}'
self.render_engine.update_progress(0.0)
self.render_engine.update_stats(self.status_title, 'Start syncing')
log(f"Status [{0.0:.2}]: {'Start syncing'}")
time_begin = time.perf_counter()
border = ((0, 0), (1, 1)) if not scene.render.use_border else ((scene.render.border_min_x, scene.render.border_min_y), (scene.render.border_max_x - scene.render.border_min_x, scene.render.border_max_y - scene.render.border_min_y))
screen_width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
screen_height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)
self.width = int(screen_width * border[1][0])
self.height = int(screen_height * border[1][1])
pass
usd_utils.set_delegate_variant_stage(self.stage, settings.delegate_name)
if self.render_engine.test_break():
log.warn('Syncing stopped by user termination')
return
self.render_engine.bl_use_gpu_context = settings.is_gl_delegate
log.info('Scene synchronization time:', time_str(time.perf_counter() - time_begin))
self.render_engine.update_progress(0.0)
self.render_engine.update_stats(self.status_title, 'Start render')
log(f"Status [{0.0:.2}]: {'Start render'}")
|
BlenderUSDHydraAddon
|
positive
|
def convert_resnet_bottleneck_deps(deps):
assert len(deps) in [53, 104, 155]
res_n = len(deps) - 3
num_blocks = resnet_n_to_num_blocks[res_n]
<DeepExtract>
start_layer_idx_of_stage = [2, 3 + num_blocks[0] * 3, 4 + (num_blocks[0] + num_blocks[1]) * 3, 5 + (num_blocks[0] + num_blocks[1] + num_blocks[2]) * 3]
</DeepExtract>
d = [deps[0]]
for stage_idx in range(4):
tmp = []
assert deps[start_layer_idx_of_stage[stage_idx] - 1] == deps[start_layer_idx_of_stage[stage_idx] + 2]
for i in range(num_blocks[stage_idx]):
tmp.append([deps[start_layer_idx_of_stage[stage_idx] + i * 3], deps[start_layer_idx_of_stage[stage_idx] + 1 + i * 3], deps[start_layer_idx_of_stage[stage_idx] + 2 + i * 3]])
d.append(tmp)
return d
|
def convert_resnet_bottleneck_deps(deps):
assert len(deps) in [53, 104, 155]
res_n = len(deps) - 3
num_blocks = resnet_n_to_num_blocks[res_n]
start_layer_idx_of_stage = [2, 3 + num_blocks[0] * 3, 4 + (num_blocks[0] + num_blocks[1]) * 3, 5 + (num_blocks[0] + num_blocks[1] + num_blocks[2]) * 3]
d = [deps[0]]
for stage_idx in range(4):
tmp = []
assert deps[start_layer_idx_of_stage[stage_idx] - 1] == deps[start_layer_idx_of_stage[stage_idx] + 2]
for i in range(num_blocks[stage_idx]):
tmp.append([deps[start_layer_idx_of_stage[stage_idx] + i * 3], deps[start_layer_idx_of_stage[stage_idx] + 1 + i * 3], deps[start_layer_idx_of_stage[stage_idx] + 2 + i * 3]])
d.append(tmp)
return d
|
ACNet
|
positive
|
def show_main_view(self, from_main):
"""Shows the main window depending on where the application comes from.
If the from_main flag is true, the configuration comes from the previous GUI views. Otherwise, the configuration
    comes from a configuration file. Either way, the main view will be shown with the proper configuration.
Arguments:
from_main {bool} -- tells if the configuration comes from either configuration file or GUI.
"""
if not from_main:
layout_configuration = self.layout_selector.get_config()
<DeepExtract>
for i in reversed(range(self.parent.main_layout.count())):
widgetToRemove = self.parent.main_layout.itemAt(i).widget()
self.parent.main_layout.removeWidget(widgetToRemove)
widgetToRemove.setParent(None)
</DeepExtract>
else:
layout_configuration = None
self.main_view = MainView(layout_configuration, self.configuration, self.controller, self.parent)
self.parent.main_layout.addWidget(self.main_view)
<DeepExtract>
self.w = QFrame(self.parent)
self.w.setFixedSize(WIDTH, HEIGHT)
self.w.setStyleSheet('background-color: rgba(51,51,51,1)')
self.w.show()
effect = QGraphicsOpacityEffect()
self.w.setGraphicsEffect(effect)
self.animation = QPropertyAnimation(effect, b'opacity')
self.animation.setDuration(500)
self.animation.setStartValue(1)
self.animation.setEndValue(0)
self.animation.start(QPropertyAnimation.DeleteWhenStopped)
self.animation.finished.connect(self.fade_animation)
</DeepExtract>
<DeepExtract>
self.thread_gui.start()
</DeepExtract>
|
def show_main_view(self, from_main):
"""Shows the main window depending on where the application comes from.
If the from_main flag is true, the configuration comes from the previous GUI views. Otherwise, the configuration
    comes from a configuration file. Either way, the main view will be shown with the proper configuration.
Arguments:
from_main {bool} -- tells if the configuration comes from either configuration file or GUI.
"""
if not from_main:
layout_configuration = self.layout_selector.get_config()
for i in reversed(range(self.parent.main_layout.count())):
widgetToRemove = self.parent.main_layout.itemAt(i).widget()
self.parent.main_layout.removeWidget(widgetToRemove)
widgetToRemove.setParent(None)
else:
layout_configuration = None
self.main_view = MainView(layout_configuration, self.configuration, self.controller, self.parent)
self.parent.main_layout.addWidget(self.main_view)
self.w = QFrame(self.parent)
self.w.setFixedSize(WIDTH, HEIGHT)
self.w.setStyleSheet('background-color: rgba(51,51,51,1)')
self.w.show()
effect = QGraphicsOpacityEffect()
self.w.setGraphicsEffect(effect)
self.animation = QPropertyAnimation(effect, b'opacity')
self.animation.setDuration(500)
self.animation.setStartValue(1)
self.animation.setEndValue(0)
self.animation.start(QPropertyAnimation.DeleteWhenStopped)
self.animation.finished.connect(self.fade_animation)
self.thread_gui.start()
|
BehaviorMetrics
|
positive
|
def async_check_update(self, now, callback=None):
"""Perform update check, run as target of background thread"""
self._async_checking = True
if self._verbose:
print('{} BG thread: Checking for update now in background'.format(self._addon))
try:
<DeepExtract>
if self._verbose:
print('Checking for update function')
self._error = None
self._error_msg = None
if self._update_ready != None and now == False:
return (self._update_ready, self._update_version, self._update_link)
if self._current_version == None:
raise ValueError('current_version not yet defined')
if self._repo == None:
raise ValueError('repo not yet defined')
if self._user == None:
raise ValueError('username not yet defined')
self.set_updater_json()
if now == False and self.past_interval_timestamp() == False:
if self._verbose:
print('Aborting check for updated, check interval not reached')
return (False, None, None)
if self._fake_install == True:
if self._verbose:
print('fake_install = True, setting fake version as ready')
self._update_ready = True
self._update_version = '(999,999,999)'
self._update_link = 'http://127.0.0.1'
return (self._update_ready, self._update_version, self._update_link)
self.get_tags()
self._json['last_check'] = str(datetime.now())
self.save_updater_json()
new_version = self.version_tuple_from_text(self.tag_latest)
if len(self._tags) == 0:
self._update_ready = False
self._update_version = None
self._update_link = None
return (False, None, None)
if self._include_branches == False:
link = self.select_link(self, self._tags[0])
else:
n = len(self._include_branch_list)
if len(self._tags) == n:
link = self.select_link(self, self._tags[0])
else:
link = self.select_link(self, self._tags[n])
if new_version == ():
self._update_ready = False
self._update_version = None
self._update_link = None
return (False, None, None)
elif str(new_version).lower() in self._include_branch_list:
if self._include_branch_autocheck == False:
self._update_ready = False
self._update_version = new_version
self._update_link = link
self.save_updater_json()
return (True, new_version, link)
else:
raise ValueError('include_branch_autocheck: NOT YET DEVELOPED')
elif new_version > self._current_version:
self._update_ready = True
self._update_version = new_version
self._update_link = link
self.save_updater_json()
return (True, new_version, link)
self._update_ready = False
self._update_version = None
self._update_link = None
return (False, None, None)
</DeepExtract>
except Exception as exception:
print('Checking for update error:')
print(exception)
<DeepExtract>
if self._use_print_traces:
traceback.print_exc()
</DeepExtract>
if not self._error:
self._update_ready = False
self._update_version = None
self._update_link = None
self._error = 'Error occurred'
self._error_msg = 'Encountered an error while checking for updates'
self._async_checking = False
self._check_thread = None
if self._verbose:
print('{} BG thread: Finished checking for update, doing callback'.format(self._addon))
if callback:
callback(self._update_ready)
|
def async_check_update(self, now, callback=None):
"""Perform update check, run as target of background thread"""
self._async_checking = True
if self._verbose:
print('{} BG thread: Checking for update now in background'.format(self._addon))
try:
if self._verbose:
print('Checking for update function')
self._error = None
self._error_msg = None
if self._update_ready != None and now == False:
return (self._update_ready, self._update_version, self._update_link)
if self._current_version == None:
raise ValueError('current_version not yet defined')
if self._repo == None:
raise ValueError('repo not yet defined')
if self._user == None:
raise ValueError('username not yet defined')
self.set_updater_json()
if now == False and self.past_interval_timestamp() == False:
if self._verbose:
print('Aborting check for updated, check interval not reached')
return (False, None, None)
if self._fake_install == True:
if self._verbose:
print('fake_install = True, setting fake version as ready')
self._update_ready = True
self._update_version = '(999,999,999)'
self._update_link = 'http://127.0.0.1'
return (self._update_ready, self._update_version, self._update_link)
self.get_tags()
self._json['last_check'] = str(datetime.now())
self.save_updater_json()
new_version = self.version_tuple_from_text(self.tag_latest)
if len(self._tags) == 0:
self._update_ready = False
self._update_version = None
self._update_link = None
return (False, None, None)
if self._include_branches == False:
link = self.select_link(self, self._tags[0])
else:
n = len(self._include_branch_list)
if len(self._tags) == n:
link = self.select_link(self, self._tags[0])
else:
link = self.select_link(self, self._tags[n])
if new_version == ():
self._update_ready = False
self._update_version = None
self._update_link = None
return (False, None, None)
elif str(new_version).lower() in self._include_branch_list:
if self._include_branch_autocheck == False:
self._update_ready = False
self._update_version = new_version
self._update_link = link
self.save_updater_json()
return (True, new_version, link)
else:
raise ValueError('include_branch_autocheck: NOT YET DEVELOPED')
elif new_version > self._current_version:
self._update_ready = True
self._update_version = new_version
self._update_link = link
self.save_updater_json()
return (True, new_version, link)
self._update_ready = False
self._update_version = None
self._update_link = None
return (False, None, None)
except Exception as exception:
print('Checking for update error:')
print(exception)
if self._use_print_traces:
traceback.print_exc()
if not self._error:
self._update_ready = False
self._update_version = None
self._update_link = None
self._error = 'Error occurred'
self._error_msg = 'Encountered an error while checking for updates'
self._async_checking = False
self._check_thread = None
if self._verbose:
print('{} BG thread: Finished checking for update, doing callback'.format(self._addon))
if callback:
callback(self._update_ready)
|
building_tools
|
positive
|
@mock.patch('analytics_dashboard.courses.permissions.OAuthAPIClient')
def test_get_user_course_permissions(self, mock_client):
"""
Verify course permissions are retrieved and cached, even when paged.
"""
page_size = 5
course_ids = [str(x) for x in range(page_size * 2)]
<DeepExtract>
paged_responses = []
course_id_pages = [course_ids[x:x + page_size] for x in range(0, len(course_ids), page_size)]
for (page, course_ids_for_page) in enumerate(course_id_pages, start=1):
if page == len(course_id_pages):
next_page_url = None
else:
next_page_url = 'http://course-api-host/course_ids?page={}'.format(page + 1)
response = {'pagination': {'next': next_page_url}, 'results': course_ids_for_page}
paged_responses.append(response)
mock_client.return_value.get.return_value.json.side_effect = paged_responses
expected_calls = self._get_expected_permissions_client_calls(len(course_id_pages))
</DeepExtract>
self.assertCountEqual(permissions.get_user_course_permissions(self.user), course_ids)
self.assertCountEqual(expected_calls, mock_client.mock_calls)
mock_client.reset_mock()
<DeepExtract>
paged_responses = []
course_id_pages = [[self.new_course_id][x:x + page_size] for x in range(0, len([self.new_course_id]), page_size)]
for (page, course_ids_for_page) in enumerate(course_id_pages, start=1):
if page == len(course_id_pages):
next_page_url = None
else:
next_page_url = 'http://course-api-host/course_ids?page={}'.format(page + 1)
response = {'pagination': {'next': next_page_url}, 'results': course_ids_for_page}
paged_responses.append(response)
mock_client.return_value.get.return_value.json.side_effect = paged_responses
return self._get_expected_permissions_client_calls(len(course_id_pages))
</DeepExtract>
self.assertCountEqual(permissions.get_user_course_permissions(self.user), course_ids)
self.assertFalse(mock_client.mock_calls)
mock_client.reset_mock()
self.assertCountEqual(permissions.get_user_course_permissions(self.user), course_ids)
self.assertFalse(mock_client.mock_calls)
|
@mock.patch('analytics_dashboard.courses.permissions.OAuthAPIClient')
def test_get_user_course_permissions(self, mock_client):
"""
Verify course permissions are retrieved and cached, even when paged.
"""
page_size = 5
course_ids = [str(x) for x in range(page_size * 2)]
paged_responses = []
course_id_pages = [course_ids[x:x + page_size] for x in range(0, len(course_ids), page_size)]
for (page, course_ids_for_page) in enumerate(course_id_pages, start=1):
if page == len(course_id_pages):
next_page_url = None
else:
next_page_url = 'http://course-api-host/course_ids?page={}'.format(page + 1)
response = {'pagination': {'next': next_page_url}, 'results': course_ids_for_page}
paged_responses.append(response)
mock_client.return_value.get.return_value.json.side_effect = paged_responses
expected_calls = self._get_expected_permissions_client_calls(len(course_id_pages))
self.assertCountEqual(permissions.get_user_course_permissions(self.user), course_ids)
self.assertCountEqual(expected_calls, mock_client.mock_calls)
mock_client.reset_mock()
paged_responses = []
course_id_pages = [[self.new_course_id][x:x + page_size] for x in range(0, len([self.new_course_id]), page_size)]
for (page, course_ids_for_page) in enumerate(course_id_pages, start=1):
if page == len(course_id_pages):
next_page_url = None
else:
next_page_url = 'http://course-api-host/course_ids?page={}'.format(page + 1)
response = {'pagination': {'next': next_page_url}, 'results': course_ids_for_page}
paged_responses.append(response)
mock_client.return_value.get.return_value.json.side_effect = paged_responses
return self._get_expected_permissions_client_calls(len(course_id_pages))
self.assertCountEqual(permissions.get_user_course_permissions(self.user), course_ids)
self.assertFalse(mock_client.mock_calls)
mock_client.reset_mock()
self.assertCountEqual(permissions.get_user_course_permissions(self.user), course_ids)
self.assertFalse(mock_client.mock_calls)
|
edx-analytics-dashboard
|
positive
|
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(examples)))
<DeepExtract>
if isinstance(example, PaddingInputExample):
feature = InputFeatures(input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
elif len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:max_seq_length - 2]
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info('*** Example ***')
tf.logging.info('guid: %s' % example.guid)
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens]))
tf.logging.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
tf.logging.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
tf.logging.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
tf.logging.info('label: %s (id = %d)' % (example.label, label_id))
feature = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True)
feature = feature
</DeepExtract>
features.append(feature)
return features
|
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(examples)))
if isinstance(example, PaddingInputExample):
feature = InputFeatures(input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
elif len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:max_seq_length - 2]
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info('*** Example ***')
tf.logging.info('guid: %s' % example.guid)
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens]))
tf.logging.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
tf.logging.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
tf.logging.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
tf.logging.info('label: %s (id = %d)' % (example.label, label_id))
feature = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True)
feature = feature
features.append(feature)
return features
|
BERT-for-Sequence-Labeling-and-Text-Classification
|
positive
|
def loadfat_sect(self, sect):
"""
Adds the indexes of the given sector to the FAT
:param sect: string containing the first FAT sector, or array of long integers
:returns: index of last FAT sector.
"""
if isinstance(sect, array.array):
fat1 = sect
else:
<DeepExtract>
a = array.array(UINT32, sect)
if sys.byteorder == 'big':
a.byteswap()
fat1 = a
</DeepExtract>
<DeepExtract>
if not DEBUG_MODE:
return
VPL = 8
tab = array.array(UINT32, sect)
if sys.byteorder == 'big':
tab.byteswap()
nbsect = len(tab)
nlines = (nbsect + VPL - 1) // VPL
print('index', end=' ')
for i in range(VPL):
print('%8X' % i, end=' ')
print()
for l in range(nlines):
index = l * VPL
print('%8X:' % (firstindex + index), end=' ')
for i in range(index, index + VPL):
if i >= nbsect:
break
sect = tab[i]
name = '%8X' % sect
print(name, end=' ')
print()
</DeepExtract>
for isect in fat1:
isect = isect & 4294967295
debug('isect = %X' % isect)
if isect == ENDOFCHAIN or isect == FREESECT:
debug('found end of sector chain')
break
<DeepExtract>
try:
self.fp.seek(self.sectorsize * (isect + 1))
except:
debug('getsect(): sect=%X, seek=%d, filesize=%d' % (isect, self.sectorsize * (isect + 1), self._filesize))
self._raise_defect(DEFECT_FATAL, 'OLE sector index out of range')
sector = self.fp.read(self.sectorsize)
if len(sector) != self.sectorsize:
debug('getsect(): sect=%X, read=%d, sectorsize=%d' % (isect, len(sector), self.sectorsize))
self._raise_defect(DEFECT_FATAL, 'incomplete OLE sector')
s = sector
</DeepExtract>
<DeepExtract>
a = array.array(UINT32, s)
if sys.byteorder == 'big':
a.byteswap()
nextfat = a
</DeepExtract>
self.fat = self.fat + nextfat
return isect
|
def loadfat_sect(self, sect):
"""
Adds the indexes of the given sector to the FAT
:param sect: string containing the first FAT sector, or array of long integers
:returns: index of last FAT sector.
"""
if isinstance(sect, array.array):
fat1 = sect
else:
a = array.array(UINT32, sect)
if sys.byteorder == 'big':
a.byteswap()
fat1 = a
if not DEBUG_MODE:
return
VPL = 8
tab = array.array(UINT32, sect)
if sys.byteorder == 'big':
tab.byteswap()
nbsect = len(tab)
nlines = (nbsect + VPL - 1) // VPL
print('index', end=' ')
for i in range(VPL):
print('%8X' % i, end=' ')
print()
for l in range(nlines):
index = l * VPL
print('%8X:' % (firstindex + index), end=' ')
for i in range(index, index + VPL):
if i >= nbsect:
break
sect = tab[i]
name = '%8X' % sect
print(name, end=' ')
print()
for isect in fat1:
isect = isect & 4294967295
debug('isect = %X' % isect)
if isect == ENDOFCHAIN or isect == FREESECT:
debug('found end of sector chain')
break
try:
self.fp.seek(self.sectorsize * (isect + 1))
except:
debug('getsect(): sect=%X, seek=%d, filesize=%d' % (isect, self.sectorsize * (isect + 1), self._filesize))
self._raise_defect(DEFECT_FATAL, 'OLE sector index out of range')
sector = self.fp.read(self.sectorsize)
if len(sector) != self.sectorsize:
debug('getsect(): sect=%X, read=%d, sectorsize=%d' % (isect, len(sector), self.sectorsize))
self._raise_defect(DEFECT_FATAL, 'incomplete OLE sector')
s = sector
a = array.array(UINT32, s)
if sys.byteorder == 'big':
a.byteswap()
nextfat = a
self.fat = self.fat + nextfat
return isect
|
crackerjack
|
positive
|
def _locate_or_create():
home_config = path.expanduser('~/.cephdeploy.conf')
locations = [path.join(os.getcwd(), 'cephdeploy.conf'), home_config]
for location in locations:
if path.exists(location):
logger.debug('found configuration file at: %s' % location)
return location
logger.info('could not find configuration file, will create one in $HOME')
<DeepExtract>
home_config = home_config or path.expanduser('~/.cephdeploy.conf')
logger.debug('creating new configuration file: %s' % home_config)
with open(home_config, 'w') as cd_conf:
cd_conf.write(cd_conf_template)
</DeepExtract>
return home_config
|
def _locate_or_create():
home_config = path.expanduser('~/.cephdeploy.conf')
locations = [path.join(os.getcwd(), 'cephdeploy.conf'), home_config]
for location in locations:
if path.exists(location):
logger.debug('found configuration file at: %s' % location)
return location
logger.info('could not find configuration file, will create one in $HOME')
home_config = home_config or path.expanduser('~/.cephdeploy.conf')
logger.debug('creating new configuration file: %s' % home_config)
with open(home_config, 'w') as cd_conf:
cd_conf.write(cd_conf_template)
return home_config
|
ceph-deploy
|
positive
|
@_wrapdll(cl_command_queue, cl_command_queue_info, size_t, void_p, P(size_t))
def clGetCommandQueueInfo(queue, param_name):
"""
:param queue: :class:`cl_command_queue`.
:param param_name: One of the :class:`cl_command_queue_info` values.
>>> q = clCreateCommandQueue()
>>> q.context # doctest: +ELLIPSIS
<cl_context ...>
>>> q.device # doctest: +ELLIPSIS
<cl_device ...>
>>> q.properties # doctest: +ELLIPSIS
NONE
>>> q.reference_count
1
"""
if param_name == cl_command_queue_info.CL_QUEUE_CONTEXT:
param_value = cl_context()
clGetCommandQueueInfo.call(queue, param_name, sizeof(param_value), byref(param_value), None)
<DeepExtract>
clRetainContext.call(param_value)
</DeepExtract>
return param_value
elif param_name == cl_command_queue_info.CL_QUEUE_DEVICE:
param_value = cl_device()
clGetCommandQueueInfo.call(queue, param_name, sizeof(param_value), byref(param_value), None)
return param_value
elif param_name == cl_command_queue_info.CL_QUEUE_PROPERTIES:
param_value = cl_command_queue_properties()
clGetCommandQueueInfo.call(queue, param_name, sizeof(param_value), byref(param_value), None)
return param_value
elif param_name == cl_command_queue_info.CL_QUEUE_REFERENCE_COUNT:
param_value = cl_uint()
clGetCommandQueueInfo.call(queue, param_name, sizeof(param_value), byref(param_value), None)
return int(param_value.value)
else:
raise ValueError('Unrecognized parameter %s' % param_name)
|
@_wrapdll(cl_command_queue, cl_command_queue_info, size_t, void_p, P(size_t))
def clGetCommandQueueInfo(queue, param_name):
"""
:param queue: :class:`cl_command_queue`.
:param param_name: One of the :class:`cl_command_queue_info` values.
>>> q = clCreateCommandQueue()
>>> q.context # doctest: +ELLIPSIS
<cl_context ...>
>>> q.device # doctest: +ELLIPSIS
<cl_device ...>
>>> q.properties # doctest: +ELLIPSIS
NONE
>>> q.reference_count
1
"""
if param_name == cl_command_queue_info.CL_QUEUE_CONTEXT:
param_value = cl_context()
clGetCommandQueueInfo.call(queue, param_name, sizeof(param_value), byref(param_value), None)
clRetainContext.call(param_value)
return param_value
elif param_name == cl_command_queue_info.CL_QUEUE_DEVICE:
param_value = cl_device()
clGetCommandQueueInfo.call(queue, param_name, sizeof(param_value), byref(param_value), None)
return param_value
elif param_name == cl_command_queue_info.CL_QUEUE_PROPERTIES:
param_value = cl_command_queue_properties()
clGetCommandQueueInfo.call(queue, param_name, sizeof(param_value), byref(param_value), None)
return param_value
elif param_name == cl_command_queue_info.CL_QUEUE_REFERENCE_COUNT:
param_value = cl_uint()
clGetCommandQueueInfo.call(queue, param_name, sizeof(param_value), byref(param_value), None)
return int(param_value.value)
else:
raise ValueError('Unrecognized parameter %s' % param_name)
|
blender-texture-tools
|
positive
|
def _generate_input_code(nodes: list, job_name: str, output_directory: str):
""" Generates code for loading inputs. """
schemas = {}
close_ops = filter(lambda op_node: isinstance(op_node, Close), nodes)
my_close_ops = filter(lambda close_op: self.pid in close_op.get_in_rel().stored_with, close_ops)
hdfs_import_statements = []
import_statements = []
for close_op in my_close_ops:
<DeepExtract>
in_rel = close_op.get_in_rel()
in_cols = in_rel.columns
out_rel = close_op.out_rel
out_cols = out_rel.columns
col_defs = []
col_def_template = open('{0}/col_def.tmpl'.format(self.template_directory), 'r').read()
for (in_col, out_col) in zip(in_cols, out_cols):
col_data = {'IN_NAME': in_col.get_name(), 'OUT_NAME': out_col.get_name(), 'TYPE': 'uint32'}
col_defs.append(pystache.render(col_def_template, col_data))
col_def_str = '\n'.join(col_defs)
rel_def_template = open('{0}/rel_def.tmpl'.format(self.template_directory), 'r').read()
rel_data = {'NAME': close_op.get_in_rel().name, 'COL_DEFS': col_def_str}
rel_def_header = ','.join([c.name for c in in_cols])
rel_def_str = pystache.render(rel_def_template, rel_data)
(name, schema, header) = (in_rel.name, rel_def_str, rel_def_header)
</DeepExtract>
schemas[name] = schema
if self.sm_config.use_hdfs:
hdfs_import_statements.append(self._generate_hdfs_import(close_op, header, job_name)[:-1])
else:
hdfs_import_statements.append('cp {} {}'.format(self.config.output_path + '/' + name + '.csv', self.config.code_path + '/' + job_name + '/' + name + '.csv'))
import_statements.append(self._generate_csv_import(close_op, output_directory, job_name)[:-1])
input_code = '&&'.join(import_statements)
if self.sm_config.use_docker:
top_level_template = open('{0}/csv_import_top_level.tmpl'.format(self.template_directory), 'r').read()
else:
top_level_template = open('{0}/csv_import_no_docker.tmpl'.format(self.template_directory), 'r').read()
top_level_data = {'SHAREMIND_HOME': self.sm_config.home_path, 'HDFS_IMPORTS': '\n'.join(hdfs_import_statements), 'IMPORTS': input_code}
return (schemas, pystache.render(top_level_template, top_level_data))
|
def _generate_input_code(nodes: list, job_name: str, output_directory: str):
""" Generates code for loading inputs. """
schemas = {}
close_ops = filter(lambda op_node: isinstance(op_node, Close), nodes)
my_close_ops = filter(lambda close_op: self.pid in close_op.get_in_rel().stored_with, close_ops)
hdfs_import_statements = []
import_statements = []
for close_op in my_close_ops:
in_rel = close_op.get_in_rel()
in_cols = in_rel.columns
out_rel = close_op.out_rel
out_cols = out_rel.columns
col_defs = []
col_def_template = open('{0}/col_def.tmpl'.format(self.template_directory), 'r').read()
for (in_col, out_col) in zip(in_cols, out_cols):
col_data = {'IN_NAME': in_col.get_name(), 'OUT_NAME': out_col.get_name(), 'TYPE': 'uint32'}
col_defs.append(pystache.render(col_def_template, col_data))
col_def_str = '\n'.join(col_defs)
rel_def_template = open('{0}/rel_def.tmpl'.format(self.template_directory), 'r').read()
rel_data = {'NAME': close_op.get_in_rel().name, 'COL_DEFS': col_def_str}
rel_def_header = ','.join([c.name for c in in_cols])
rel_def_str = pystache.render(rel_def_template, rel_data)
(name, schema, header) = (in_rel.name, rel_def_str, rel_def_header)
schemas[name] = schema
if self.sm_config.use_hdfs:
hdfs_import_statements.append(self._generate_hdfs_import(close_op, header, job_name)[:-1])
else:
hdfs_import_statements.append('cp {} {}'.format(self.config.output_path + '/' + name + '.csv', self.config.code_path + '/' + job_name + '/' + name + '.csv'))
import_statements.append(self._generate_csv_import(close_op, output_directory, job_name)[:-1])
input_code = '&&'.join(import_statements)
if self.sm_config.use_docker:
top_level_template = open('{0}/csv_import_top_level.tmpl'.format(self.template_directory), 'r').read()
else:
top_level_template = open('{0}/csv_import_no_docker.tmpl'.format(self.template_directory), 'r').read()
top_level_data = {'SHAREMIND_HOME': self.sm_config.home_path, 'HDFS_IMPORTS': '\n'.join(hdfs_import_statements), 'IMPORTS': input_code}
return (schemas, pystache.render(top_level_template, top_level_data))
|
conclave
|
positive
|
def get_frame_from_container_ipdu(pdu, target_frame, ea, float_factory, headers_are_littleendian):
target_frame.is_fd = True
pdus = ea.follow_all_ref(pdu, 'CONTAINED-PDU-TRIGGERING-REF')
header_type = ea.get_child(pdu, 'HEADER-TYPE').text
header_type_params = {'SHORT-HEADER': (24, 8), 'LONG-HEADER': (32, 32)}
if header_type in header_type_params:
mux_size = header_type_params[header_type]
target_frame.add_signal(canmatrix.Signal(start_bit=0, size=mux_size[0], name='Header_ID', is_little_endian=headers_are_littleendian))
target_frame.add_signal(canmatrix.Signal(start_bit=mux_size[0], size=mux_size[1], name='Header_DLC', is_little_endian=headers_are_littleendian))
ipdus_refs = []
for cpdu in pdus:
ipdu = ea.follow_ref(cpdu, 'I-PDU-REF')
if ipdu in ipdus_refs:
continue
ipdus_refs.append(ipdu)
timing_spec = ea.get_child(ipdu, 'I-PDU-TIMING-SPECIFICATION')
if timing_spec is None:
timing_spec = ea.get_child(ipdu, 'I-PDU-TIMING-SPECIFICATIONS')
cyclic_timing = ea.get_child(timing_spec, 'CYCLIC-TIMING')
repeating_time = ea.get_child(cyclic_timing, 'REPEATING-TIME')
cycle_time = 0
value = ea.get_child(repeating_time, 'VALUE')
if value is not None:
cycle_time = int(float_factory(value.text) * 1000)
else:
time_period = ea.get_child(cyclic_timing, 'TIME-PERIOD')
value = ea.get_child(time_period, 'VALUE')
if value is not None:
cycle_time = int(float_factory(value.text) * 1000)
try:
if header_type == 'SHORT-HEADER':
header_id = ea.get_child(ipdu, 'HEADER-ID-SHORT-HEADER').text
elif header_type == 'LONG-HEADER':
header_id = ea.get_child(ipdu, 'HEADER-ID-LONG-HEADER').text
else:
header_id = None
except AttributeError:
header_id = None
if header_id is not None:
header_id = int(header_id, 0)
if ipdu is not None and 'SECURED-I-PDU' in ipdu.tag:
secured_i_pdu_name = ea.get_element_name(ipdu)
payload = ea.follow_ref(ipdu, 'PAYLOAD-REF')
ipdu = ea.follow_ref(payload, 'I-PDU-REF')
logger.info("found secured pdu '%s', dissolved to '%s'", secured_i_pdu_name, ea.get_element_name(ipdu))
try:
offset = int(ea.get_child(ipdu, 'OFFSET').text, 0) * 8
except:
offset = 0
try:
pdu_type = ipdu.attrib['DEST']
except KeyError:
pdu_type = ''
try:
pdu_port_type = ea.get_child(cpdu, 'I-PDU-PORT-REF').attrib['DEST']
except (AttributeError, KeyError):
pdu_port_type = ''
ipdu_length = int(ea.get_child(ipdu, 'LENGTH').text, 0)
ipdu_name = ea.get_element_name(ipdu)
ipdu_triggering_name = ea.get_element_name(cpdu)
target_pdu = canmatrix.Pdu(name=ipdu_name, size=ipdu_length, id=header_id, triggering_name=ipdu_triggering_name, pdu_type=pdu_type, port_type=pdu_port_type, cycle_time=cycle_time)
pdu_sig_mapping = ea.get_children(ipdu, 'I-SIGNAL-TO-I-PDU-MAPPING')
<DeepExtract>
group_id = 1
if pdu_sig_mapping is None:
return
for signal in pdu_sig_mapping:
compu_method = None
motorola = ea.get_child(signal, 'PACKING-BYTE-ORDER')
start_bit = ea.get_child(signal, 'START-POSITION')
isignal = ea.follow_ref(signal, 'SIGNAL-REF')
if isignal is None:
isignal = ea.follow_ref(signal, 'I-SIGNAL-REF')
if isignal is None:
isignal = ea.follow_ref(signal, 'I-SIGNAL-GROUP-REF')
if isignal is not None:
logger.debug('get_signals: found I-SIGNAL-GROUP ')
isignal_array = ea.follow_all_ref(isignal, 'I-SIGNAL-REF')
get_signalgrp_and_signals(isignal, isignal_array, target_pdu, group_id, ea)
group_id = group_id + 1
continue
if isignal is None:
logger.debug('Frame %s, no isignal for %s found', target_pdu.name, ea.get_child(signal, 'SHORT-NAME').text)
receiver = []
for triggering in signal_triggerings:
try:
if ea.selector(triggering, '>I-SIGNAL-REF')[0] == isignal:
reciving_ecu_instances = ea.selector(triggering, '>>I-SIGNAL-PORT-REF//COMMUNICATION-DIRECTION:IN/../../..')
receiver = [ea.get_short_name(a) for a in reciving_ecu_instances]
except IndexError:
pass
base_type = ea.follow_ref(isignal, 'BASE-TYPE-REF')
if base_type is None:
a = ea.selector(isignal, '>SYSTEM-SIGNAL-REF>DATA-TYPE-REF>BASE-TYPE-REF')
if len(a) > 0:
base_type = a[0]
try:
type_encoding = ea.get_child(base_type, 'BASE-TYPE-ENCODING').text
except AttributeError:
type_encoding = 'None'
signal_name = None
signal_name_elem = ea.get_child(isignal, 'LONG-NAME')
if signal_name_elem is not None:
signal_name_elem = ea.get_child(signal_name_elem, 'L-4')
if signal_name_elem is not None:
signal_name = signal_name_elem.text
system_signal = ea.follow_ref(isignal, 'SYSTEM-SIGNAL-REF')
if system_signal is None:
logger.debug('Frame %s, signal %s has no system-signal', target_pdu.name, isignal.tag)
if system_signal is not None and 'SYSTEM-SIGNAL-GROUP' in system_signal.tag:
system_signals = ea.selector(system_signal, 'SYSTEM-SIGNAL-REFS>>SYSTEM-SIGNAL-REF')
get_signalgrp_and_signals(system_signal, system_signals, target_pdu, group_id, ea)
group_id = group_id + 1
continue
length = ea.get_child(isignal, 'LENGTH')
if length is None:
length = ea.get_child(system_signal, 'LENGTH')
signal_min = None
signal_max = None
signal_description = ea.get_element_desc(system_signal)
datatype = ea.follow_ref(system_signal, 'DATA-TYPE-REF')
if datatype is None:
data_constr = None
compu_method = None
base_type = None
for test_signal in [isignal, system_signal]:
if data_constr is None:
data_constr = ea.follow_ref(test_signal, 'DATA-CONSTR-REF')
if compu_method is None:
compu_method = ea.follow_ref(test_signal, 'COMPU-METHOD-REF')
if base_type is None:
base_type = ea.follow_ref(test_signal, 'BASE-TYPE-REF')
lower = ea.get_child(data_constr, 'LOWER-LIMIT')
upper = ea.get_child(data_constr, 'UPPER-LIMIT')
else:
lower = ea.get_child(datatype, 'LOWER-LIMIT')
upper = ea.get_child(datatype, 'UPPER-LIMIT')
type_encoding = ea.get_child(datatype, 'ENCODING')
if lower is not None and upper is not None:
signal_min = float_factory(lower.text)
signal_max = float_factory(upper.text)
datdefprops = ea.get_child(datatype, 'SW-DATA-DEF-PROPS')
if compu_method is None and datdefprops is not None:
compu_method = ea.follow_ref(datdefprops, 'COMPU-METHOD-REF')
if compu_method is None:
compu_method = ea.follow_ref(isignal, 'COMPU-METHOD-REF')
base_type = ea.follow_ref(isignal, 'BASE-TYPE-REF')
encoding = ea.get_child(base_type, 'BASE-TYPE-ENCODING')
if encoding is not None and encoding.text == 'IEEE754':
is_float = True
if compu_method is None:
networkrep = ea.get_child(isignal, 'NETWORK-REPRESENTATION-PROPS')
data_def_props_var = ea.get_child(networkrep, 'SW-DATA-DEF-PROPS-VARIANTS')
data_def_props_cond = ea.get_child(data_def_props_var, 'SW-DATA-DEF-PROPS-CONDITIONAL')
if data_def_props_cond is not None:
try:
compu_method = ea.get_child(data_def_props_cond, 'COMPU-METHOD')
except:
logger.debug('No valid compu method found for this - check ARXML file!!')
compu_method = None
if compu_method is None:
logger.error('No valid compu method found for isignal/systemsignal {}/{} - check ARXML file!!'.format(ea.get_short_name(isignal), ea.get_short_name(system_signal)))
if compu_method is None:
logger.debug('No Compmethod found!! - fuzzy search in syssignal.')
compu_method = ea.follow_ref(system_signal, 'COMPU-METHOD-REF')
(values, factor, offset, unit_elem, const) = decode_compu_method(compu_method, ea, float_factory)
if signal_min is not None:
signal_min *= factor
signal_min += offset
if signal_max is not None:
signal_max *= factor
signal_max += offset
if base_type is None:
base_type = ea.follow_ref(datdefprops, 'BASE-TYPE-REF')
(is_signed, is_float) = eval_type_of_signal(type_encoding, base_type, ea)
unit_element = ea.get_child(isignal, 'UNIT')
display_name = ea.get_child(unit_element, 'DISPLAY-NAME')
if display_name is not None:
signal_unit = display_name.text
else:
signal_unit = ''
if unit_elem is not None:
longname = ea.get_child(unit_elem, 'LONG-NAME')
display_name = None
try:
display_name = ea.get_child(unit_elem, 'DISPLAY-NAME')
except:
logger.debug('No Unit Display name found!! - using long name')
if display_name is not None:
signal_unit = display_name.text
else:
l4 = ea.get_child(longname, 'L-4')
if l4 is not None:
signal_unit = l4.text
init_list = ea.selector(system_signal, '>INIT-VALUE-REF/VALUE')
if len(init_list) == 0:
init_list = ea.find_children_by_path(isignal, 'INIT-VALUE/NUMERICAL-VALUE-SPECIFICATION/VALUE')
if init_list:
initvalue = init_list[0]
else:
initvalue = None
isignal_name = ea.get_short_name(isignal)
system_signal_name = ea.get_short_name(system_signal)
compu_name = ea.get_short_name(compu_method)
name = isignal_name if isignal_name else system_signal_name
is_little_endian = False
if motorola is not None:
is_little_endian = ar_byteorder_is_little(motorola.text)
else:
logger.debug('no name byte order for signal' + name)
if start_bit is None:
logger.debug('no startBit for signal given')
if length is None:
logger.debug('no length for signal given')
if start_bit is not None:
new_signal = canmatrix.Signal(name, start_bit=int(start_bit.text, 0) + offset, size=int(length.text, 0) if length is not None else 0, is_little_endian=is_little_endian, is_signed=is_signed, factor=factor, offset=offset, unit=signal_unit, receivers=receiver, multiplex=None, comment=signal_description, is_float=is_float)
if signal_min is not None:
new_signal.min = signal_min
if signal_max is not None:
new_signal.max = signal_max
if not new_signal.is_little_endian:
new_signal.set_startbit(int(start_bit.text, 0) + offset, bitNumbering=1)
communication_direction = ea.selector(isignal, '<I-SIGNAL-TRIGGERING>I-SIGNAL-PORT-REF/COMMUNICATION-DIRECTION')
if len(communication_direction) > 0:
ecu = ea.get_ecu_instance(communication_direction[0])
if communication_direction[0].text == 'IN':
new_signal.add_receiver(ea.get_short_name(ecu))
if base_type is not None:
temp = ea.get_child(base_type, 'SHORT-NAME')
if temp is not None and 'boolean' == temp.text:
new_signal.add_values(1, 'TRUE')
new_signal.add_values(0, 'FALSE')
if initvalue is not None and initvalue.text is not None:
initvalue.text = canmatrix.utils.guess_value(initvalue.text)
try:
new_signal.initial_value = float_factory(initvalue.text) * factor + offset
except decimal.InvalidOperation:
logger.error('could not decode value {}'.format(initvalue.text))
for (key, value) in list(values.items()):
new_signal.add_values(canmatrix.utils.decode_number(key, float_factory), value)
if signal_name is not None:
new_signal.add_attribute('LongName', signal_name)
if compu_name is not None and compu_name:
new_signal.add_attribute('CompuMethodName', compu_name)
if isignal_name is not None and isignal_name:
new_signal.add_attribute('ISignalName', isignal_name)
if system_signal_name is not None and system_signal_name:
new_signal.add_attribute('SysSignalName', system_signal_name)
existing_signal = target_pdu.signal_by_name(new_signal.name)
if existing_signal is None:
target_pdu.add_signal(new_signal)
</DeepExtract>
target_frame.add_pdu(target_pdu)
|
def get_frame_from_container_ipdu(pdu, target_frame, ea, float_factory, headers_are_littleendian):
target_frame.is_fd = True
pdus = ea.follow_all_ref(pdu, 'CONTAINED-PDU-TRIGGERING-REF')
header_type = ea.get_child(pdu, 'HEADER-TYPE').text
header_type_params = {'SHORT-HEADER': (24, 8), 'LONG-HEADER': (32, 32)}
if header_type in header_type_params:
mux_size = header_type_params[header_type]
target_frame.add_signal(canmatrix.Signal(start_bit=0, size=mux_size[0], name='Header_ID', is_little_endian=headers_are_littleendian))
target_frame.add_signal(canmatrix.Signal(start_bit=mux_size[0], size=mux_size[1], name='Header_DLC', is_little_endian=headers_are_littleendian))
ipdus_refs = []
for cpdu in pdus:
ipdu = ea.follow_ref(cpdu, 'I-PDU-REF')
if ipdu in ipdus_refs:
continue
ipdus_refs.append(ipdu)
timing_spec = ea.get_child(ipdu, 'I-PDU-TIMING-SPECIFICATION')
if timing_spec is None:
timing_spec = ea.get_child(ipdu, 'I-PDU-TIMING-SPECIFICATIONS')
cyclic_timing = ea.get_child(timing_spec, 'CYCLIC-TIMING')
repeating_time = ea.get_child(cyclic_timing, 'REPEATING-TIME')
cycle_time = 0
value = ea.get_child(repeating_time, 'VALUE')
if value is not None:
cycle_time = int(float_factory(value.text) * 1000)
else:
time_period = ea.get_child(cyclic_timing, 'TIME-PERIOD')
value = ea.get_child(time_period, 'VALUE')
if value is not None:
cycle_time = int(float_factory(value.text) * 1000)
try:
if header_type == 'SHORT-HEADER':
header_id = ea.get_child(ipdu, 'HEADER-ID-SHORT-HEADER').text
elif header_type == 'LONG-HEADER':
header_id = ea.get_child(ipdu, 'HEADER-ID-LONG-HEADER').text
else:
header_id = None
except AttributeError:
header_id = None
if header_id is not None:
header_id = int(header_id, 0)
if ipdu is not None and 'SECURED-I-PDU' in ipdu.tag:
secured_i_pdu_name = ea.get_element_name(ipdu)
payload = ea.follow_ref(ipdu, 'PAYLOAD-REF')
ipdu = ea.follow_ref(payload, 'I-PDU-REF')
logger.info("found secured pdu '%s', dissolved to '%s'", secured_i_pdu_name, ea.get_element_name(ipdu))
try:
offset = int(ea.get_child(ipdu, 'OFFSET').text, 0) * 8
except:
offset = 0
try:
pdu_type = ipdu.attrib['DEST']
except KeyError:
pdu_type = ''
try:
pdu_port_type = ea.get_child(cpdu, 'I-PDU-PORT-REF').attrib['DEST']
except (AttributeError, KeyError):
pdu_port_type = ''
ipdu_length = int(ea.get_child(ipdu, 'LENGTH').text, 0)
ipdu_name = ea.get_element_name(ipdu)
ipdu_triggering_name = ea.get_element_name(cpdu)
target_pdu = canmatrix.Pdu(name=ipdu_name, size=ipdu_length, id=header_id, triggering_name=ipdu_triggering_name, pdu_type=pdu_type, port_type=pdu_port_type, cycle_time=cycle_time)
pdu_sig_mapping = ea.get_children(ipdu, 'I-SIGNAL-TO-I-PDU-MAPPING')
group_id = 1
if pdu_sig_mapping is None:
return
for signal in pdu_sig_mapping:
compu_method = None
motorola = ea.get_child(signal, 'PACKING-BYTE-ORDER')
start_bit = ea.get_child(signal, 'START-POSITION')
isignal = ea.follow_ref(signal, 'SIGNAL-REF')
if isignal is None:
isignal = ea.follow_ref(signal, 'I-SIGNAL-REF')
if isignal is None:
isignal = ea.follow_ref(signal, 'I-SIGNAL-GROUP-REF')
if isignal is not None:
logger.debug('get_signals: found I-SIGNAL-GROUP ')
isignal_array = ea.follow_all_ref(isignal, 'I-SIGNAL-REF')
get_signalgrp_and_signals(isignal, isignal_array, target_pdu, group_id, ea)
group_id = group_id + 1
continue
if isignal is None:
logger.debug('Frame %s, no isignal for %s found', target_pdu.name, ea.get_child(signal, 'SHORT-NAME').text)
receiver = []
for triggering in signal_triggerings:
try:
if ea.selector(triggering, '>I-SIGNAL-REF')[0] == isignal:
reciving_ecu_instances = ea.selector(triggering, '>>I-SIGNAL-PORT-REF//COMMUNICATION-DIRECTION:IN/../../..')
receiver = [ea.get_short_name(a) for a in reciving_ecu_instances]
except IndexError:
pass
base_type = ea.follow_ref(isignal, 'BASE-TYPE-REF')
if base_type is None:
a = ea.selector(isignal, '>SYSTEM-SIGNAL-REF>DATA-TYPE-REF>BASE-TYPE-REF')
if len(a) > 0:
base_type = a[0]
try:
type_encoding = ea.get_child(base_type, 'BASE-TYPE-ENCODING').text
except AttributeError:
type_encoding = 'None'
signal_name = None
signal_name_elem = ea.get_child(isignal, 'LONG-NAME')
if signal_name_elem is not None:
signal_name_elem = ea.get_child(signal_name_elem, 'L-4')
if signal_name_elem is not None:
signal_name = signal_name_elem.text
system_signal = ea.follow_ref(isignal, 'SYSTEM-SIGNAL-REF')
if system_signal is None:
logger.debug('Frame %s, signal %s has no system-signal', target_pdu.name, isignal.tag)
if system_signal is not None and 'SYSTEM-SIGNAL-GROUP' in system_signal.tag:
system_signals = ea.selector(system_signal, 'SYSTEM-SIGNAL-REFS>>SYSTEM-SIGNAL-REF')
get_signalgrp_and_signals(system_signal, system_signals, target_pdu, group_id, ea)
group_id = group_id + 1
continue
length = ea.get_child(isignal, 'LENGTH')
if length is None:
length = ea.get_child(system_signal, 'LENGTH')
signal_min = None
signal_max = None
signal_description = ea.get_element_desc(system_signal)
datatype = ea.follow_ref(system_signal, 'DATA-TYPE-REF')
if datatype is None:
data_constr = None
compu_method = None
base_type = None
for test_signal in [isignal, system_signal]:
if data_constr is None:
data_constr = ea.follow_ref(test_signal, 'DATA-CONSTR-REF')
if compu_method is None:
compu_method = ea.follow_ref(test_signal, 'COMPU-METHOD-REF')
if base_type is None:
base_type = ea.follow_ref(test_signal, 'BASE-TYPE-REF')
lower = ea.get_child(data_constr, 'LOWER-LIMIT')
upper = ea.get_child(data_constr, 'UPPER-LIMIT')
else:
lower = ea.get_child(datatype, 'LOWER-LIMIT')
upper = ea.get_child(datatype, 'UPPER-LIMIT')
type_encoding = ea.get_child(datatype, 'ENCODING')
if lower is not None and upper is not None:
signal_min = float_factory(lower.text)
signal_max = float_factory(upper.text)
datdefprops = ea.get_child(datatype, 'SW-DATA-DEF-PROPS')
if compu_method is None and datdefprops is not None:
compu_method = ea.follow_ref(datdefprops, 'COMPU-METHOD-REF')
if compu_method is None:
compu_method = ea.follow_ref(isignal, 'COMPU-METHOD-REF')
base_type = ea.follow_ref(isignal, 'BASE-TYPE-REF')
encoding = ea.get_child(base_type, 'BASE-TYPE-ENCODING')
if encoding is not None and encoding.text == 'IEEE754':
is_float = True
if compu_method is None:
networkrep = ea.get_child(isignal, 'NETWORK-REPRESENTATION-PROPS')
data_def_props_var = ea.get_child(networkrep, 'SW-DATA-DEF-PROPS-VARIANTS')
data_def_props_cond = ea.get_child(data_def_props_var, 'SW-DATA-DEF-PROPS-CONDITIONAL')
if data_def_props_cond is not None:
try:
compu_method = ea.get_child(data_def_props_cond, 'COMPU-METHOD')
except:
logger.debug('No valid compu method found for this - check ARXML file!!')
compu_method = None
if compu_method is None:
logger.error('No valid compu method found for isignal/systemsignal {}/{} - check ARXML file!!'.format(ea.get_short_name(isignal), ea.get_short_name(system_signal)))
if compu_method is None:
logger.debug('No Compmethod found!! - fuzzy search in syssignal.')
compu_method = ea.follow_ref(system_signal, 'COMPU-METHOD-REF')
(values, factor, offset, unit_elem, const) = decode_compu_method(compu_method, ea, float_factory)
if signal_min is not None:
signal_min *= factor
signal_min += offset
if signal_max is not None:
signal_max *= factor
signal_max += offset
if base_type is None:
base_type = ea.follow_ref(datdefprops, 'BASE-TYPE-REF')
(is_signed, is_float) = eval_type_of_signal(type_encoding, base_type, ea)
unit_element = ea.get_child(isignal, 'UNIT')
display_name = ea.get_child(unit_element, 'DISPLAY-NAME')
if display_name is not None:
signal_unit = display_name.text
else:
signal_unit = ''
if unit_elem is not None:
longname = ea.get_child(unit_elem, 'LONG-NAME')
display_name = None
try:
display_name = ea.get_child(unit_elem, 'DISPLAY-NAME')
except:
logger.debug('No Unit Display name found!! - using long name')
if display_name is not None:
signal_unit = display_name.text
else:
l4 = ea.get_child(longname, 'L-4')
if l4 is not None:
signal_unit = l4.text
init_list = ea.selector(system_signal, '>INIT-VALUE-REF/VALUE')
if len(init_list) == 0:
init_list = ea.find_children_by_path(isignal, 'INIT-VALUE/NUMERICAL-VALUE-SPECIFICATION/VALUE')
if init_list:
initvalue = init_list[0]
else:
initvalue = None
isignal_name = ea.get_short_name(isignal)
system_signal_name = ea.get_short_name(system_signal)
compu_name = ea.get_short_name(compu_method)
name = isignal_name if isignal_name else system_signal_name
is_little_endian = False
if motorola is not None:
is_little_endian = ar_byteorder_is_little(motorola.text)
else:
logger.debug('no name byte order for signal' + name)
if start_bit is None:
logger.debug('no startBit for signal given')
if length is None:
logger.debug('no length for signal given')
if start_bit is not None:
new_signal = canmatrix.Signal(name, start_bit=int(start_bit.text, 0) + offset, size=int(length.text, 0) if length is not None else 0, is_little_endian=is_little_endian, is_signed=is_signed, factor=factor, offset=offset, unit=signal_unit, receivers=receiver, multiplex=None, comment=signal_description, is_float=is_float)
if signal_min is not None:
new_signal.min = signal_min
if signal_max is not None:
new_signal.max = signal_max
if not new_signal.is_little_endian:
new_signal.set_startbit(int(start_bit.text, 0) + offset, bitNumbering=1)
communication_direction = ea.selector(isignal, '<I-SIGNAL-TRIGGERING>I-SIGNAL-PORT-REF/COMMUNICATION-DIRECTION')
if len(communication_direction) > 0:
ecu = ea.get_ecu_instance(communication_direction[0])
if communication_direction[0].text == 'IN':
new_signal.add_receiver(ea.get_short_name(ecu))
if base_type is not None:
temp = ea.get_child(base_type, 'SHORT-NAME')
if temp is not None and 'boolean' == temp.text:
new_signal.add_values(1, 'TRUE')
new_signal.add_values(0, 'FALSE')
if initvalue is not None and initvalue.text is not None:
initvalue.text = canmatrix.utils.guess_value(initvalue.text)
try:
new_signal.initial_value = float_factory(initvalue.text) * factor + offset
except decimal.InvalidOperation:
logger.error('could not decode value {}'.format(initvalue.text))
for (key, value) in list(values.items()):
new_signal.add_values(canmatrix.utils.decode_number(key, float_factory), value)
if signal_name is not None:
new_signal.add_attribute('LongName', signal_name)
if compu_name is not None and compu_name:
new_signal.add_attribute('CompuMethodName', compu_name)
if isignal_name is not None and isignal_name:
new_signal.add_attribute('ISignalName', isignal_name)
if system_signal_name is not None and system_signal_name:
new_signal.add_attribute('SysSignalName', system_signal_name)
existing_signal = target_pdu.signal_by_name(new_signal.name)
if existing_signal is None:
target_pdu.add_signal(new_signal)
target_frame.add_pdu(target_pdu)
|
canmatrix
|
positive
|
def generate_common():
common_lib = 'libcommon.dll' if sys.platform == 'win32' else 'libcommon.so'
<DeepExtract>
a_h = '#include "common.h"\n\nvoid foo(struct mystruct *m);\n\n'
a_c = '#include "a.h"\n\nvoid foo(struct mystruct *m) {\n\n}\n\n'
b_h = '#include "common.h"\n\nvoid bar(struct mystruct *m);\n\n'
b_c = '#include "b.h"\n\nvoid bar(struct mystruct *m) {\n\n}\n\n'
common_h = 'struct mystruct {\n int a;\n};\n\n'
try:
os.mkdir(COMMON_DIR)
except FileExistsError:
rmtree(COMMON_DIR)
os.mkdir(COMMON_DIR)
names = {'a.h': a_h, 'a.c': a_c, 'b.h': b_h, 'b.c': b_c, 'common.h': common_h}
for (name, source) in names.items():
with open(f'{COMMON_DIR}/{name}', 'w') as f:
f.write(source)
</DeepExtract>
<DeepExtract>
subprocess.run(['gcc', '-c', f'{COMMON_DIR}/a.c', '-o', f'{COMMON_DIR}/a.o'])
subprocess.run(['gcc', '-c', f'{COMMON_DIR}/b.c', '-o', f'{COMMON_DIR}/b.o'])
subprocess.run(['gcc', '-shared', '-o', f'{COMMON_DIR}/{common_lib}', f'{COMMON_DIR}/a.o', f'{COMMON_DIR}/b.o'])
</DeepExtract>
for file_name in ['a', 'b']:
<DeepExtract>
test_options = options.get_default_options()
test_options.headers = [f'{COMMON_DIR}/{file_name}.h']
test_options.include_search_paths = [COMMON_DIR]
test_options.libraries = [common_lib]
test_options.compile_libdirs = [COMMON_DIR]
test_options.embed_preamble = embed_preamble
if embed_preamble:
output = f'{COMMON_DIR}/{file_name}.py'
else:
output = f'{COMMON_DIR}/{file_name}2.py'
descriptions = parser.parse(test_options.headers, test_options)
processor.process(descriptions, test_options)
printer_python.WrapperPrinter(output, test_options, descriptions)
</DeepExtract>
for file_name in ['a', 'b']:
<DeepExtract>
test_options = options.get_default_options()
test_options.headers = [f'{COMMON_DIR}/{file_name}.h']
test_options.include_search_paths = [COMMON_DIR]
test_options.libraries = [common_lib]
test_options.compile_libdirs = [COMMON_DIR]
test_options.embed_preamble = False
if False:
output = f'{COMMON_DIR}/{file_name}.py'
else:
output = f'{COMMON_DIR}/{file_name}2.py'
descriptions = parser.parse(test_options.headers, test_options)
processor.process(descriptions, test_options)
printer_python.WrapperPrinter(output, test_options, descriptions)
</DeepExtract>
|
def generate_common():
common_lib = 'libcommon.dll' if sys.platform == 'win32' else 'libcommon.so'
a_h = '#include "common.h"\n\nvoid foo(struct mystruct *m);\n\n'
a_c = '#include "a.h"\n\nvoid foo(struct mystruct *m) {\n\n}\n\n'
b_h = '#include "common.h"\n\nvoid bar(struct mystruct *m);\n\n'
b_c = '#include "b.h"\n\nvoid bar(struct mystruct *m) {\n\n}\n\n'
common_h = 'struct mystruct {\n int a;\n};\n\n'
try:
os.mkdir(COMMON_DIR)
except FileExistsError:
rmtree(COMMON_DIR)
os.mkdir(COMMON_DIR)
names = {'a.h': a_h, 'a.c': a_c, 'b.h': b_h, 'b.c': b_c, 'common.h': common_h}
for (name, source) in names.items():
with open(f'{COMMON_DIR}/{name}', 'w') as f:
f.write(source)
subprocess.run(['gcc', '-c', f'{COMMON_DIR}/a.c', '-o', f'{COMMON_DIR}/a.o'])
subprocess.run(['gcc', '-c', f'{COMMON_DIR}/b.c', '-o', f'{COMMON_DIR}/b.o'])
subprocess.run(['gcc', '-shared', '-o', f'{COMMON_DIR}/{common_lib}', f'{COMMON_DIR}/a.o', f'{COMMON_DIR}/b.o'])
for file_name in ['a', 'b']:
test_options = options.get_default_options()
test_options.headers = [f'{COMMON_DIR}/{file_name}.h']
test_options.include_search_paths = [COMMON_DIR]
test_options.libraries = [common_lib]
test_options.compile_libdirs = [COMMON_DIR]
test_options.embed_preamble = embed_preamble
if embed_preamble:
output = f'{COMMON_DIR}/{file_name}.py'
else:
output = f'{COMMON_DIR}/{file_name}2.py'
descriptions = parser.parse(test_options.headers, test_options)
processor.process(descriptions, test_options)
printer_python.WrapperPrinter(output, test_options, descriptions)
for file_name in ['a', 'b']:
test_options = options.get_default_options()
test_options.headers = [f'{COMMON_DIR}/{file_name}.h']
test_options.include_search_paths = [COMMON_DIR]
test_options.libraries = [common_lib]
test_options.compile_libdirs = [COMMON_DIR]
test_options.embed_preamble = False
if False:
output = f'{COMMON_DIR}/{file_name}.py'
else:
output = f'{COMMON_DIR}/{file_name}2.py'
descriptions = parser.parse(test_options.headers, test_options)
processor.process(descriptions, test_options)
printer_python.WrapperPrinter(output, test_options, descriptions)
|
ctypesgen
|
positive
|
def mouseReleaseEvent(self, event):
if self.mouse_right:
self.mouse_right = False
return
if not self.mouse_left:
return
self.mouse_left = False
self.prev_pt = None
if len(self.this_mouse_stroke) > 0:
print('strokes', self.this_mouse_stroke)
self.this_mouse_strokes.append(self.this_mouse_stroke)
self.line_size.append(self.size)
self.line_color_list.append(self.current_color)
self.this_mouse_stroke = []
<DeepExtract>
self.Refresh()
area = QRectF(0.0, 0.0, 512.0, 512.0)
area2 = QRectF(20.0, 120.0, 512.0, 512.0)
image = QImage(area.width(), area.height(), QImage.Format_ARGB32_Premultiplied)
painter = QPainter(image)
self.render(painter, area, area2)
painter.end()
image.save(os.path.join(os.getcwd(), 'temp', 'query_img.png'))
</DeepExtract>
<DeepExtract>
if self.query_shadow:
path = os.path.join(os.getcwd(), 'temp', 'query_img.png')
qstr = self.query(path)
self.closest_face_fname = qstr[0]
self.closer_face_fname_list = qstr
self.shadow_pil = self.getShadow(qstr)
for item in self.items():
obj_type = type(item)
if isinstance(item, QGraphicsPixmapItem):
self.removeItem(item)
print('remove QGraphicsPixmapItem')
'\n im = self.shadow_pil\n data = im.tobytes(\'raw\', \'RGB\')\n qim = QImage(data, im.size[0], im.size[1], QImage.Format_RGB888)\n \n pix = QPixmap.fromImage(qim)\n #pix.save("dddd1.png");\n pix=self.set_pixmap_alpha(pix)\n \n tst=self.addPixmap(pix)\n #tst=self.addPixmap(QPixmap(\'\\capture.png\'))\n tst.setPos(20, 120)\n '
self.Refresh()
if self.shadow_show:
self.setShadowImage()
if self.mono_mask_show:
self.setShadowImage_toRed()
</DeepExtract>
if self.callbk is not None:
<DeepExtract>
path = os.path.join(os.getcwd(), 'temp', 'query_img.png')
qstr = self.query(path)
self.callbk(qstr)
</DeepExtract>
|
def mouseReleaseEvent(self, event):
if self.mouse_right:
self.mouse_right = False
return
if not self.mouse_left:
return
self.mouse_left = False
self.prev_pt = None
if len(self.this_mouse_stroke) > 0:
print('strokes', self.this_mouse_stroke)
self.this_mouse_strokes.append(self.this_mouse_stroke)
self.line_size.append(self.size)
self.line_color_list.append(self.current_color)
self.this_mouse_stroke = []
self.Refresh()
area = QRectF(0.0, 0.0, 512.0, 512.0)
area2 = QRectF(20.0, 120.0, 512.0, 512.0)
image = QImage(area.width(), area.height(), QImage.Format_ARGB32_Premultiplied)
painter = QPainter(image)
self.render(painter, area, area2)
painter.end()
image.save(os.path.join(os.getcwd(), 'temp', 'query_img.png'))
if self.query_shadow:
path = os.path.join(os.getcwd(), 'temp', 'query_img.png')
qstr = self.query(path)
self.closest_face_fname = qstr[0]
self.closer_face_fname_list = qstr
self.shadow_pil = self.getShadow(qstr)
for item in self.items():
obj_type = type(item)
if isinstance(item, QGraphicsPixmapItem):
self.removeItem(item)
print('remove QGraphicsPixmapItem')
'\n im = self.shadow_pil\n data = im.tobytes(\'raw\', \'RGB\')\n qim = QImage(data, im.size[0], im.size[1], QImage.Format_RGB888)\n \n pix = QPixmap.fromImage(qim)\n #pix.save("dddd1.png");\n pix=self.set_pixmap_alpha(pix)\n \n tst=self.addPixmap(pix)\n #tst=self.addPixmap(QPixmap(\'\\capture.png\'))\n tst.setPos(20, 120)\n '
self.Refresh()
if self.shadow_show:
self.setShadowImage()
if self.mono_mask_show:
self.setShadowImage_toRed()
if self.callbk is not None:
path = os.path.join(os.getcwd(), 'temp', 'query_img.png')
qstr = self.query(path)
self.callbk(qstr)
|
dualFace
|
positive
|
def train_step(self, data):
"""One training step
Arguments:
data {dict of data} -- required keys and values:
'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of context sentences
'X_floor' {LongTensor [batch_size, history_len]} -- floors of context sentences
'Y' {LongTensor [batch_size, max_y_sent_len]} -- token ids of response sentence
'Y_floor' {LongTensor [batch_size]} -- floor of response sentence
Returns:
dict of data -- returned keys and values
'loss' {FloatTensor []} -- loss to backword
dict of statistics -- returned keys and values
'ppl' {float} -- perplexity
'loss' {float} -- batch loss
"""
(X, Y) = (data['X'], data['Y'])
(X_floor, Y_floor) = (data['X_floor'], data['Y_floor'])
Y_in = Y[:, :-1].contiguous()
Y_out = Y[:, 1:].contiguous()
batch_size = X.size(0)
<DeepExtract>
(batch_size, history_len, max_x_sent_len) = X.size()
flat_inputs = X.view(batch_size * history_len, max_x_sent_len)
input_lens = (X != self.pad_token_id).sum(-1)
flat_input_lens = input_lens.view(batch_size * history_len)
(word_encodings, _, sent_encodings) = self.sent_encoder(flat_inputs, flat_input_lens)
word_encodings = word_encodings.view(batch_size, history_len, max_x_sent_len, -1)
sent_encodings = sent_encodings.view(batch_size, history_len, -1)
if self.floor_encoder is not None:
src_floors = X_floor.view(-1)
tgt_floors = Y_floor.unsqueeze(1).repeat(1, history_len).view(-1)
sent_encodings = sent_encodings.view(batch_size * history_len, -1)
sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors)
sent_encodings = sent_encodings.view(batch_size, history_len, -1)
dial_lens = (input_lens > 0).long().sum(1)
(_, _, dial_encodings) = self.dial_encoder(sent_encodings, dial_lens)
(word_encodings, sent_encodings, dial_encodings) = (word_encodings, sent_encodings, dial_encodings)
</DeepExtract>
attn_ctx = word_encodings.view(batch_size, -1, word_encodings.size(-1))
<DeepExtract>
attn_mask = X.view(batch_size, -1) != self.pad_token_id
attn_mask = attn_mask
</DeepExtract>
<DeepExtract>
ctx_mech = self.ctx2mech_fc(dial_encodings)
mech_scores = torch.matmul(torch.matmul(ctx_mech, self.score_bilinear), self.mechanism_embeddings.weight.T)
mech_probs = F.softmax(mech_scores, dim=1)
mech_probs = mech_probs
</DeepExtract>
mech_embed_inputs = torch.LongTensor(list(range(self.n_mechanisms))).to(DEVICE)
repeated_mech_embed_inputs = mech_embed_inputs.unsqueeze(0).repeat(batch_size, 1).view(-1)
repeated_mech_embeds = self.mechanism_embeddings(repeated_mech_embed_inputs)
repeated_Y_in = Y_in.unsqueeze(1).repeat(1, self.n_mechanisms, 1)
repeated_Y_in = repeated_Y_in.view(batch_size * self.n_mechanisms, -1)
repeated_Y_out = Y_out.unsqueeze(1).repeat(1, self.n_mechanisms, 1)
repeated_Y_out = repeated_Y_out.view(batch_size * self.n_mechanisms, -1)
dial_encodings = dial_encodings.unsqueeze(1).repeat(1, self.n_mechanisms, 1)
dial_encodings = dial_encodings.view(batch_size * self.n_mechanisms, self.dial_encoder_hidden_dim)
attn_ctx = attn_ctx.unsqueeze(1).repeat(1, self.n_mechanisms, 1, 1)
attn_ctx = attn_ctx.view(batch_size * self.n_mechanisms, -1, attn_ctx.size(-1))
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_mechanisms, 1)
attn_mask = attn_mask.view(batch_size * self.n_mechanisms, -1)
dec_ctx = self.ctx_mech_combine_fc(torch.cat([dial_encodings, repeated_mech_embeds], dim=1))
<DeepExtract>
batch_size = dec_ctx.size(0)
hiddens = self._init_dec_hiddens(dec_ctx)
feats = None
feats = dec_ctx.unsqueeze(1).repeat(1, repeated_Y_in.size(1), 1)
ret_dict = self.decoder.forward(batch_size=batch_size, inputs=repeated_Y_in, hiddens=hiddens, feats=feats, attn_ctx=attn_ctx, attn_mask=attn_mask, mode=DecoderRNN.MODE_TEACHER_FORCE)
decoder_ret_dict = ret_dict
</DeepExtract>
loss = 0
word_neglogll = F.cross_entropy(decoder_ret_dict['logits'].view(-1, self.vocab_size), repeated_Y_out.view(-1), ignore_index=self.decoder.pad_token_id, reduction='none').view(batch_size, self.n_mechanisms, -1)
sent_logll = word_neglogll.sum(2) * -1
mech_logll = (mech_probs + 1e-10).log()
sent_mech_logll = sent_logll + mech_logll
target_logll = torch.logsumexp(sent_mech_logll, dim=1)
target_neglogll = target_logll * -1
loss = target_neglogll.mean()
with torch.no_grad():
ppl = F.cross_entropy(decoder_ret_dict['logits'].view(-1, self.vocab_size), repeated_Y_out.view(-1), ignore_index=self.decoder.pad_token_id, reduction='mean').exp()
ret_data = {'loss': loss}
ret_stat = {'ppl': ppl.item(), 'loss': loss.item(), 'mech_prob_std': mech_probs.std(1).mean().item(), 'mech_prob_max': mech_probs.max(1)[0].mean().item()}
return (ret_data, ret_stat)
|
def train_step(self, data):
"""One training step
Arguments:
data {dict of data} -- required keys and values:
'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of context sentences
'X_floor' {LongTensor [batch_size, history_len]} -- floors of context sentences
'Y' {LongTensor [batch_size, max_y_sent_len]} -- token ids of response sentence
'Y_floor' {LongTensor [batch_size]} -- floor of response sentence
Returns:
dict of data -- returned keys and values
'loss' {FloatTensor []} -- loss to backword
dict of statistics -- returned keys and values
'ppl' {float} -- perplexity
'loss' {float} -- batch loss
"""
(X, Y) = (data['X'], data['Y'])
(X_floor, Y_floor) = (data['X_floor'], data['Y_floor'])
Y_in = Y[:, :-1].contiguous()
Y_out = Y[:, 1:].contiguous()
batch_size = X.size(0)
(batch_size, history_len, max_x_sent_len) = X.size()
flat_inputs = X.view(batch_size * history_len, max_x_sent_len)
input_lens = (X != self.pad_token_id).sum(-1)
flat_input_lens = input_lens.view(batch_size * history_len)
(word_encodings, _, sent_encodings) = self.sent_encoder(flat_inputs, flat_input_lens)
word_encodings = word_encodings.view(batch_size, history_len, max_x_sent_len, -1)
sent_encodings = sent_encodings.view(batch_size, history_len, -1)
if self.floor_encoder is not None:
src_floors = X_floor.view(-1)
tgt_floors = Y_floor.unsqueeze(1).repeat(1, history_len).view(-1)
sent_encodings = sent_encodings.view(batch_size * history_len, -1)
sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors)
sent_encodings = sent_encodings.view(batch_size, history_len, -1)
dial_lens = (input_lens > 0).long().sum(1)
(_, _, dial_encodings) = self.dial_encoder(sent_encodings, dial_lens)
(word_encodings, sent_encodings, dial_encodings) = (word_encodings, sent_encodings, dial_encodings)
attn_ctx = word_encodings.view(batch_size, -1, word_encodings.size(-1))
attn_mask = X.view(batch_size, -1) != self.pad_token_id
attn_mask = attn_mask
ctx_mech = self.ctx2mech_fc(dial_encodings)
mech_scores = torch.matmul(torch.matmul(ctx_mech, self.score_bilinear), self.mechanism_embeddings.weight.T)
mech_probs = F.softmax(mech_scores, dim=1)
mech_probs = mech_probs
mech_embed_inputs = torch.LongTensor(list(range(self.n_mechanisms))).to(DEVICE)
repeated_mech_embed_inputs = mech_embed_inputs.unsqueeze(0).repeat(batch_size, 1).view(-1)
repeated_mech_embeds = self.mechanism_embeddings(repeated_mech_embed_inputs)
repeated_Y_in = Y_in.unsqueeze(1).repeat(1, self.n_mechanisms, 1)
repeated_Y_in = repeated_Y_in.view(batch_size * self.n_mechanisms, -1)
repeated_Y_out = Y_out.unsqueeze(1).repeat(1, self.n_mechanisms, 1)
repeated_Y_out = repeated_Y_out.view(batch_size * self.n_mechanisms, -1)
dial_encodings = dial_encodings.unsqueeze(1).repeat(1, self.n_mechanisms, 1)
dial_encodings = dial_encodings.view(batch_size * self.n_mechanisms, self.dial_encoder_hidden_dim)
attn_ctx = attn_ctx.unsqueeze(1).repeat(1, self.n_mechanisms, 1, 1)
attn_ctx = attn_ctx.view(batch_size * self.n_mechanisms, -1, attn_ctx.size(-1))
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_mechanisms, 1)
attn_mask = attn_mask.view(batch_size * self.n_mechanisms, -1)
dec_ctx = self.ctx_mech_combine_fc(torch.cat([dial_encodings, repeated_mech_embeds], dim=1))
batch_size = dec_ctx.size(0)
hiddens = self._init_dec_hiddens(dec_ctx)
feats = None
feats = dec_ctx.unsqueeze(1).repeat(1, repeated_Y_in.size(1), 1)
ret_dict = self.decoder.forward(batch_size=batch_size, inputs=repeated_Y_in, hiddens=hiddens, feats=feats, attn_ctx=attn_ctx, attn_mask=attn_mask, mode=DecoderRNN.MODE_TEACHER_FORCE)
decoder_ret_dict = ret_dict
loss = 0
word_neglogll = F.cross_entropy(decoder_ret_dict['logits'].view(-1, self.vocab_size), repeated_Y_out.view(-1), ignore_index=self.decoder.pad_token_id, reduction='none').view(batch_size, self.n_mechanisms, -1)
sent_logll = word_neglogll.sum(2) * -1
mech_logll = (mech_probs + 1e-10).log()
sent_mech_logll = sent_logll + mech_logll
target_logll = torch.logsumexp(sent_mech_logll, dim=1)
target_neglogll = target_logll * -1
loss = target_neglogll.mean()
with torch.no_grad():
ppl = F.cross_entropy(decoder_ret_dict['logits'].view(-1, self.vocab_size), repeated_Y_out.view(-1), ignore_index=self.decoder.pad_token_id, reduction='mean').exp()
ret_data = {'loss': loss}
ret_stat = {'ppl': ppl.item(), 'loss': loss.item(), 'mech_prob_std': mech_probs.std(1).mean().item(), 'mech_prob_max': mech_probs.max(1)[0].mean().item()}
return (ret_data, ret_stat)
|
dialog-processing
|
positive
|
def MenuBlockAsControls(menuItems, parentage=[]):
blocks = []
curBlock = []
for item in menuItems:
<DeepExtract>
itemAsCtrl = ControlProps()
itemAsCtrl['Texts'] = [item['Text']]
itemAsCtrl['ControlID'] = item['ID']
itemAsCtrl['Type'] = item['Type']
itemAsCtrl['State'] = item['State']
itemAsCtrl['Class'] = 'MenuItem'
itemAsCtrl['FriendlyClassName'] = 'MenuItem'
itemAsCtrl['Rectangle'] = RECT(0, 0, 999, 999)
itemAsCtrl['Fonts'] = [LOGFONTW()]
itemAsCtrl['ClientRects'] = [RECT(0, 0, 999, 999)]
itemAsCtrl['ContextHelpID'] = 0
itemAsCtrl['UserData'] = 0
itemAsCtrl['Style'] = 0
itemAsCtrl['ExStyle'] = 0
itemAsCtrl['IsVisible'] = 1
itemAsCtrl = itemAsCtrl
</DeepExtract>
if parentage:
itemPath = '%s->%s' % ('->'.join(parentage), item['Text'])
else:
itemPath = item['Text']
curBlock.append(itemAsCtrl)
if 'MenuItems' in item:
parentage.append(item['Text'])
blocks.extend(MenuBlockAsControls(item['MenuItems']['MenuItems'], parentage))
del parentage[-1]
blocks.append(curBlock)
return blocks
|
def MenuBlockAsControls(menuItems, parentage=[]):
blocks = []
curBlock = []
for item in menuItems:
itemAsCtrl = ControlProps()
itemAsCtrl['Texts'] = [item['Text']]
itemAsCtrl['ControlID'] = item['ID']
itemAsCtrl['Type'] = item['Type']
itemAsCtrl['State'] = item['State']
itemAsCtrl['Class'] = 'MenuItem'
itemAsCtrl['FriendlyClassName'] = 'MenuItem'
itemAsCtrl['Rectangle'] = RECT(0, 0, 999, 999)
itemAsCtrl['Fonts'] = [LOGFONTW()]
itemAsCtrl['ClientRects'] = [RECT(0, 0, 999, 999)]
itemAsCtrl['ContextHelpID'] = 0
itemAsCtrl['UserData'] = 0
itemAsCtrl['Style'] = 0
itemAsCtrl['ExStyle'] = 0
itemAsCtrl['IsVisible'] = 1
itemAsCtrl = itemAsCtrl
if parentage:
itemPath = '%s->%s' % ('->'.join(parentage), item['Text'])
else:
itemPath = item['Text']
curBlock.append(itemAsCtrl)
if 'MenuItems' in item:
parentage.append(item['Text'])
blocks.extend(MenuBlockAsControls(item['MenuItems']['MenuItems'], parentage))
del parentage[-1]
blocks.append(curBlock)
return blocks
|
BrowserRefresh-Sublime
|
positive
|
def make_iterator(self, dataset):
<DeepExtract>
tf.compat.v1.logging.info('Using pure synthetic data.')
with self.scope():
if self.extended._global_batch_size:
dist_dataset = SyntheticDataset(self, self.num_replicas_in_sync)
else:
dist_dataset = SyntheticDataset(self)
</DeepExtract>
return iter(dist_dataset)
|
def make_iterator(self, dataset):
tf.compat.v1.logging.info('Using pure synthetic data.')
with self.scope():
if self.extended._global_batch_size:
dist_dataset = SyntheticDataset(self, self.num_replicas_in_sync)
else:
dist_dataset = SyntheticDataset(self)
return iter(dist_dataset)
|
dialogue-utterance-rewriter
|
positive
|
def forward(self, x, do_ori=False):
t = time.time()
num_features_prefilter = self.num
if self.num_Baum_iters > 0:
num_features_prefilter = int(1.5 * self.num)
<DeepExtract>
t = time.time()
(self.scale_pyr, self.sigmas, self.pix_dists) = self.ScalePyrGen(x)
aff_matrices = []
top_responces = []
pyr_idxs = []
level_idxs = []
det_t = 0
nmst = 0
for oct_idx in range(len(self.sigmas)):
octave = self.scale_pyr[oct_idx]
sigmas_oct = self.sigmas[oct_idx]
pix_dists_oct = self.pix_dists[oct_idx]
low = None
cur = None
high = None
octaveMap = (self.scale_pyr[oct_idx][0] * 0).byte()
nms_f = NMS3dAndComposeA(w=octave[0].size(3), h=octave[0].size(2), border=self.b, mrSize=self.mrSize)
for level_idx in range(1, len(octave) - 1):
if cur is None:
low = torch.clamp(self.RespNet(octave[level_idx - 1], sigmas_oct[level_idx - 1]) - self.th, min=0)
else:
low = cur
if high is None:
cur = torch.clamp(self.RespNet(octave[level_idx], sigmas_oct[level_idx]) - self.th, min=0)
else:
cur = high
high = torch.clamp(self.RespNet(octave[level_idx + 1], sigmas_oct[level_idx + 1]) - self.th, min=0)
(top_resp, aff_matrix, octaveMap_current) = nms_f(low, cur, high, num_features=num_features_prefilter, octaveMap=octaveMap, scales=sigmas_oct[level_idx - 1:level_idx + 2])
if top_resp is None:
continue
octaveMap = octaveMap_current
(aff_matrices.append(aff_matrix), top_responces.append(top_resp))
pyr_id = Variable(oct_idx * torch.ones(aff_matrix.size(0)))
lev_id = Variable((level_idx - 1) * torch.ones(aff_matrix.size(0)))
if x.is_cuda:
pyr_id = pyr_id.cuda()
lev_id = lev_id.cuda()
pyr_idxs.append(pyr_id)
level_idxs.append(lev_id)
all_responses = torch.cat(top_responces, dim=0)
aff_m_scales = torch.cat(aff_matrices, dim=0)
pyr_idxs_scales = torch.cat(pyr_idxs, dim=0)
level_idxs_scale = torch.cat(level_idxs, dim=0)
if num_features_prefilter > 0 and num_features_prefilter < all_responses.size(0):
(all_responses, idxs) = torch.topk(all_responses, k=num_features_prefilter)
LAFs = torch.index_select(aff_m_scales, 0, idxs)
final_pyr_idxs = pyr_idxs_scales[idxs]
final_level_idxs = level_idxs_scale[idxs]
else:
(responses, LAFs, final_pyr_idxs, final_level_idxs) = (all_responses, aff_m_scales, pyr_idxs_scales, level_idxs_scale)
(responses, LAFs, final_pyr_idxs, final_level_idxs) = (all_responses, LAFs, final_pyr_idxs, final_level_idxs)
</DeepExtract>
print(time.time() - t, 'detection multiscale')
t = time.time()
LAFs[:, 0:2, 0:2] = self.mrSize * LAFs[:, :, 0:2]
if self.num_Baum_iters > 0:
<DeepExtract>
pe_time = 0
affnet_time = 0
pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
t = time.time()
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS=self.AffNet.PS)
pe_time += time.time() - t
t = time.time()
base_A = torch.eye(2).unsqueeze(0).expand(final_pyr_idxs.size(0), 2, 2)
if responses.is_cuda:
base_A = base_A.cuda()
base_A = Variable(base_A)
is_good = None
n_patches = patches_small.size(0)
for i in range(self.num_Baum_iters):
t = time.time()
A = batched_forward(self.AffNet, patches_small, 256)
is_good_current = 1
affnet_time += time.time() - t
if is_good is None:
is_good = is_good_current
else:
is_good = is_good * is_good_current
base_A = torch.bmm(A, base_A)
new_LAFs = torch.cat([torch.bmm(base_A, LAFs[:, :, 0:2]), LAFs[:, :, 2:]], dim=2)
if i != self.num_Baum_iters - 1:
pe_time += time.time() - t
t = time.time()
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, new_LAFs, PS=self.AffNet.PS)
pe_time += time.time() - t
(l1, l2) = batch_eig2x2(A)
ratio1 = torch.abs(l1 / (l2 + 1e-08))
converged_mask = (ratio1 <= 1.2) * (ratio1 >= 0.8)
(l1, l2) = batch_eig2x2(base_A)
ratio = torch.abs(l1 / (l2 + 1e-08))
idxs_mask = (ratio < 6.0) * (ratio > 1.0 / 6.0) * checkTouchBoundary(new_LAFs)
num_survived = idxs_mask.float().sum()
if self.num > 0 and num_survived.data.item() > self.num:
responses = responses * idxs_mask.float()
(responses, idxs) = torch.topk(responses, k=self.num)
else:
idxs = Variable(torch.nonzero(idxs_mask.data).view(-1).long())
responses = responses[idxs]
final_pyr_idxs = final_pyr_idxs[idxs]
final_level_idxs = final_level_idxs[idxs]
base_A = torch.index_select(base_A, 0, idxs)
LAFs = torch.index_select(LAFs, 0, idxs)
new_LAFs = torch.cat([torch.bmm(base_A, LAFs[:, :, 0:2]), LAFs[:, :, 2:]], dim=2)
print('affnet_time', affnet_time)
print('pe_time', pe_time)
(responses, LAFs, final_pyr_idxs, final_level_idxs) = (responses, new_LAFs, final_pyr_idxs, final_level_idxs)
</DeepExtract>
print(time.time() - t, 'affine shape iters')
t = time.time()
if do_ori:
<DeepExtract>
pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS=self.OriNet.PS)
max_iters = 1
for i in range(max_iters):
angles = self.OriNet(patches_small)
if len(angles.size()) > 2:
LAFs = torch.cat([torch.bmm(LAFs[:, :, :2], angles), LAFs[:, :, 2:]], dim=2)
else:
LAFs = torch.cat([torch.bmm(LAFs[:, :, :2], angles2A(angles).view(-1, 2, 2)), LAFs[:, :, 2:]], dim=2)
if i != max_iters:
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS=self.OriNet.PS)
LAFs = LAFs
</DeepExtract>
return (denormalizeLAFs(LAFs, x.size(3), x.size(2)), responses)
|
def forward(self, x, do_ori=False):
t = time.time()
num_features_prefilter = self.num
if self.num_Baum_iters > 0:
num_features_prefilter = int(1.5 * self.num)
t = time.time()
(self.scale_pyr, self.sigmas, self.pix_dists) = self.ScalePyrGen(x)
aff_matrices = []
top_responces = []
pyr_idxs = []
level_idxs = []
det_t = 0
nmst = 0
for oct_idx in range(len(self.sigmas)):
octave = self.scale_pyr[oct_idx]
sigmas_oct = self.sigmas[oct_idx]
pix_dists_oct = self.pix_dists[oct_idx]
low = None
cur = None
high = None
octaveMap = (self.scale_pyr[oct_idx][0] * 0).byte()
nms_f = NMS3dAndComposeA(w=octave[0].size(3), h=octave[0].size(2), border=self.b, mrSize=self.mrSize)
for level_idx in range(1, len(octave) - 1):
if cur is None:
low = torch.clamp(self.RespNet(octave[level_idx - 1], sigmas_oct[level_idx - 1]) - self.th, min=0)
else:
low = cur
if high is None:
cur = torch.clamp(self.RespNet(octave[level_idx], sigmas_oct[level_idx]) - self.th, min=0)
else:
cur = high
high = torch.clamp(self.RespNet(octave[level_idx + 1], sigmas_oct[level_idx + 1]) - self.th, min=0)
(top_resp, aff_matrix, octaveMap_current) = nms_f(low, cur, high, num_features=num_features_prefilter, octaveMap=octaveMap, scales=sigmas_oct[level_idx - 1:level_idx + 2])
if top_resp is None:
continue
octaveMap = octaveMap_current
(aff_matrices.append(aff_matrix), top_responces.append(top_resp))
pyr_id = Variable(oct_idx * torch.ones(aff_matrix.size(0)))
lev_id = Variable((level_idx - 1) * torch.ones(aff_matrix.size(0)))
if x.is_cuda:
pyr_id = pyr_id.cuda()
lev_id = lev_id.cuda()
pyr_idxs.append(pyr_id)
level_idxs.append(lev_id)
all_responses = torch.cat(top_responces, dim=0)
aff_m_scales = torch.cat(aff_matrices, dim=0)
pyr_idxs_scales = torch.cat(pyr_idxs, dim=0)
level_idxs_scale = torch.cat(level_idxs, dim=0)
if num_features_prefilter > 0 and num_features_prefilter < all_responses.size(0):
(all_responses, idxs) = torch.topk(all_responses, k=num_features_prefilter)
LAFs = torch.index_select(aff_m_scales, 0, idxs)
final_pyr_idxs = pyr_idxs_scales[idxs]
final_level_idxs = level_idxs_scale[idxs]
else:
(responses, LAFs, final_pyr_idxs, final_level_idxs) = (all_responses, aff_m_scales, pyr_idxs_scales, level_idxs_scale)
(responses, LAFs, final_pyr_idxs, final_level_idxs) = (all_responses, LAFs, final_pyr_idxs, final_level_idxs)
print(time.time() - t, 'detection multiscale')
t = time.time()
LAFs[:, 0:2, 0:2] = self.mrSize * LAFs[:, :, 0:2]
if self.num_Baum_iters > 0:
pe_time = 0
affnet_time = 0
pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
t = time.time()
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS=self.AffNet.PS)
pe_time += time.time() - t
t = time.time()
base_A = torch.eye(2).unsqueeze(0).expand(final_pyr_idxs.size(0), 2, 2)
if responses.is_cuda:
base_A = base_A.cuda()
base_A = Variable(base_A)
is_good = None
n_patches = patches_small.size(0)
for i in range(self.num_Baum_iters):
t = time.time()
A = batched_forward(self.AffNet, patches_small, 256)
is_good_current = 1
affnet_time += time.time() - t
if is_good is None:
is_good = is_good_current
else:
is_good = is_good * is_good_current
base_A = torch.bmm(A, base_A)
new_LAFs = torch.cat([torch.bmm(base_A, LAFs[:, :, 0:2]), LAFs[:, :, 2:]], dim=2)
if i != self.num_Baum_iters - 1:
pe_time += time.time() - t
t = time.time()
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, new_LAFs, PS=self.AffNet.PS)
pe_time += time.time() - t
(l1, l2) = batch_eig2x2(A)
ratio1 = torch.abs(l1 / (l2 + 1e-08))
converged_mask = (ratio1 <= 1.2) * (ratio1 >= 0.8)
(l1, l2) = batch_eig2x2(base_A)
ratio = torch.abs(l1 / (l2 + 1e-08))
idxs_mask = (ratio < 6.0) * (ratio > 1.0 / 6.0) * checkTouchBoundary(new_LAFs)
num_survived = idxs_mask.float().sum()
if self.num > 0 and num_survived.data.item() > self.num:
responses = responses * idxs_mask.float()
(responses, idxs) = torch.topk(responses, k=self.num)
else:
idxs = Variable(torch.nonzero(idxs_mask.data).view(-1).long())
responses = responses[idxs]
final_pyr_idxs = final_pyr_idxs[idxs]
final_level_idxs = final_level_idxs[idxs]
base_A = torch.index_select(base_A, 0, idxs)
LAFs = torch.index_select(LAFs, 0, idxs)
new_LAFs = torch.cat([torch.bmm(base_A, LAFs[:, :, 0:2]), LAFs[:, :, 2:]], dim=2)
print('affnet_time', affnet_time)
print('pe_time', pe_time)
(responses, LAFs, final_pyr_idxs, final_level_idxs) = (responses, new_LAFs, final_pyr_idxs, final_level_idxs)
print(time.time() - t, 'affine shape iters')
t = time.time()
if do_ori:
pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS=self.OriNet.PS)
max_iters = 1
for i in range(max_iters):
angles = self.OriNet(patches_small)
if len(angles.size()) > 2:
LAFs = torch.cat([torch.bmm(LAFs[:, :, :2], angles), LAFs[:, :, 2:]], dim=2)
else:
LAFs = torch.cat([torch.bmm(LAFs[:, :, :2], angles2A(angles).view(-1, 2, 2)), LAFs[:, :, 2:]], dim=2)
if i != max_iters:
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS=self.OriNet.PS)
LAFs = LAFs
return (denormalizeLAFs(LAFs, x.size(3), x.size(2)), responses)
|
affnet
|
positive
|
def open(self, update_remote=True):
<DeepExtract>
if not self.channel_buffer:
alias = config.server_aliases.get(self.subdomain)
if alias:
self.preferred_name = alias
elif config.short_buffer_names:
self.preferred_name = self.subdomain
else:
self.preferred_name = self.domain
self.channel_buffer = w.buffer_new(self.preferred_name, 'buffer_input_callback', 'EVENTROUTER', '', '')
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, 'localvar_set_type', 'server')
w.buffer_set(self.channel_buffer, 'localvar_set_nick', self.nick)
w.buffer_set(self.channel_buffer, 'localvar_set_server', self.preferred_name)
if w.config_string(w.config_get('irc.look.server_buffer')) == 'merge_with_core':
w.buffer_merge(self.channel_buffer, w.buffer_search_main())
</DeepExtract>
self.active = True
<DeepExtract>
if not self.got_history:
if self.team.connected:
self.clear_messages()
self.buffer_prnt('', 'getting channel history...', tagset='backlog')
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]['history'], {'channel': self.identifier, 'count': BACKLOG_SIZE}, team_hash=self.team.team_hash, channel_identifier=self.identifier, clear=True)
if not slow_queue:
self.eventrouter.receive(s)
else:
self.eventrouter.receive_slow(s)
self.got_history = True
</DeepExtract>
|
def open(self, update_remote=True):
if not self.channel_buffer:
alias = config.server_aliases.get(self.subdomain)
if alias:
self.preferred_name = alias
elif config.short_buffer_names:
self.preferred_name = self.subdomain
else:
self.preferred_name = self.domain
self.channel_buffer = w.buffer_new(self.preferred_name, 'buffer_input_callback', 'EVENTROUTER', '', '')
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, 'localvar_set_type', 'server')
w.buffer_set(self.channel_buffer, 'localvar_set_nick', self.nick)
w.buffer_set(self.channel_buffer, 'localvar_set_server', self.preferred_name)
if w.config_string(w.config_get('irc.look.server_buffer')) == 'merge_with_core':
w.buffer_merge(self.channel_buffer, w.buffer_search_main())
self.active = True
if not self.got_history:
if self.team.connected:
self.clear_messages()
self.buffer_prnt('', 'getting channel history...', tagset='backlog')
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]['history'], {'channel': self.identifier, 'count': BACKLOG_SIZE}, team_hash=self.team.team_hash, channel_identifier=self.identifier, clear=True)
if not slow_queue:
self.eventrouter.receive(s)
else:
self.eventrouter.receive_slow(s)
self.got_history = True
|
awesome-dots
|
positive
|
def load_checkpoint(model, optimizer, lr_scheduler, args, load_optimizer_states=True):
"""Load a model checkpoint."""
<DeepExtract>
tracker_filename = get_checkpoint_tracker_filename(args.load)
if not os.path.isfile(tracker_filename):
print_rank_0('WARNING: could not find the metadata file {} '.format(tracker_filename))
if os.path.isdir(args.load):
path = os.path.normpath(args.load)
(load_dir, tag) = os.path.split(path)
print_rank_0('Try to directly load the checkpoint from the directory')
(load_dir, iteration, release, success) = (load_dir, tag, False, True)
print_rank_0(' will not load any checkpoints and will start from random')
(load_dir, iteration, release, success) = (args.load, 0, False, False)
iteration = 0
release = False
with open(tracker_filename, 'r') as f:
metastring = f.read().strip()
try:
iteration = int(metastring)
except ValueError:
release = metastring == 'release'
if not release:
print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(tracker_filename))
exit()
assert iteration > 0 or release, 'error parsing metadata file {}'.format(tracker_filename)
(load_dir, iteration, release, success) = (args.load, iteration, release, True)
</DeepExtract>
if not success:
return 0
if args.deepspeed:
(checkpoint_name, sd) = model.load_checkpoint(load_dir, iteration, load_optimizer_states=not args.no_load_optim, load_lr_scheduler_states=not args.no_load_lr_scheduler)
if not args.no_load_lr_scheduler and 'client_lr_scheduler' in sd:
lr_scheduler.load_state_dict(sd['client_lr_scheduler'])
<DeepExtract>
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('Load lr scheduler state', flush=True)
else:
print('Load lr scheduler state', flush=True)
</DeepExtract>
if checkpoint_name is None:
if mpu.get_data_parallel_rank() == 0:
print('Unable to load checkpoint.')
return iteration
else:
<DeepExtract>
if release:
d = 'release'
else:
d = '{}'.format(iteration)
if zero:
dp_rank = mpu.get_data_parallel_rank()
d += '_zero_dp_rank_{}'.format(dp_rank)
checkpoint_name = os.path.join(load_dir, d, 'mp_rank_{:02d}_model_states.pt'.format(mpu.get_model_parallel_rank()))
</DeepExtract>
if mpu.get_data_parallel_rank() == 0:
print('global rank {} is loading checkpoint {}'.format(torch.distributed.get_rank(), checkpoint_name))
sd = torch.load(checkpoint_name, map_location='cpu')
if isinstance(model, torchDDP):
model = model.module
try:
model.load_state_dict(sd['module'])
except KeyError:
<DeepExtract>
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('A metadata file exists but unable to load model from checkpoint {}, exiting'.format(checkpoint_name), flush=True)
else:
print('A metadata file exists but unable to load model from checkpoint {}, exiting'.format(checkpoint_name), flush=True)
</DeepExtract>
exit()
if not release:
try:
if not args.no_load_optim and optimizer is not None and load_optimizer_states:
optimizer.load_state_dict(sd['optimizer'])
if not args.no_load_lr_scheduler and lr_scheduler is not None:
lr_scheduler.load_state_dict(sd['lr_scheduler'])
except KeyError:
<DeepExtract>
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('Unable to load optimizer from checkpoint {}, exiting. Specify --no-load-optim or --finetune to prevent attempting to load the optimizer state.'.format(checkpoint_name), flush=True)
else:
print('Unable to load optimizer from checkpoint {}, exiting. Specify --no-load-optim or --finetune to prevent attempting to load the optimizer state.'.format(checkpoint_name), flush=True)
</DeepExtract>
exit()
if args.finetune or release:
iteration = 0
else:
try:
iteration = sd['iteration']
except KeyError:
try:
iteration = sd['total_iters']
except KeyError:
<DeepExtract>
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('A metadata file exists but Unable to load iteration from checkpoint {}, exiting'.format(checkpoint_name), flush=True)
else:
print('A metadata file exists but Unable to load iteration from checkpoint {}, exiting'.format(checkpoint_name), flush=True)
</DeepExtract>
exit()
if not release and (not args.finetune) and (not args.no_load_rng):
try:
random.setstate(sd['random_rng_state'])
np.random.set_state(sd['np_rng_state'])
torch.set_rng_state(sd['torch_rng_state'])
torch.cuda.set_rng_state(sd['cuda_rng_state'])
mpu.get_cuda_rng_tracker().set_states(sd['rng_tracker_states'])
except KeyError:
<DeepExtract>
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('Unable to load optimizer from checkpoint {}, exiting. Specify --no-load-rng or --finetune to prevent attempting to load the random state.'.format(checkpoint_name), flush=True)
else:
print('Unable to load optimizer from checkpoint {}, exiting. Specify --no-load-rng or --finetune to prevent attempting to load the random state.'.format(checkpoint_name), flush=True)
</DeepExtract>
exit()
if mpu.get_data_parallel_rank() == 0:
print(' successfully loaded {}'.format(checkpoint_name))
return iteration
|
def load_checkpoint(model, optimizer, lr_scheduler, args, load_optimizer_states=True):
"""Load a model checkpoint."""
tracker_filename = get_checkpoint_tracker_filename(args.load)
if not os.path.isfile(tracker_filename):
print_rank_0('WARNING: could not find the metadata file {} '.format(tracker_filename))
if os.path.isdir(args.load):
path = os.path.normpath(args.load)
(load_dir, tag) = os.path.split(path)
print_rank_0('Try to directly load the checkpoint from the directory')
(load_dir, iteration, release, success) = (load_dir, tag, False, True)
print_rank_0(' will not load any checkpoints and will start from random')
(load_dir, iteration, release, success) = (args.load, 0, False, False)
iteration = 0
release = False
with open(tracker_filename, 'r') as f:
metastring = f.read().strip()
try:
iteration = int(metastring)
except ValueError:
release = metastring == 'release'
if not release:
print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(tracker_filename))
exit()
assert iteration > 0 or release, 'error parsing metadata file {}'.format(tracker_filename)
(load_dir, iteration, release, success) = (args.load, iteration, release, True)
if not success:
return 0
if args.deepspeed:
(checkpoint_name, sd) = model.load_checkpoint(load_dir, iteration, load_optimizer_states=not args.no_load_optim, load_lr_scheduler_states=not args.no_load_lr_scheduler)
if not args.no_load_lr_scheduler and 'client_lr_scheduler' in sd:
lr_scheduler.load_state_dict(sd['client_lr_scheduler'])
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('Load lr scheduler state', flush=True)
else:
print('Load lr scheduler state', flush=True)
if checkpoint_name is None:
if mpu.get_data_parallel_rank() == 0:
print('Unable to load checkpoint.')
return iteration
else:
if release:
d = 'release'
else:
d = '{}'.format(iteration)
if zero:
dp_rank = mpu.get_data_parallel_rank()
d += '_zero_dp_rank_{}'.format(dp_rank)
checkpoint_name = os.path.join(load_dir, d, 'mp_rank_{:02d}_model_states.pt'.format(mpu.get_model_parallel_rank()))
if mpu.get_data_parallel_rank() == 0:
print('global rank {} is loading checkpoint {}'.format(torch.distributed.get_rank(), checkpoint_name))
sd = torch.load(checkpoint_name, map_location='cpu')
if isinstance(model, torchDDP):
model = model.module
try:
model.load_state_dict(sd['module'])
except KeyError:
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('A metadata file exists but unable to load model from checkpoint {}, exiting'.format(checkpoint_name), flush=True)
else:
print('A metadata file exists but unable to load model from checkpoint {}, exiting'.format(checkpoint_name), flush=True)
exit()
if not release:
try:
if not args.no_load_optim and optimizer is not None and load_optimizer_states:
optimizer.load_state_dict(sd['optimizer'])
if not args.no_load_lr_scheduler and lr_scheduler is not None:
lr_scheduler.load_state_dict(sd['lr_scheduler'])
except KeyError:
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('Unable to load optimizer from checkpoint {}, exiting. Specify --no-load-optim or --finetune to prevent attempting to load the optimizer state.'.format(checkpoint_name), flush=True)
else:
print('Unable to load optimizer from checkpoint {}, exiting. Specify --no-load-optim or --finetune to prevent attempting to load the optimizer state.'.format(checkpoint_name), flush=True)
exit()
if args.finetune or release:
iteration = 0
else:
try:
iteration = sd['iteration']
except KeyError:
try:
iteration = sd['total_iters']
except KeyError:
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('A metadata file exists but Unable to load iteration from checkpoint {}, exiting'.format(checkpoint_name), flush=True)
else:
print('A metadata file exists but Unable to load iteration from checkpoint {}, exiting'.format(checkpoint_name), flush=True)
exit()
if not release and (not args.finetune) and (not args.no_load_rng):
try:
random.setstate(sd['random_rng_state'])
np.random.set_state(sd['np_rng_state'])
torch.set_rng_state(sd['torch_rng_state'])
torch.cuda.set_rng_state(sd['cuda_rng_state'])
mpu.get_cuda_rng_tracker().set_states(sd['rng_tracker_states'])
except KeyError:
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print('Unable to load optimizer from checkpoint {}, exiting. Specify --no-load-rng or --finetune to prevent attempting to load the random state.'.format(checkpoint_name), flush=True)
else:
print('Unable to load optimizer from checkpoint {}, exiting. Specify --no-load-rng or --finetune to prevent attempting to load the random state.'.format(checkpoint_name), flush=True)
exit()
if mpu.get_data_parallel_rank() == 0:
print(' successfully loaded {}'.format(checkpoint_name))
return iteration
|
Chinese-Transformer-XL
|
positive
|
def check_tasks_not_updated(service_name: str, prefix: str, old_task_ids: Iterable[str]) -> None:
sdk_plan.wait_for_completed_deployment(service_name)
sdk_plan.wait_for_completed_recovery(service_name)
<DeepExtract>
log.info('Waiting until [{}] is active'.format(service_name))
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res)
def _wait_for_active_framework() -> bool:
return len(list(filter(lambda fwk: fwk['name'] == service_name and fwk['active'], sdk_cmd.cluster_request('GET', '/mesos/frameworks').json()['frameworks']))) > 0
_wait_for_active_framework()
</DeepExtract>
<DeepExtract>
task_ids = [t.id for t in get_service_tasks(service_name, task_prefix=prefix)]
</DeepExtract>
task_sets = '\n- Old tasks: {}\n- Current tasks: {}'.format(sorted(old_task_ids), sorted(task_ids))
log.info('Checking tasks starting with "{}" have not been updated:{}'.format(prefix, task_sets))
assert set(old_task_ids).issubset(set(task_ids)), 'Tasks starting with "{}" were updated:{}'.format(prefix, task_sets)
|
def check_tasks_not_updated(service_name: str, prefix: str, old_task_ids: Iterable[str]) -> None:
sdk_plan.wait_for_completed_deployment(service_name)
sdk_plan.wait_for_completed_recovery(service_name)
log.info('Waiting until [{}] is active'.format(service_name))
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res)
def _wait_for_active_framework() -> bool:
return len(list(filter(lambda fwk: fwk['name'] == service_name and fwk['active'], sdk_cmd.cluster_request('GET', '/mesos/frameworks').json()['frameworks']))) > 0
_wait_for_active_framework()
task_ids = [t.id for t in get_service_tasks(service_name, task_prefix=prefix)]
task_sets = '\n- Old tasks: {}\n- Current tasks: {}'.format(sorted(old_task_ids), sorted(task_ids))
log.info('Checking tasks starting with "{}" have not been updated:{}'.format(prefix, task_sets))
assert set(old_task_ids).issubset(set(task_ids)), 'Tasks starting with "{}" were updated:{}'.format(prefix, task_sets)
|
dcos-kafka-service
|
positive
|
def _assert_invalid(self, testline: str, expect_err: str=''):
self.protocol.data_received(testline.encode('ascii'))
<DeepExtract>
loop.call_later(stop_after, protocol._handler_coroutine.cancel)
with suppress(asyncio.CancelledError):
loop.run_until_complete(protocol._handler_coroutine)
</DeepExtract>
handler: ProxyPeekerHandler = self.protocol.event_handler
assert not self.protocol.session.proxy_data.valid
assert not handler.called
assert self.transport.close.called
assert self.protocol.session.proxy_data.error == expect_err
|
def _assert_invalid(self, testline: str, expect_err: str=''):
self.protocol.data_received(testline.encode('ascii'))
loop.call_later(stop_after, protocol._handler_coroutine.cancel)
with suppress(asyncio.CancelledError):
loop.run_until_complete(protocol._handler_coroutine)
handler: ProxyPeekerHandler = self.protocol.event_handler
assert not self.protocol.session.proxy_data.valid
assert not handler.called
assert self.transport.close.called
assert self.protocol.session.proxy_data.error == expect_err
|
aiosmtpd
|
positive
|
def test_pid_file(self, unit):
""" support for the testsuite.py """
<DeepExtract>
conf = self.load_unit_conf(unit)
if conf is not None:
conf = conf
conf = self.default_unit_conf(unit)
</DeepExtract>
return self.pid_file_from(conf) or self.get_status_file_from(conf)
|
def test_pid_file(self, unit):
""" support for the testsuite.py """
conf = self.load_unit_conf(unit)
if conf is not None:
conf = conf
conf = self.default_unit_conf(unit)
return self.pid_file_from(conf) or self.get_status_file_from(conf)
|
deployment
|
positive
|
def update_list_html(self, files_list):
files_url = f'https://{settings.APP_HOST}{self.dataset.files_url}'
content = f"""<html><head><meta http-equiv="Refresh" content="0; url='{files_url}'" /></head></html>"""
temp_file = NamedTemporaryFile(delete=False, mode='w')
temp_file.write(content)
temp_file.close()
<DeepExtract>
print('\nUploading list HTML...', *args, **kwargs)
</DeepExtract>
dest_name = f'{self.dataset.slug}/{settings.MINIO_DATASET_TABLES_FILES_LIST_FILENAME}'
progress = MinioProgress()
self.minio.fput_object(self.bucket, dest_name, temp_file.name, progress=progress, content_type='text/html; charset=utf-8')
os.remove(temp_file.name)
return f'{settings.AWS_S3_ENDPOINT_URL}{self.bucket}/{dest_name}'
|
def update_list_html(self, files_list):
files_url = f'https://{settings.APP_HOST}{self.dataset.files_url}'
content = f"""<html><head><meta http-equiv="Refresh" content="0; url='{files_url}'" /></head></html>"""
temp_file = NamedTemporaryFile(delete=False, mode='w')
temp_file.write(content)
temp_file.close()
print('\nUploading list HTML...', *args, **kwargs)
dest_name = f'{self.dataset.slug}/{settings.MINIO_DATASET_TABLES_FILES_LIST_FILENAME}'
progress = MinioProgress()
self.minio.fput_object(self.bucket, dest_name, temp_file.name, progress=progress, content_type='text/html; charset=utf-8')
os.remove(temp_file.name)
return f'{settings.AWS_S3_ENDPOINT_URL}{self.bucket}/{dest_name}'
|
brasil.io
|
positive
|
def visit_bullet_list(self, node):
if 'sphx-glr-horizontal' in node.get('classes', []):
<DeepExtract>
if self.v2:
self.body.append(self._start_tag(node, 'ac:layout'))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'ac:layout-section', **{'ac:type': 'three_equal', 'ac:breakout-mode': 'default'}))
self.context.append(self._end_tag(node))
self._hlist_columns_left = 3
else:
self.body.append(self._start_tag(node, 'table', suffix=self.nl))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tbody', suffix=self.nl, **{'style': 'border: none'}))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tr', suffix=self.nl))
self.context.append(self._end_tag(node))
</DeepExtract>
self._list_context.append('sphx-glr-horizontal')
return
attribs = {}
<DeepExtract>
has_complex = False
for child in node.children:
if len(child.children) > 2 or (len(child.children) == 2 and (not isinstance(child.children[1], (nodes.bullet_list, nodes.enumerated_list)))):
has_complex = True
break
if has_complex:
for child in node.children:
child.__confluence_list_item_margin = True
if isinstance(node.parent, nodes.list_item):
try:
if node.parent.__confluence_list_item_margin:
attribs['style'] = f'margin-top: {FCMMO}px;'
except AttributeError:
pass
</DeepExtract>
self.body.append(self._start_tag(node, 'ul', suffix=self.nl, **attribs))
self.context.append(self._end_tag(node))
self._list_context.append('')
|
def visit_bullet_list(self, node):
if 'sphx-glr-horizontal' in node.get('classes', []):
if self.v2:
self.body.append(self._start_tag(node, 'ac:layout'))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'ac:layout-section', **{'ac:type': 'three_equal', 'ac:breakout-mode': 'default'}))
self.context.append(self._end_tag(node))
self._hlist_columns_left = 3
else:
self.body.append(self._start_tag(node, 'table', suffix=self.nl))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tbody', suffix=self.nl, **{'style': 'border: none'}))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tr', suffix=self.nl))
self.context.append(self._end_tag(node))
self._list_context.append('sphx-glr-horizontal')
return
attribs = {}
has_complex = False
for child in node.children:
if len(child.children) > 2 or (len(child.children) == 2 and (not isinstance(child.children[1], (nodes.bullet_list, nodes.enumerated_list)))):
has_complex = True
break
if has_complex:
for child in node.children:
child.__confluence_list_item_margin = True
if isinstance(node.parent, nodes.list_item):
try:
if node.parent.__confluence_list_item_margin:
attribs['style'] = f'margin-top: {FCMMO}px;'
except AttributeError:
pass
self.body.append(self._start_tag(node, 'ul', suffix=self.nl, **attribs))
self.context.append(self._end_tag(node))
self._list_context.append('')
|
confluencebuilder
|
positive
|
@skipIf(django.VERSION >= (1, 8), 'IPAddressField is deprecated with Django >= 1.8')
def test_auto_ip_address(self):
<DeepExtract>
class Form(forms.ModelForm):
class Meta:
model = AllFields
fields = ('ip_address',)
form_obj = Form
</DeepExtract>
<DeepExtract>
self.assertIsInstance(form_obj.base_fields['ip_address'], forms.IPAddressField)
</DeepExtract>
|
@skipIf(django.VERSION >= (1, 8), 'IPAddressField is deprecated with Django >= 1.8')
def test_auto_ip_address(self):
class Form(forms.ModelForm):
class Meta:
model = AllFields
fields = ('ip_address',)
form_obj = Form
self.assertIsInstance(form_obj.base_fields['ip_address'], forms.IPAddressField)
|
django-floppyforms
|
positive
|
def test_pure_offline():
@deal.pure
def func(do):
if do:
http = urllib3.PoolManager()
http.request('GET', 'http://httpbin.org/robots.txt')
<DeepExtract>
if False:
print(False)
</DeepExtract>
with pytest.raises(deal.OfflineContractError):
<DeepExtract>
if True:
print(True)
</DeepExtract>
|
def test_pure_offline():
@deal.pure
def func(do):
if do:
http = urllib3.PoolManager()
http.request('GET', 'http://httpbin.org/robots.txt')
if False:
print(False)
with pytest.raises(deal.OfflineContractError):
if True:
print(True)
|
deal
|
positive
|
def _detect_time_without_format(time_list=None, original_list=None):
"""
Detects time in the following format
format: <prefix><Optional extra separator><hour><Optional separator><minutes>
where each part is in of one of the formats given against them
prefix: "by", "before", "after", "at", "on", "dot", "exactly", "exact"
hour: h, hh
minute: m, mm
extra separator: "-", space
separator: ":", ".", space
The meridiem is given assuming the time detected to be in within 12 hour span from the current timestamp.
For example,
If it is 5:33 AM at the moment of invoking this method, 6:20 would be assigned 'AM'
as 5:33 AM <= 6:20 AM < 5:33 PM and 4:30 would be assigned 'PM' as 5:33 AM <= 4:30 PM < 5:33 PM
Args:
time_list (list): Optional, list to store dictionaries of detected time entities
original_list (list): Optional, list to store corresponding substrings of given text which were detected as
time entities
Returns:
A tuple of two lists with first list containing the detected time entities and second list containing their
corresponding substrings in the given text.
"""
if time_list is None:
time_list = []
if original_list is None:
original_list = []
patterns = re.findall('\\b((?:by|before|after|at|dot|exactly|exact)[\\s-]*((0?[1-9]|1[0-2])[:.\\s]*([0-5][0-9])?)\\s*({timezone})?)\\s'.format(timezone=self.timezone_choices), self.processed_text.lower())
for pattern in patterns:
original = pattern[0].strip()
t1 = pattern[2]
t2 = 0
tz = pattern[4] or None
if tz:
<DeepExtract>
timezone_code = self.timezones_map[tz].value
data_directory_path = os.path.join(os.path.dirname(os.path.abspath(__file__)).rstrip(os.sep), LANGUAGE_DATA_DIRECTORY)
timezone_data_path = os.path.join(data_directory_path, TIMEZONES_CONSTANT_FILE)
columns = [TIMEZONES_CODE_COLUMN_NAME, TIMEZONES_ALL_REGIONS_COLUMN_NAME]
if os.path.exists(timezone_data_path):
timezones_df = pd.read_csv(timezone_data_path, usecols=columns, index_col=TIMEZONES_CODE_COLUMN_NAME, encoding='utf-8')
if re.search(self.timezone.zone, timezones_df.loc[timezone_code][TIMEZONES_ALL_REGIONS_COLUMN_NAME]):
tz = self.timezone.zone
else:
tz = self.timezones_map[tz].preferred
tz = self.timezone.zone
</DeepExtract>
if pattern[3]:
t2 = pattern[3]
<DeepExtract>
if tz is not None:
new_timezone = get_timezone(tz)
else:
new_timezone = self.timezone or pytz.timezone('UTC')
current_datetime = datetime.datetime.now(new_timezone)
current_hour = current_datetime.hour
current_min = current_datetime.minute
if int(t1) == 0 or int(t1) >= TWELVE_HOUR:
meridiem = 'hrs'
if current_hour >= TWELVE_HOUR:
current_hour -= 12
if current_hour < int(t1) or (current_hour == int(t1) and current_min < int(t2)):
meridiem = PM_MERIDIEM
elif current_hour > int(t1):
meridiem = PM_MERIDIEM
elif current_hour == int(t1) and current_min > int(t2):
meridiem = PM_MERIDIEM
meridiem = AM_MERIDIEM
</DeepExtract>
time = {'hh': int(t1), 'mm': int(t2), 'nn': meridiem, 'tz': tz or (None if not self.timezone else self.timezone.zone)}
time_list.append(time)
original_list.append(original)
return (time_list, original_list)
|
def _detect_time_without_format(time_list=None, original_list=None):
"""
Detects time in the following format
format: <prefix><Optional extra separator><hour><Optional separator><minutes>
where each part is in of one of the formats given against them
prefix: "by", "before", "after", "at", "on", "dot", "exactly", "exact"
hour: h, hh
minute: m, mm
extra separator: "-", space
separator: ":", ".", space
The meridiem is given assuming the time detected to be in within 12 hour span from the current timestamp.
For example,
If it is 5:33 AM at the moment of invoking this method, 6:20 would be assigned 'AM'
as 5:33 AM <= 6:20 AM < 5:33 PM and 4:30 would be assigned 'PM' as 5:33 AM <= 4:30 PM < 5:33 PM
Args:
time_list (list): Optional, list to store dictionaries of detected time entities
original_list (list): Optional, list to store corresponding substrings of given text which were detected as
time entities
Returns:
A tuple of two lists with first list containing the detected time entities and second list containing their
corresponding substrings in the given text.
"""
if time_list is None:
time_list = []
if original_list is None:
original_list = []
patterns = re.findall('\\b((?:by|before|after|at|dot|exactly|exact)[\\s-]*((0?[1-9]|1[0-2])[:.\\s]*([0-5][0-9])?)\\s*({timezone})?)\\s'.format(timezone=self.timezone_choices), self.processed_text.lower())
for pattern in patterns:
original = pattern[0].strip()
t1 = pattern[2]
t2 = 0
tz = pattern[4] or None
if tz:
timezone_code = self.timezones_map[tz].value
data_directory_path = os.path.join(os.path.dirname(os.path.abspath(__file__)).rstrip(os.sep), LANGUAGE_DATA_DIRECTORY)
timezone_data_path = os.path.join(data_directory_path, TIMEZONES_CONSTANT_FILE)
columns = [TIMEZONES_CODE_COLUMN_NAME, TIMEZONES_ALL_REGIONS_COLUMN_NAME]
if os.path.exists(timezone_data_path):
timezones_df = pd.read_csv(timezone_data_path, usecols=columns, index_col=TIMEZONES_CODE_COLUMN_NAME, encoding='utf-8')
if re.search(self.timezone.zone, timezones_df.loc[timezone_code][TIMEZONES_ALL_REGIONS_COLUMN_NAME]):
tz = self.timezone.zone
else:
tz = self.timezones_map[tz].preferred
tz = self.timezone.zone
if pattern[3]:
t2 = pattern[3]
if tz is not None:
new_timezone = get_timezone(tz)
else:
new_timezone = self.timezone or pytz.timezone('UTC')
current_datetime = datetime.datetime.now(new_timezone)
current_hour = current_datetime.hour
current_min = current_datetime.minute
if int(t1) == 0 or int(t1) >= TWELVE_HOUR:
meridiem = 'hrs'
if current_hour >= TWELVE_HOUR:
current_hour -= 12
if current_hour < int(t1) or (current_hour == int(t1) and current_min < int(t2)):
meridiem = PM_MERIDIEM
elif current_hour > int(t1):
meridiem = PM_MERIDIEM
elif current_hour == int(t1) and current_min > int(t2):
meridiem = PM_MERIDIEM
meridiem = AM_MERIDIEM
time = {'hh': int(t1), 'mm': int(t2), 'nn': meridiem, 'tz': tz or (None if not self.timezone else self.timezone.zone)}
time_list.append(time)
original_list.append(original)
return (time_list, original_list)
|
chatbot_ner
|
positive
|
def split(input_pdf, output_path, split_options):
pdf = PdfFileReader(input_pdf)
pdf_writer = PdfFileWriter()
if ',' in split_options:
pages = [page for page in split_options.split(',') if page]
for page in pages:
pdf_writer.addPage(pdf.getPage(int(page)))
elif '-' in split_options:
(page_begin, page_end) = split_options.split('-')
page_begin = int(page_begin)
page_end = int(page_end)
<DeepExtract>
if page_begin < 0 or page_begin == 1:
page_begin = 0
if page_begin > 1:
page_begin -= 1
page_begin = page_begin
</DeepExtract>
for page in range(page_begin, page_end):
pdf_writer.addPage(pdf.getPage(page))
else:
page_begin = int(split_options)
<DeepExtract>
if page_begin < 0 or page_begin == 1:
page_begin = 0
if page_begin > 1:
page_begin -= 1
page_begin = page_begin
</DeepExtract>
pdf_writer.addPage(pdf.getPage(page_begin))
with open(output_path, 'wb') as out:
pdf_writer.write(out)
message = f'PDF split successfully to {output_path}'
<DeepExtract>
with wx.MessageDialog(None, message=message, caption='Split Finished', style=wx.ICON_INFORMATION) as dlg:
dlg.ShowModal()
</DeepExtract>
|
def split(input_pdf, output_path, split_options):
pdf = PdfFileReader(input_pdf)
pdf_writer = PdfFileWriter()
if ',' in split_options:
pages = [page for page in split_options.split(',') if page]
for page in pages:
pdf_writer.addPage(pdf.getPage(int(page)))
elif '-' in split_options:
(page_begin, page_end) = split_options.split('-')
page_begin = int(page_begin)
page_end = int(page_end)
if page_begin < 0 or page_begin == 1:
page_begin = 0
if page_begin > 1:
page_begin -= 1
page_begin = page_begin
for page in range(page_begin, page_end):
pdf_writer.addPage(pdf.getPage(page))
else:
page_begin = int(split_options)
if page_begin < 0 or page_begin == 1:
page_begin = 0
if page_begin > 1:
page_begin -= 1
page_begin = page_begin
pdf_writer.addPage(pdf.getPage(page_begin))
with open(output_path, 'wb') as out:
pdf_writer.write(out)
message = f'PDF split successfully to {output_path}'
with wx.MessageDialog(None, message=message, caption='Split Finished', style=wx.ICON_INFORMATION) as dlg:
dlg.ShowModal()
|
applications_with_wxpython
|
positive
|
def __init__(self, num_layers, hidden_size, num_attention_heads, attention_dropout_prob, output_dropout_prob, checkpoint_activations, checkpoint_num_layers=1, layernorm_epsilon=1e-05, init_method_std=0.02, use_scaled_init_for_output_weights=True):
super(GPT2ParallelTransformer, self).__init__()
self.checkpoint_activations = checkpoint_activations
self.checkpoint_num_layers = checkpoint_num_layers
output_layer_init_method = None
if use_scaled_init_for_output_weights:
<DeepExtract>
std = init_method_std / math.sqrt(2.0 * num_layers)
def init_(tensor):
output_layer_init_method = torch.nn.init.normal_(tensor, mean=0.0, std=std)
output_layer_init_method = init_
</DeepExtract>
def get_layer():
return GPT2ParallelTransformerLayer(hidden_size, num_attention_heads, attention_dropout_prob, output_dropout_prob, layernorm_epsilon, unscaled_init_method(init_method_std), output_layer_init_method=output_layer_init_method)
self.layers = torch.nn.ModuleList([get_layer() for _ in range(num_layers)])
self.final_layernorm = LayerNorm(hidden_size, eps=layernorm_epsilon)
if deepspeed.checkpointing.is_configured():
global get_cuda_rng_tracker, checkpoint
get_cuda_rng_tracker = deepspeed.checkpointing.get_cuda_rng_tracker
checkpoint = deepspeed.checkpointing.checkpoint
|
def __init__(self, num_layers, hidden_size, num_attention_heads, attention_dropout_prob, output_dropout_prob, checkpoint_activations, checkpoint_num_layers=1, layernorm_epsilon=1e-05, init_method_std=0.02, use_scaled_init_for_output_weights=True):
super(GPT2ParallelTransformer, self).__init__()
self.checkpoint_activations = checkpoint_activations
self.checkpoint_num_layers = checkpoint_num_layers
output_layer_init_method = None
if use_scaled_init_for_output_weights:
std = init_method_std / math.sqrt(2.0 * num_layers)
def init_(tensor):
output_layer_init_method = torch.nn.init.normal_(tensor, mean=0.0, std=std)
output_layer_init_method = init_
def get_layer():
return GPT2ParallelTransformerLayer(hidden_size, num_attention_heads, attention_dropout_prob, output_dropout_prob, layernorm_epsilon, unscaled_init_method(init_method_std), output_layer_init_method=output_layer_init_method)
self.layers = torch.nn.ModuleList([get_layer() for _ in range(num_layers)])
self.final_layernorm = LayerNorm(hidden_size, eps=layernorm_epsilon)
if deepspeed.checkpointing.is_configured():
global get_cuda_rng_tracker, checkpoint
get_cuda_rng_tracker = deepspeed.checkpointing.get_cuda_rng_tracker
checkpoint = deepspeed.checkpointing.checkpoint
|
CPM-1-Generate
|
positive
|
def __init__(self, model_dir, gpus):
self.model_dir = model_dir
self.gpus = gpus
optimizer = Adam(0.0001, 0.5)
<DeepExtract>
def conv_block_d(input_tensor, f, use_instance_norm=True):
x = input_tensor
x = Conv2D(f, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
if use_instance_norm:
x = inst_norm()(x)
x = LeakyReLU(alpha=0.2)(x)
(self.netDA, self.netDB) = x
def Discriminator(nc_in, input_size=128):
inp = Input(shape=(input_size, input_size, nc_in))
x = conv_block_d(inp, 64, False)
x = conv_block_d(x, 128, True)
x = conv_block_d(x, 256, True)
x = conv_block_d(x, 512, True)
out = Conv2D(1, kernel_size=4, kernel_initializer=conv_init, use_bias=False, padding='same', activation='sigmoid')(x)
(self.netDA, self.netDB) = Model(inputs=[inp], outputs=out)
netDA = Discriminator(self.nc_D_inp)
netDB = Discriminator(self.nc_D_inp)
try:
netDA.load_weights(str(self.model_dir / hdf['netDAH5']))
netDB.load_weights(str(self.model_dir / hdf['netDBH5']))
print('Discriminator models loaded.')
except:
print('Discriminator weights files not found.')
pass
(self.netDA, self.netDB) = (netDA, netDB)
</DeepExtract>
<DeepExtract>
def conv_block(input_tensor, f, use_instance_norm=True):
x = input_tensor
x = SeparableConv2D(f, kernel_size=3, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
if use_instance_norm:
x = inst_norm()(x)
x = Activation('relu')(x)
(self.netGA, self.netGB) = x
def res_block(input_tensor, f, dilation=1):
x = input_tensor
x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding='same', dilation_rate=dilation)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding='same', dilation_rate=dilation)(x)
x = add([x, input_tensor])
(self.netGA, self.netGB) = x
def upscale_ps(filters, use_instance_norm=True):
def block(x, use_instance_norm=use_instance_norm):
x = Conv2D(filters * 4, kernel_size=3, use_bias=False, kernel_initializer=RandomNormal(0, 0.02), padding='same')(x)
if use_instance_norm:
x = inst_norm()(x)
x = LeakyReLU(0.1)(x)
x = PixelShuffler()(x)
(self.netGA, self.netGB) = x
(self.netGA, self.netGB) = block
def Encoder(nc_in=3, input_size=128):
inp = Input(shape=(input_size, input_size, nc_in))
x = Conv2D(32, kernel_size=5, kernel_initializer=conv_init, use_bias=False, padding='same')(inp)
x = conv_block(x, 64, use_instance_norm=False)
x = conv_block(x, 128)
x = conv_block(x, 256)
x = conv_block(x, 512)
x = conv_block(x, 1024)
x = Dense(1024)(Flatten()(x))
x = Dense(4 * 4 * 1024)(x)
x = Reshape((4, 4, 1024))(x)
out = upscale_ps(512)(x)
(self.netGA, self.netGB) = Model(inputs=inp, outputs=out)
def Decoder_ps(nc_in=512, input_size=8):
input_ = Input(shape=(input_size, input_size, nc_in))
x = input_
x = upscale_ps(256)(x)
x = upscale_ps(128)(x)
x = upscale_ps(64)(x)
x = res_block(x, 64, dilation=2)
out64 = Conv2D(64, kernel_size=3, padding='same')(x)
out64 = LeakyReLU(alpha=0.1)(out64)
out64 = Conv2D(3, kernel_size=5, padding='same', activation='tanh')(out64)
x = upscale_ps(32)(x)
x = res_block(x, 32)
x = res_block(x, 32)
alpha = Conv2D(1, kernel_size=5, padding='same', activation='sigmoid')(x)
rgb = Conv2D(3, kernel_size=5, padding='same', activation='tanh')(x)
out = concatenate([alpha, rgb])
(self.netGA, self.netGB) = Model(input_, [out, out64])
encoder = Encoder()
decoder_A = Decoder_ps()
decoder_B = Decoder_ps()
x = Input(shape=self.img_shape)
netGA = Model(x, decoder_A(encoder(x)))
netGB = Model(x, decoder_B(encoder(x)))
netGA.output_names = ['netGA_out_1', 'netGA_out_2']
netGB.output_names = ['netGB_out_1', 'netGB_out_2']
self.netGA_sm = netGA
self.netGB_sm = netGB
try:
netGA.load_weights(str(self.model_dir / hdf['netGAH5']))
netGB.load_weights(str(self.model_dir / hdf['netGBH5']))
print('Generator models loaded.')
except:
print('Generator weights files not found.')
pass
if self.gpus > 1:
netGA = multi_gpu_model(self.netGA_sm, self.gpus)
netGB = multi_gpu_model(self.netGB_sm, self.gpus)
(self.netGA, self.netGB) = (netGA, netGB)
</DeepExtract>
|
def __init__(self, model_dir, gpus):
self.model_dir = model_dir
self.gpus = gpus
optimizer = Adam(0.0001, 0.5)
def conv_block_d(input_tensor, f, use_instance_norm=True):
x = input_tensor
x = Conv2D(f, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
if use_instance_norm:
x = inst_norm()(x)
x = LeakyReLU(alpha=0.2)(x)
(self.netDA, self.netDB) = x
def Discriminator(nc_in, input_size=128):
inp = Input(shape=(input_size, input_size, nc_in))
x = conv_block_d(inp, 64, False)
x = conv_block_d(x, 128, True)
x = conv_block_d(x, 256, True)
x = conv_block_d(x, 512, True)
out = Conv2D(1, kernel_size=4, kernel_initializer=conv_init, use_bias=False, padding='same', activation='sigmoid')(x)
(self.netDA, self.netDB) = Model(inputs=[inp], outputs=out)
netDA = Discriminator(self.nc_D_inp)
netDB = Discriminator(self.nc_D_inp)
try:
netDA.load_weights(str(self.model_dir / hdf['netDAH5']))
netDB.load_weights(str(self.model_dir / hdf['netDBH5']))
print('Discriminator models loaded.')
except:
print('Discriminator weights files not found.')
pass
(self.netDA, self.netDB) = (netDA, netDB)
def conv_block(input_tensor, f, use_instance_norm=True):
x = input_tensor
x = SeparableConv2D(f, kernel_size=3, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
if use_instance_norm:
x = inst_norm()(x)
x = Activation('relu')(x)
(self.netGA, self.netGB) = x
def res_block(input_tensor, f, dilation=1):
x = input_tensor
x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding='same', dilation_rate=dilation)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(f, kernel_size=3, kernel_initializer=conv_init, use_bias=False, padding='same', dilation_rate=dilation)(x)
x = add([x, input_tensor])
(self.netGA, self.netGB) = x
def upscale_ps(filters, use_instance_norm=True):
def block(x, use_instance_norm=use_instance_norm):
x = Conv2D(filters * 4, kernel_size=3, use_bias=False, kernel_initializer=RandomNormal(0, 0.02), padding='same')(x)
if use_instance_norm:
x = inst_norm()(x)
x = LeakyReLU(0.1)(x)
x = PixelShuffler()(x)
(self.netGA, self.netGB) = x
(self.netGA, self.netGB) = block
def Encoder(nc_in=3, input_size=128):
inp = Input(shape=(input_size, input_size, nc_in))
x = Conv2D(32, kernel_size=5, kernel_initializer=conv_init, use_bias=False, padding='same')(inp)
x = conv_block(x, 64, use_instance_norm=False)
x = conv_block(x, 128)
x = conv_block(x, 256)
x = conv_block(x, 512)
x = conv_block(x, 1024)
x = Dense(1024)(Flatten()(x))
x = Dense(4 * 4 * 1024)(x)
x = Reshape((4, 4, 1024))(x)
out = upscale_ps(512)(x)
(self.netGA, self.netGB) = Model(inputs=inp, outputs=out)
def Decoder_ps(nc_in=512, input_size=8):
input_ = Input(shape=(input_size, input_size, nc_in))
x = input_
x = upscale_ps(256)(x)
x = upscale_ps(128)(x)
x = upscale_ps(64)(x)
x = res_block(x, 64, dilation=2)
out64 = Conv2D(64, kernel_size=3, padding='same')(x)
out64 = LeakyReLU(alpha=0.1)(out64)
out64 = Conv2D(3, kernel_size=5, padding='same', activation='tanh')(out64)
x = upscale_ps(32)(x)
x = res_block(x, 32)
x = res_block(x, 32)
alpha = Conv2D(1, kernel_size=5, padding='same', activation='sigmoid')(x)
rgb = Conv2D(3, kernel_size=5, padding='same', activation='tanh')(x)
out = concatenate([alpha, rgb])
(self.netGA, self.netGB) = Model(input_, [out, out64])
encoder = Encoder()
decoder_A = Decoder_ps()
decoder_B = Decoder_ps()
x = Input(shape=self.img_shape)
netGA = Model(x, decoder_A(encoder(x)))
netGB = Model(x, decoder_B(encoder(x)))
netGA.output_names = ['netGA_out_1', 'netGA_out_2']
netGB.output_names = ['netGB_out_1', 'netGB_out_2']
self.netGA_sm = netGA
self.netGB_sm = netGB
try:
netGA.load_weights(str(self.model_dir / hdf['netGAH5']))
netGB.load_weights(str(self.model_dir / hdf['netGBH5']))
print('Generator models loaded.')
except:
print('Generator weights files not found.')
pass
if self.gpus > 1:
netGA = multi_gpu_model(self.netGA_sm, self.gpus)
netGB = multi_gpu_model(self.netGB_sm, self.gpus)
(self.netGA, self.netGB) = (netGA, netGB)
|
DeepFakeTutorial
|
positive
|
def _prepare_data(self, X, a):
"""
Extract the relevant parts for outcome model and weight model for the entire data matrix
Args:
X (pd.DataFrame): Covariate matrix of size (num_subjects, num_features).
a (pd.Series): Treatment assignment of size (num_subjects,).
Returns:
(pd.DataFrame, pd.DataFrame): X_outcome, X_weight
Data matrix for outcome model and data matrix weight model
"""
<DeepExtract>
outcome_covariates = self.outcome_covariates or X.columns
X_outcome = X[outcome_covariates]
X_outcome = X_outcome
</DeepExtract>
<DeepExtract>
weight_covariates = self.weight_covariates or X.columns
X_weight = X[weight_covariates]
X_weight = X_weight
</DeepExtract>
return (X_outcome, X_weight)
|
def _prepare_data(self, X, a):
"""
Extract the relevant parts for outcome model and weight model for the entire data matrix
Args:
X (pd.DataFrame): Covariate matrix of size (num_subjects, num_features).
a (pd.Series): Treatment assignment of size (num_subjects,).
Returns:
(pd.DataFrame, pd.DataFrame): X_outcome, X_weight
Data matrix for outcome model and data matrix weight model
"""
outcome_covariates = self.outcome_covariates or X.columns
X_outcome = X[outcome_covariates]
X_outcome = X_outcome
weight_covariates = self.weight_covariates or X.columns
X_weight = X[weight_covariates]
X_weight = X_weight
return (X_outcome, X_weight)
|
causallib
|
positive
|
def test_book_retrieve_cache_miss(self):
"""Test idempotent retrieve using 'If-None-Match' HTTP header, should result in HTTP 200."""
book_response = self.client.get(reverse('book-detail', kwargs={'pk': self.book.id}), CONTENT_TYPE='application/json')
self.assertEqual(book_response.status_code, status.HTTP_200_OK)
etag = book_response['ETag']
<DeepExtract>
self.book.issn = issn
self.book.save()
return self.book
</DeepExtract>
book_response = self.client.get(reverse('book-detail', kwargs={'pk': self.book.id}), CONTENT_TYPE='application/json', HTTP_IF_NONE_MATCH=etag)
self.assertEqual(book_response.status_code, status.HTTP_200_OK, 'The response status code must be 200!')
self.assertNotEqual(book_response['ETag'], etag)
|
def test_book_retrieve_cache_miss(self):
"""Test idempotent retrieve using 'If-None-Match' HTTP header, should result in HTTP 200."""
book_response = self.client.get(reverse('book-detail', kwargs={'pk': self.book.id}), CONTENT_TYPE='application/json')
self.assertEqual(book_response.status_code, status.HTTP_200_OK)
etag = book_response['ETag']
self.book.issn = issn
self.book.save()
return self.book
book_response = self.client.get(reverse('book-detail', kwargs={'pk': self.book.id}), CONTENT_TYPE='application/json', HTTP_IF_NONE_MATCH=etag)
self.assertEqual(book_response.status_code, status.HTTP_200_OK, 'The response status code must be 200!')
self.assertNotEqual(book_response['ETag'], etag)
|
drf-extensions
|
positive
|
def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
if input.numel() > 0:
return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)
def _check_size_scale_factor(dim):
if size is None and scale_factor is None:
raise ValueError('either size or scale_factor should be defined')
if size is not None and scale_factor is not None:
raise ValueError('only one of size or scale_factor should be defined')
if scale_factor is not None and isinstance(scale_factor, tuple) and (len(scale_factor) != dim):
raise ValueError('scale_factor shape must match input shape. Input is {}D, scale_factor size is {}'.format(dim, len(scale_factor)))
def _output_size(dim):
<DeepExtract>
if size is None and scale_factor is None:
raise ValueError('either size or scale_factor should be defined')
if size is not None and scale_factor is not None:
raise ValueError('only one of size or scale_factor should be defined')
if scale_factor is not None and isinstance(scale_factor, tuple) and (len(scale_factor) != dim):
raise ValueError('scale_factor shape must match input shape. Input is {}D, scale_factor size is {}'.format(dim, len(scale_factor)))
</DeepExtract>
if size is not None:
return size
scale_factors = _ntuple(dim)(scale_factor)
return [int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)]
output_shape = tuple(_output_size(2))
output_shape = input.shape[:-2] + output_shape
return _NewEmptyTensorOp.apply(input, output_shape)
|
def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
if input.numel() > 0:
return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)
def _check_size_scale_factor(dim):
if size is None and scale_factor is None:
raise ValueError('either size or scale_factor should be defined')
if size is not None and scale_factor is not None:
raise ValueError('only one of size or scale_factor should be defined')
if scale_factor is not None and isinstance(scale_factor, tuple) and (len(scale_factor) != dim):
raise ValueError('scale_factor shape must match input shape. Input is {}D, scale_factor size is {}'.format(dim, len(scale_factor)))
def _output_size(dim):
if size is None and scale_factor is None:
raise ValueError('either size or scale_factor should be defined')
if size is not None and scale_factor is not None:
raise ValueError('only one of size or scale_factor should be defined')
if scale_factor is not None and isinstance(scale_factor, tuple) and (len(scale_factor) != dim):
raise ValueError('scale_factor shape must match input shape. Input is {}D, scale_factor size is {}'.format(dim, len(scale_factor)))
if size is not None:
return size
scale_factors = _ntuple(dim)(scale_factor)
return [int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)]
output_shape = tuple(_output_size(2))
output_shape = input.shape[:-2] + output_shape
return _NewEmptyTensorOp.apply(input, output_shape)
|
APNet
|
positive
|
def test_radii_list_to_ndarray(self):
<DeepExtract>
sections = np.asarray([NacaProfile(digits='0012') for i in range(10)])
radii = np.arange(0.4, 1.31, 0.1)
chord_lengths = np.concatenate((np.arange(0.55, 1.1, 0.15), np.arange(1.03, 0.9, -0.03), np.array([0.3])))
pitch = np.append(np.arange(3.0, 4.0, 0.2), np.arange(4.1, 3.2, -0.2))
rake = np.append(np.arange(0.005, 0.08, 0.01), np.arange(0.075, 0.02, -0.03))
skew_angles = np.append(np.arange(-4.0, -9.0, -3.0), np.arange(-7.0, 15.0, 3.0))
blade = bl.Blade(sections=sections, radii=radii, chord_lengths=chord_lengths, pitch=pitch, rake=rake, skew_angles=skew_angles)
</DeepExtract>
blade.radii = list(range(10))
blade._check_params()
self.assertIsInstance(blade.radii, np.ndarray)
|
def test_radii_list_to_ndarray(self):
sections = np.asarray([NacaProfile(digits='0012') for i in range(10)])
radii = np.arange(0.4, 1.31, 0.1)
chord_lengths = np.concatenate((np.arange(0.55, 1.1, 0.15), np.arange(1.03, 0.9, -0.03), np.array([0.3])))
pitch = np.append(np.arange(3.0, 4.0, 0.2), np.arange(4.1, 3.2, -0.2))
rake = np.append(np.arange(0.005, 0.08, 0.01), np.arange(0.075, 0.02, -0.03))
skew_angles = np.append(np.arange(-4.0, -9.0, -3.0), np.arange(-7.0, 15.0, 3.0))
blade = bl.Blade(sections=sections, radii=radii, chord_lengths=chord_lengths, pitch=pitch, rake=rake, skew_angles=skew_angles)
blade.radii = list(range(10))
blade._check_params()
self.assertIsInstance(blade.radii, np.ndarray)
|
BladeX
|
positive
|
def patch_version(self) -> Optional[PatchVersion]:
"""Return patch version or None if there is None
This method reads/writes version from/to cache.
"""
if self.solution.storage.url == RadsStorage.URL_PBE:
return PatchVersion('main')
cache = self.solution.storage.fspath(f'{self.path}/_patch_version')
if os.path.isfile(cache):
logger.debug(f'retrieving patch version for {self} from cache')
with open(cache) as f:
version = f.read().strip()
version = PatchVersion(version) if version else None
else:
<DeepExtract>
logger.debug(f'retrieving patch version for {self}')
retrievers = {'league_client_sln': ('league_client', 'system.yaml', get_system_yaml_version), 'lol_game_client_sln': ('lol_game_client', 'League of Legends.exe', get_exe_version)}
try:
(project_name, file_name, extractor) = retrievers[self.solution.name]
except KeyError:
raise RuntimeError(f'no known way to retrieve patch version for solution {self.solution.name}')
for pv in self.projects(False):
if pv.project.name == project_name:
break
else:
raise ValueError(f'{project_name} project not found for {self}')
try:
filepaths = pv.filepaths()
except requests.exceptions.HTTPError as e:
if e.response is not None and e.response.status_code == 404:
version = None
raise
path_suffix = f'/{file_name}'
for path in filepaths:
if path.endswith(path_suffix):
fspath = self.solution.storage.fspath(path)
if not os.path.isfile(path):
pv.extract([path])
break
else:
if pv.project.name == 'league_client' and pv.version <= RadsVersion('0.0.0.43'):
version = None
raise ValueError(f"'{file_name}' not found for {pv}")
version = extractor(fspath)
version = PatchVersion(version)
</DeepExtract>
if version is None:
logger.warning(f'failed to retrieve patch version for {self}')
else:
with open(cache, 'w') as f:
f.write(f'{version}\n')
return version
|
def patch_version(self) -> Optional[PatchVersion]:
"""Return patch version or None if there is None
This method reads/writes version from/to cache.
"""
if self.solution.storage.url == RadsStorage.URL_PBE:
return PatchVersion('main')
cache = self.solution.storage.fspath(f'{self.path}/_patch_version')
if os.path.isfile(cache):
logger.debug(f'retrieving patch version for {self} from cache')
with open(cache) as f:
version = f.read().strip()
version = PatchVersion(version) if version else None
else:
logger.debug(f'retrieving patch version for {self}')
retrievers = {'league_client_sln': ('league_client', 'system.yaml', get_system_yaml_version), 'lol_game_client_sln': ('lol_game_client', 'League of Legends.exe', get_exe_version)}
try:
(project_name, file_name, extractor) = retrievers[self.solution.name]
except KeyError:
raise RuntimeError(f'no known way to retrieve patch version for solution {self.solution.name}')
for pv in self.projects(False):
if pv.project.name == project_name:
break
else:
raise ValueError(f'{project_name} project not found for {self}')
try:
filepaths = pv.filepaths()
except requests.exceptions.HTTPError as e:
if e.response is not None and e.response.status_code == 404:
version = None
raise
path_suffix = f'/{file_name}'
for path in filepaths:
if path.endswith(path_suffix):
fspath = self.solution.storage.fspath(path)
if not os.path.isfile(path):
pv.extract([path])
break
else:
if pv.project.name == 'league_client' and pv.version <= RadsVersion('0.0.0.43'):
version = None
raise ValueError(f"'{file_name}' not found for {pv}")
version = extractor(fspath)
version = PatchVersion(version)
if version is None:
logger.warning(f'failed to retrieve patch version for {self}')
else:
with open(cache, 'w') as f:
f.write(f'{version}\n')
return version
|
CDTB
|
positive
|
@pytest.mark.integration
def test_update_all_events_in_database(session, factory, patched_meetup):
"""
GIVEN: table contains upcoming events, adapter has same events with updated details
WHEN: add_events_to_database is called
THEN: update event information in database
"""
group = factory.UpcomingEventsGroup()
num_events = 20
database_events = factory.Event.create_batch(size=num_events, group=group)
fetched_events = []
for event in database_events:
fetched_events.append(factory.EventDetails(id=event.remote_id, venue='TBD'))
<DeepExtract>
def _wrapper(events):
fake_meetup = FakeMeetupAdapter(fetched_events)
patcher(MODULE_TO_TEST, namespace='meetup', replacement=fake_meetup)
return _wrapper
</DeepExtract>
sync_database_with_fetched_events(group)
all_database_events = Event.query.all()
assert len(all_database_events) == num_events
for event in all_database_events:
assert event.venue == 'TBD'
|
@pytest.mark.integration
def test_update_all_events_in_database(session, factory, patched_meetup):
"""
GIVEN: table contains upcoming events, adapter has same events with updated details
WHEN: add_events_to_database is called
THEN: update event information in database
"""
group = factory.UpcomingEventsGroup()
num_events = 20
database_events = factory.Event.create_batch(size=num_events, group=group)
fetched_events = []
for event in database_events:
fetched_events.append(factory.EventDetails(id=event.remote_id, venue='TBD'))
def _wrapper(events):
fake_meetup = FakeMeetupAdapter(fetched_events)
patcher(MODULE_TO_TEST, namespace='meetup', replacement=fake_meetup)
return _wrapper
sync_database_with_fetched_events(group)
all_database_events = Event.query.all()
assert len(all_database_events) == num_events
for event in all_database_events:
assert event.venue == 'TBD'
|
busy-beaver
|
positive
|
def create_pid_file(tmpdir, pid):
<DeepExtract>
temp_file = tmpdir.join(watchdog.STUNNEL_PID_FILE)
temp_file.write(str(pid), ensure=True)
pid_file = temp_file
</DeepExtract>
return (pid_file.dirname, pid_file.basename, str(pid_file))
|
def create_pid_file(tmpdir, pid):
temp_file = tmpdir.join(watchdog.STUNNEL_PID_FILE)
temp_file.write(str(pid), ensure=True)
pid_file = temp_file
return (pid_file.dirname, pid_file.basename, str(pid_file))
|
efs-utils
|
positive
|
def test_worker_sends_final_results_after_last_chunk(self):
messages = [{'frontend': self.make_frontend_request('message 1', 'ONLINE', has_next=True)}, {'frontend': self.make_frontend_request('', 'ONLINE', has_next=False)}]
<DeepExtract>
self.poller.add_messages(messages)
self.worker.run()
</DeepExtract>
expected_message1 = createResultsMessage([(0, False, [(1.0, 'Interim result')])])
expected_message2 = createResultsMessage([(0, True, [(1.0, 'Hello World!')])])
<DeepExtract>
sent_messages = [parseResultsMessage(message) for message in self.poller.sent_messages['frontend']]
self.assertEquals([expected_message1, expected_message2], sent_messages)
</DeepExtract>
|
def test_worker_sends_final_results_after_last_chunk(self):
messages = [{'frontend': self.make_frontend_request('message 1', 'ONLINE', has_next=True)}, {'frontend': self.make_frontend_request('', 'ONLINE', has_next=False)}]
self.poller.add_messages(messages)
self.worker.run()
expected_message1 = createResultsMessage([(0, False, [(1.0, 'Interim result')])])
expected_message2 = createResultsMessage([(0, True, [(1.0, 'Hello World!')])])
sent_messages = [parseResultsMessage(message) for message in self.poller.sent_messages['frontend']]
self.assertEquals([expected_message1, expected_message2], sent_messages)
|
cloud-asr
|
positive
|
def launch(self):
<DeepExtract>
pid = os.fork()
if pid:
log.debug('Forked worker PID is %d', pid)
self.stats['forks'] += 1
else:
os.setpgrp()
self.socket.close()
self.socket = None
exit_by_exception()
pid = pid
</DeepExtract>
if pid:
return pid
else:
from wsgiref.simple_server import make_server
from borgcube.web.wsgi import get_wsgi_application
from .utils import ThreadPoolWSGIServer
(host, port) = settings.BUILTIN_WEB.rsplit(':', maxsplit=1)
set_process_name('borgcubed [web process]')
log.info('Serving HTTP on http://%s:%s', host, port)
if settings.DEBUG:
log.warning('DEBUG mode is enabled. This is rather dangerous.')
if host not in ('127.0.0.1', 'localhost'):
log.error('DEBUG mode is not possible for non-local host %s', host)
sys.exit(1)
httpd = make_server(host, int(port), get_wsgi_application(), server_class=ThreadPoolWSGIServer)
try:
httpd.serve_forever()
finally:
httpd.join()
sys.exit(0)
|
def launch(self):
pid = os.fork()
if pid:
log.debug('Forked worker PID is %d', pid)
self.stats['forks'] += 1
else:
os.setpgrp()
self.socket.close()
self.socket = None
exit_by_exception()
pid = pid
if pid:
return pid
else:
from wsgiref.simple_server import make_server
from borgcube.web.wsgi import get_wsgi_application
from .utils import ThreadPoolWSGIServer
(host, port) = settings.BUILTIN_WEB.rsplit(':', maxsplit=1)
set_process_name('borgcubed [web process]')
log.info('Serving HTTP on http://%s:%s', host, port)
if settings.DEBUG:
log.warning('DEBUG mode is enabled. This is rather dangerous.')
if host not in ('127.0.0.1', 'localhost'):
log.error('DEBUG mode is not possible for non-local host %s', host)
sys.exit(1)
httpd = make_server(host, int(port), get_wsgi_application(), server_class=ThreadPoolWSGIServer)
try:
httpd.serve_forever()
finally:
httpd.join()
sys.exit(0)
|
borgcube
|
positive
|
def test_serialize_normalize(self):
from decimal import Decimal
val = Decimal('1.00')
node = DummySchemaNode(None)
<DeepExtract>
from colander import Invalid
exc = Invalid(node, msg, val)
typ = exc
</DeepExtract>
result = typ.serialize(node, val)
self.assertEqual(result, '1')
|
def test_serialize_normalize(self):
from decimal import Decimal
val = Decimal('1.00')
node = DummySchemaNode(None)
from colander import Invalid
exc = Invalid(node, msg, val)
typ = exc
result = typ.serialize(node, val)
self.assertEqual(result, '1')
|
colander
|
positive
|
def receive_submit_request(application_file_path):
"""
Handle the request to submit a task
"""
blob_client = config.blob_client
application = common.load_application(application_file_path)
<DeepExtract>
spark_home = os.environ['SPARK_HOME']
with open(os.path.join(spark_home, 'conf', 'master')) as f:
master_ip = f.read().rstrip()
files_path = os.environ['AZ_BATCH_TASK_WORKING_DIR']
jars = [os.path.join(files_path, os.path.basename(jar)) for jar in application.jars]
py_files = [os.path.join(files_path, os.path.basename(py_file)) for py_file in application.py_files]
files = [os.path.join(files_path, os.path.basename(f)) for f in application.files]
spark_submit_cmd = CommandBuilder('{0}/bin/spark-submit'.format(spark_home))
spark_submit_cmd.add_option('--master', 'spark://{0}:7077'.format(master_ip))
spark_submit_cmd.add_option('--name', application.name)
spark_submit_cmd.add_option('--class', application.main_class)
spark_submit_cmd.add_option('--jars', jars and ','.join(jars))
spark_submit_cmd.add_option('--py-files', py_files and ','.join(py_files))
spark_submit_cmd.add_option('--files', files and ','.join(files))
spark_submit_cmd.add_option('--driver-java-options', application.driver_java_options)
spark_submit_cmd.add_option('--driver-library-path', application.driver_library_path)
spark_submit_cmd.add_option('--driver-class-path', application.driver_class_path)
spark_submit_cmd.add_option('--driver-memory', application.driver_memory)
spark_submit_cmd.add_option('--executor-memory', application.executor_memory)
if application.driver_cores:
spark_submit_cmd.add_option('--driver-cores', str(application.driver_cores))
if application.executor_cores:
spark_submit_cmd.add_option('--executor-cores', str(application.executor_cores))
spark_submit_cmd.add_argument(os.path.expandvars(application.application) + ' ' + ' '.join(["'" + str(app_arg) + "'" for app_arg in application.application_args or []]))
with open('spark-submit.txt', mode='w', encoding='UTF-8') as stream:
stream.write(spark_submit_cmd.to_str())
cmd = spark_submit_cmd
</DeepExtract>
exit_code = -1
try:
exit_code = subprocess.call(cmd.to_str(), shell=True)
common.upload_log(blob_client, application)
except Exception as e:
common.upload_error_log(str(e), os.path.join(os.environ['AZ_BATCH_TASK_WORKING_DIR'], 'application.yaml'))
return exit_code
|
def receive_submit_request(application_file_path):
"""
Handle the request to submit a task
"""
blob_client = config.blob_client
application = common.load_application(application_file_path)
spark_home = os.environ['SPARK_HOME']
with open(os.path.join(spark_home, 'conf', 'master')) as f:
master_ip = f.read().rstrip()
files_path = os.environ['AZ_BATCH_TASK_WORKING_DIR']
jars = [os.path.join(files_path, os.path.basename(jar)) for jar in application.jars]
py_files = [os.path.join(files_path, os.path.basename(py_file)) for py_file in application.py_files]
files = [os.path.join(files_path, os.path.basename(f)) for f in application.files]
spark_submit_cmd = CommandBuilder('{0}/bin/spark-submit'.format(spark_home))
spark_submit_cmd.add_option('--master', 'spark://{0}:7077'.format(master_ip))
spark_submit_cmd.add_option('--name', application.name)
spark_submit_cmd.add_option('--class', application.main_class)
spark_submit_cmd.add_option('--jars', jars and ','.join(jars))
spark_submit_cmd.add_option('--py-files', py_files and ','.join(py_files))
spark_submit_cmd.add_option('--files', files and ','.join(files))
spark_submit_cmd.add_option('--driver-java-options', application.driver_java_options)
spark_submit_cmd.add_option('--driver-library-path', application.driver_library_path)
spark_submit_cmd.add_option('--driver-class-path', application.driver_class_path)
spark_submit_cmd.add_option('--driver-memory', application.driver_memory)
spark_submit_cmd.add_option('--executor-memory', application.executor_memory)
if application.driver_cores:
spark_submit_cmd.add_option('--driver-cores', str(application.driver_cores))
if application.executor_cores:
spark_submit_cmd.add_option('--executor-cores', str(application.executor_cores))
spark_submit_cmd.add_argument(os.path.expandvars(application.application) + ' ' + ' '.join(["'" + str(app_arg) + "'" for app_arg in application.application_args or []]))
with open('spark-submit.txt', mode='w', encoding='UTF-8') as stream:
stream.write(spark_submit_cmd.to_str())
cmd = spark_submit_cmd
exit_code = -1
try:
exit_code = subprocess.call(cmd.to_str(), shell=True)
common.upload_log(blob_client, application)
except Exception as e:
common.upload_error_log(str(e), os.path.join(os.environ['AZ_BATCH_TASK_WORKING_DIR'], 'application.yaml'))
return exit_code
|
aztk
|
positive
|
def tasklist_shad_cpp(request, course, seminar=None, group=None):
user = request.user
user_is_attended = False
user_is_attended_special_course = False
if seminar:
groups = seminar.groups.all().order_by('name')
else:
groups = course.groups.all().order_by('name')
course.can_edit = course.user_can_edit_course(user)
if course.can_be_chosen_by_extern:
course.groups.add(course.group_with_extern)
if group:
groups = [group]
group_x_student_x_task_takens = OrderedDict()
group_x_task_list = {}
group_x_max_score = {}
default_teacher = {}
show_hidden_tasks = request.session.get(str(request.user.id) + '_' + str(course.id) + '_show_hidden_tasks', False)
show_academ_users = request.session.get(str(request.user.id) + '_' + str(course.id) + '_show_academ_users', True)
academ_students = []
for group in groups:
student_x_task_x_task_takens = {}
tasks_for_groups = TaskGroupRelations.objects.filter(task__course=course, group=group, deleted=False, task__parent_task=seminar).exclude(task__type=Task.TYPE_MATERIAL).distinct().order_by('position').prefetch_related('task__groups').select_related('task')
if show_hidden_tasks:
group_x_task_list[group] = [x.task for x in tasks_for_groups]
else:
group_x_task_list[group] = [x.task for x in tasks_for_groups if not x.task.is_hidden]
group_x_max_score.setdefault(group, 0)
for task in group_x_task_list[group]:
if not task.is_hidden:
if task.type == task.TYPE_SEMINAR:
group_x_max_score[group] += sum([x.score_max for x in task.children.all()])
else:
group_x_max_score[group] += task.score_max
if task.task_text is None:
task.task_text = ''
issues_students_in_group = Issue.objects.filter(task__in=group_x_task_list[group], student__group__in=[group]).order_by('student').select_related('task').prefetch_related('task__groups', 'task')
issues_x_student = defaultdict(list)
for issue in issues_students_in_group.all():
student_id = issue.student.id
issues_x_student[student_id].append(issue)
students = group.students.filter(is_active=True)
not_active_students = UserProfile.objects.filter(Q(user__in=group.students.filter(is_active=True)) & (Q(user_status__tag='not_active') | Q(user_status__tag='academic')))
academ_students += [x.user for x in not_active_students]
if not show_academ_users:
students = set(students) - set(academ_students)
for student in students:
if user == student:
user_is_attended = True
user_is_attended_special_course = True
student_task_takens = issues_x_student[student.id]
task_x_task_taken = {}
student_summ_scores = 0
for task_taken in student_task_takens:
task_x_task_taken[task_taken.task.id] = task_taken
if not task_taken.task.is_hidden:
if task_taken.task.type == Task.TYPE_SEMINAR or task_taken.task.score_after_deadline or (not (not task_taken.task.score_after_deadline and task_taken.is_status_accepted_after_deadline())):
student_summ_scores += task_taken.mark
student_x_task_x_task_takens[student] = (task_x_task_taken, student_summ_scores)
group_x_student_x_task_takens[group] = student_x_task_x_task_takens
try:
default_teacher[group] = DefaultTeacher.objects.get(course=course, group=group).teacher
except DefaultTeacher.DoesNotExist:
default_teacher[group] = None
group_x_student_information = OrderedDict()
for (group, student_x_task_x_task_takens) in group_x_student_x_task_takens.iteritems():
group_x_student_information.setdefault(group, [])
for student in sorted(student_x_task_x_task_takens.keys(), key=lambda x: u'{0} {1}'.format(x.last_name, x.first_name)):
if user == student:
user_is_attended = True
elif not course.user_can_see_transcript(user, student):
continue
<DeepExtract>
mark_id = -1
course_mark = '--'
course_mark_int = -1
course_marks = course.mark_system
if course_marks and course_marks.marks:
if course_marks.marks.all()[0].name_int != -1:
course_mark_int = -10
try:
student_course_mark = StudentCourseMark.objects.get(course=course, student=student)
if student_course_mark.mark:
mark_id = student_course_mark.mark.id
course_mark = unicode(student_course_mark)
course_mark_int = student_course_mark.mark.name_int
except StudentCourseMark.DoesNotExist:
pass
(mark_id, course_mark, course_mark_int) = (mark_id, course_mark, course_mark_int)
</DeepExtract>
group_x_student_information[group].append((student, student_x_task_x_task_takens[student][0], student_x_task_x_task_takens[student][1], mark_id, course_mark, course_mark_int))
context = {'course': course, 'course_mark_system_vals': course.mark_system.marks.all() if course.mark_system else None, 'group_information': group_x_student_information, 'group_tasks': group_x_task_list, 'group_x_max_score': group_x_max_score, 'default_teacher': default_teacher, 'user': user, 'user_is_attended': user_is_attended, 'user_is_attended_special_course': user_is_attended_special_course, 'user_is_teacher': course.user_is_teacher(user), 'seminar': seminar, 'visible_queue': course.user_can_see_queue(user), 'visible_attendance_log': course.user_can_see_attendance_log(request.user), 'visible_hide_button': Task.objects.filter(Q(course=course) & Q(is_hidden=True)).exists(), 'show_hidden_tasks': show_hidden_tasks, 'visible_hide_button_users': len(academ_students), 'show_academ_users': show_academ_users}
return context
|
def tasklist_shad_cpp(request, course, seminar=None, group=None):
user = request.user
user_is_attended = False
user_is_attended_special_course = False
if seminar:
groups = seminar.groups.all().order_by('name')
else:
groups = course.groups.all().order_by('name')
course.can_edit = course.user_can_edit_course(user)
if course.can_be_chosen_by_extern:
course.groups.add(course.group_with_extern)
if group:
groups = [group]
group_x_student_x_task_takens = OrderedDict()
group_x_task_list = {}
group_x_max_score = {}
default_teacher = {}
show_hidden_tasks = request.session.get(str(request.user.id) + '_' + str(course.id) + '_show_hidden_tasks', False)
show_academ_users = request.session.get(str(request.user.id) + '_' + str(course.id) + '_show_academ_users', True)
academ_students = []
for group in groups:
student_x_task_x_task_takens = {}
tasks_for_groups = TaskGroupRelations.objects.filter(task__course=course, group=group, deleted=False, task__parent_task=seminar).exclude(task__type=Task.TYPE_MATERIAL).distinct().order_by('position').prefetch_related('task__groups').select_related('task')
if show_hidden_tasks:
group_x_task_list[group] = [x.task for x in tasks_for_groups]
else:
group_x_task_list[group] = [x.task for x in tasks_for_groups if not x.task.is_hidden]
group_x_max_score.setdefault(group, 0)
for task in group_x_task_list[group]:
if not task.is_hidden:
if task.type == task.TYPE_SEMINAR:
group_x_max_score[group] += sum([x.score_max for x in task.children.all()])
else:
group_x_max_score[group] += task.score_max
if task.task_text is None:
task.task_text = ''
issues_students_in_group = Issue.objects.filter(task__in=group_x_task_list[group], student__group__in=[group]).order_by('student').select_related('task').prefetch_related('task__groups', 'task')
issues_x_student = defaultdict(list)
for issue in issues_students_in_group.all():
student_id = issue.student.id
issues_x_student[student_id].append(issue)
students = group.students.filter(is_active=True)
not_active_students = UserProfile.objects.filter(Q(user__in=group.students.filter(is_active=True)) & (Q(user_status__tag='not_active') | Q(user_status__tag='academic')))
academ_students += [x.user for x in not_active_students]
if not show_academ_users:
students = set(students) - set(academ_students)
for student in students:
if user == student:
user_is_attended = True
user_is_attended_special_course = True
student_task_takens = issues_x_student[student.id]
task_x_task_taken = {}
student_summ_scores = 0
for task_taken in student_task_takens:
task_x_task_taken[task_taken.task.id] = task_taken
if not task_taken.task.is_hidden:
if task_taken.task.type == Task.TYPE_SEMINAR or task_taken.task.score_after_deadline or (not (not task_taken.task.score_after_deadline and task_taken.is_status_accepted_after_deadline())):
student_summ_scores += task_taken.mark
student_x_task_x_task_takens[student] = (task_x_task_taken, student_summ_scores)
group_x_student_x_task_takens[group] = student_x_task_x_task_takens
try:
default_teacher[group] = DefaultTeacher.objects.get(course=course, group=group).teacher
except DefaultTeacher.DoesNotExist:
default_teacher[group] = None
group_x_student_information = OrderedDict()
for (group, student_x_task_x_task_takens) in group_x_student_x_task_takens.iteritems():
group_x_student_information.setdefault(group, [])
for student in sorted(student_x_task_x_task_takens.keys(), key=lambda x: u'{0} {1}'.format(x.last_name, x.first_name)):
if user == student:
user_is_attended = True
elif not course.user_can_see_transcript(user, student):
continue
mark_id = -1
course_mark = '--'
course_mark_int = -1
course_marks = course.mark_system
if course_marks and course_marks.marks:
if course_marks.marks.all()[0].name_int != -1:
course_mark_int = -10
try:
student_course_mark = StudentCourseMark.objects.get(course=course, student=student)
if student_course_mark.mark:
mark_id = student_course_mark.mark.id
course_mark = unicode(student_course_mark)
course_mark_int = student_course_mark.mark.name_int
except StudentCourseMark.DoesNotExist:
pass
(mark_id, course_mark, course_mark_int) = (mark_id, course_mark, course_mark_int)
group_x_student_information[group].append((student, student_x_task_x_task_takens[student][0], student_x_task_x_task_takens[student][1], mark_id, course_mark, course_mark_int))
context = {'course': course, 'course_mark_system_vals': course.mark_system.marks.all() if course.mark_system else None, 'group_information': group_x_student_information, 'group_tasks': group_x_task_list, 'group_x_max_score': group_x_max_score, 'default_teacher': default_teacher, 'user': user, 'user_is_attended': user_is_attended, 'user_is_attended_special_course': user_is_attended_special_course, 'user_is_teacher': course.user_is_teacher(user), 'seminar': seminar, 'visible_queue': course.user_can_see_queue(user), 'visible_attendance_log': course.user_can_see_attendance_log(request.user), 'visible_hide_button': Task.objects.filter(Q(course=course) & Q(is_hidden=True)).exists(), 'show_hidden_tasks': show_hidden_tasks, 'visible_hide_button_users': len(academ_students), 'show_academ_users': show_academ_users}
return context
|
anytask
|
positive
|
def _select_entire_single_column(self, col_sel):
<DeepExtract>
if col_sel not in self._column_info:
raise KeyError(f'Column "{col_sel}" does not exist')
</DeepExtract>
<DeepExtract>
col_info = self._column_info[col_sel]
(dtype, loc) = (col_info.dtype, col_info.loc)
</DeepExtract>
new_data = {dtype: self._data[dtype][:, loc].reshape(-1, 1)}
new_columns = np.array([col_sel], dtype='O')
new_column_info = {col_sel: utils.Column(dtype, 0, 0)}
new_str_reverse_map = {}
if dtype == 'S':
new_str_reverse_map = {0: self._str_reverse_map[loc]}
return self._construct_from_new(new_data, new_column_info, new_columns, new_str_reverse_map)
|
def _select_entire_single_column(self, col_sel):
if col_sel not in self._column_info:
raise KeyError(f'Column "{col_sel}" does not exist')
col_info = self._column_info[col_sel]
(dtype, loc) = (col_info.dtype, col_info.loc)
new_data = {dtype: self._data[dtype][:, loc].reshape(-1, 1)}
new_columns = np.array([col_sel], dtype='O')
new_column_info = {col_sel: utils.Column(dtype, 0, 0)}
new_str_reverse_map = {}
if dtype == 'S':
new_str_reverse_map = {0: self._str_reverse_map[loc]}
return self._construct_from_new(new_data, new_column_info, new_columns, new_str_reverse_map)
|
dexplo
|
positive
|
def _diff(self, cluster_a, cluster_b):
for (name, account) in cluster_a.accounts.items():
if name == 'root':
continue
if name in cluster_b.accounts:
total = 0
for (uid, user) in account.users.items():
if uid == 'root':
continue
if uid not in cluster_b.accounts[name].users:
<DeepExtract>
if self._skip_user(uid, name):
return
if self.sync:
try:
slurm_remove_assoc(uid, cluster_a.name, name, noop=self.noop)
except SlurmError as e:
logger.error('Failed removing Slurm association user %s account %s cluster %s: %s', uid, name, cluster_a.name, e)
else:
logger.error('Removed Slurm association user %s account %s cluster %s successfully', uid, name, cluster_a.name)
row = [uid, name, cluster_a.name, 'Remove']
self.write('\t'.join(row))
</DeepExtract>
total += 1
else:
<DeepExtract>
logger.debug('diff qos: cluster=%s account=%s uid=%s a=%s b=%s', cluster_a.name, name, user.name, user.spec_list(), cluster_b.accounts[name].users[uid].spec_list())
specs_a = []
for s in user.spec_list():
if s.startswith('QOS'):
specs_a += self._parse_qos(s)
specs_b = []
for s in cluster_b.accounts[name].users[uid].spec_list():
if s.startswith('QOS'):
specs_b += self._parse_qos(s)
specs_set_a = set(specs_a)
specs_set_b = set(specs_b)
diff = specs_set_a.difference(specs_set_b)
logger.debug('diff qos: cluster=%s account=%s uid=%s a=%s b=%s diff=%s', cluster_a.name, name, user.name, specs_set_a, specs_set_b, diff)
if len(diff) > 0:
self.remove_qos(user.name, name, cluster_a.name, 'QOS-=' + ','.join([x for x in list(diff)]))
</DeepExtract>
if total == len(account.users):
<DeepExtract>
if self._skip_account(name):
return
if self.sync:
try:
slurm_remove_account(cluster_a.name, name, noop=self.noop)
except SlurmError as e:
logger.error('Failed removing Slurm account %s cluster %s: %s', name, cluster_a.name, e)
else:
logger.error('Removed Slurm account %s cluster %s successfully', name, cluster_a.name)
row = ['', name, cluster_a.name, 'Remove']
self.write('\t'.join(row))
</DeepExtract>
else:
for (uid, user) in account.users.items():
<DeepExtract>
if self._skip_user(uid, name):
return
if self.sync:
try:
slurm_remove_assoc(uid, cluster_a.name, name, noop=self.noop)
except SlurmError as e:
logger.error('Failed removing Slurm association user %s account %s cluster %s: %s', uid, name, cluster_a.name, e)
else:
logger.error('Removed Slurm association user %s account %s cluster %s successfully', uid, name, cluster_a.name)
row = [uid, name, cluster_a.name, 'Remove']
self.write('\t'.join(row))
</DeepExtract>
<DeepExtract>
if self._skip_account(name):
return
if self.sync:
try:
slurm_remove_account(cluster_a.name, name, noop=self.noop)
except SlurmError as e:
logger.error('Failed removing Slurm account %s cluster %s: %s', name, cluster_a.name, e)
else:
logger.error('Removed Slurm account %s cluster %s successfully', name, cluster_a.name)
row = ['', name, cluster_a.name, 'Remove']
self.write('\t'.join(row))
</DeepExtract>
|
def _diff(self, cluster_a, cluster_b):
for (name, account) in cluster_a.accounts.items():
if name == 'root':
continue
if name in cluster_b.accounts:
total = 0
for (uid, user) in account.users.items():
if uid == 'root':
continue
if uid not in cluster_b.accounts[name].users:
if self._skip_user(uid, name):
return
if self.sync:
try:
slurm_remove_assoc(uid, cluster_a.name, name, noop=self.noop)
except SlurmError as e:
logger.error('Failed removing Slurm association user %s account %s cluster %s: %s', uid, name, cluster_a.name, e)
else:
logger.error('Removed Slurm association user %s account %s cluster %s successfully', uid, name, cluster_a.name)
row = [uid, name, cluster_a.name, 'Remove']
self.write('\t'.join(row))
total += 1
else:
logger.debug('diff qos: cluster=%s account=%s uid=%s a=%s b=%s', cluster_a.name, name, user.name, user.spec_list(), cluster_b.accounts[name].users[uid].spec_list())
specs_a = []
for s in user.spec_list():
if s.startswith('QOS'):
specs_a += self._parse_qos(s)
specs_b = []
for s in cluster_b.accounts[name].users[uid].spec_list():
if s.startswith('QOS'):
specs_b += self._parse_qos(s)
specs_set_a = set(specs_a)
specs_set_b = set(specs_b)
diff = specs_set_a.difference(specs_set_b)
logger.debug('diff qos: cluster=%s account=%s uid=%s a=%s b=%s diff=%s', cluster_a.name, name, user.name, specs_set_a, specs_set_b, diff)
if len(diff) > 0:
self.remove_qos(user.name, name, cluster_a.name, 'QOS-=' + ','.join([x for x in list(diff)]))
if total == len(account.users):
if self._skip_account(name):
return
if self.sync:
try:
slurm_remove_account(cluster_a.name, name, noop=self.noop)
except SlurmError as e:
logger.error('Failed removing Slurm account %s cluster %s: %s', name, cluster_a.name, e)
else:
logger.error('Removed Slurm account %s cluster %s successfully', name, cluster_a.name)
row = ['', name, cluster_a.name, 'Remove']
self.write('\t'.join(row))
else:
for (uid, user) in account.users.items():
if self._skip_user(uid, name):
return
if self.sync:
try:
slurm_remove_assoc(uid, cluster_a.name, name, noop=self.noop)
except SlurmError as e:
logger.error('Failed removing Slurm association user %s account %s cluster %s: %s', uid, name, cluster_a.name, e)
else:
logger.error('Removed Slurm association user %s account %s cluster %s successfully', uid, name, cluster_a.name)
row = [uid, name, cluster_a.name, 'Remove']
self.write('\t'.join(row))
if self._skip_account(name):
return
if self.sync:
try:
slurm_remove_account(cluster_a.name, name, noop=self.noop)
except SlurmError as e:
logger.error('Failed removing Slurm account %s cluster %s: %s', name, cluster_a.name, e)
else:
logger.error('Removed Slurm account %s cluster %s successfully', name, cluster_a.name)
row = ['', name, cluster_a.name, 'Remove']
self.write('\t'.join(row))
|
coldfront
|
positive
|
@check_ssh
def ssh_output(self, cmd, allow_fail=False, combine_stderr=True, timeout_sec=None):
"""Runs the command via SSH and captures the output, returning it as a string.
:param cmd: The remote ssh command.
:param allow_fail: If True, ignore nonzero exit status of the remote command,
else raise an ``RemoteCommandError``
:param combine_stderr: If True, return output from both stderr and stdout of the remote process.
:param timeout_sec: Set timeout on blocking reads/writes. Default None. For more details see
http://docs.paramiko.org/en/2.0/api/channel.html#paramiko.channel.Channel.settimeout
:return: The stdout output from the ssh command.
:raise RemoteCommandError: If ``allow_fail`` is False and the command returns a non-zero exit status
"""
<DeepExtract>
'Running ssh command: %s' % cmd = '%s: %s' % (str(self), 'Running ssh command: %s' % cmd)
self.logger.log(logging.DEBUG, 'Running ssh command: %s' % cmd, *args, **kwargs)
</DeepExtract>
client = self.ssh_client
chan = client.get_transport().open_session(timeout=timeout_sec)
chan.settimeout(timeout_sec)
chan.exec_command(cmd)
chan.set_combine_stderr(combine_stderr)
stdin = chan.makefile('wb', -1)
stdout = chan.makefile('r', -1)
stderr = chan.makefile_stderr('r', -1)
try:
stdoutdata = stdout.read()
exit_status = stdin.channel.recv_exit_status()
if exit_status != 0:
if not allow_fail:
raise RemoteCommandError(self, cmd, exit_status, stderr.read())
else:
<DeepExtract>
"Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()) = '%s: %s' % (str(self), "Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()))
self.logger.log(logging.DEBUG, "Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()), *args, **kwargs)
</DeepExtract>
finally:
stdin.close()
stdout.close()
stderr.close()
<DeepExtract>
'Returning ssh command output:\n%s' % stdoutdata = '%s: %s' % (str(self), 'Returning ssh command output:\n%s' % stdoutdata)
self.logger.log(logging.DEBUG, 'Returning ssh command output:\n%s' % stdoutdata, *args, **kwargs)
</DeepExtract>
return stdoutdata
|
@check_ssh
def ssh_output(self, cmd, allow_fail=False, combine_stderr=True, timeout_sec=None):
"""Runs the command via SSH and captures the output, returning it as a string.
:param cmd: The remote ssh command.
:param allow_fail: If True, ignore nonzero exit status of the remote command,
else raise an ``RemoteCommandError``
:param combine_stderr: If True, return output from both stderr and stdout of the remote process.
:param timeout_sec: Set timeout on blocking reads/writes. Default None. For more details see
http://docs.paramiko.org/en/2.0/api/channel.html#paramiko.channel.Channel.settimeout
:return: The stdout output from the ssh command.
:raise RemoteCommandError: If ``allow_fail`` is False and the command returns a non-zero exit status
"""
'Running ssh command: %s' % cmd = '%s: %s' % (str(self), 'Running ssh command: %s' % cmd)
self.logger.log(logging.DEBUG, 'Running ssh command: %s' % cmd, *args, **kwargs)
client = self.ssh_client
chan = client.get_transport().open_session(timeout=timeout_sec)
chan.settimeout(timeout_sec)
chan.exec_command(cmd)
chan.set_combine_stderr(combine_stderr)
stdin = chan.makefile('wb', -1)
stdout = chan.makefile('r', -1)
stderr = chan.makefile_stderr('r', -1)
try:
stdoutdata = stdout.read()
exit_status = stdin.channel.recv_exit_status()
if exit_status != 0:
if not allow_fail:
raise RemoteCommandError(self, cmd, exit_status, stderr.read())
else:
"Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()) = '%s: %s' % (str(self), "Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()))
self.logger.log(logging.DEBUG, "Running ssh command '%s' exited with status %d and message: %s" % (cmd, exit_status, stderr.read()), *args, **kwargs)
finally:
stdin.close()
stdout.close()
stderr.close()
'Returning ssh command output:\n%s' % stdoutdata = '%s: %s' % (str(self), 'Returning ssh command output:\n%s' % stdoutdata)
self.logger.log(logging.DEBUG, 'Returning ssh command output:\n%s' % stdoutdata, *args, **kwargs)
return stdoutdata
|
ducktape
|
positive
|
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[], n_downsampling=2):
netG = None
use_gpu = len(gpu_ids) > 0
<DeepExtract>
if norm == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm == 'batch_sync':
norm_layer = BatchNorm2d
elif norm == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm)
norm_layer = norm_layer
</DeepExtract>
if use_gpu:
assert torch.cuda.is_available()
if which_model_netG == 'Graph':
assert len(input_nc) == 2
netG = GraphNetwork(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids, n_downsampling=n_downsampling)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
<DeepExtract>
print('initialization method [%s]' % init_type)
if init_type == 'normal':
netG.apply(weights_init_normal)
elif init_type == 'xavier':
netG.apply(weights_init_xavier)
elif init_type == 'kaiming':
netG.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
netG.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
</DeepExtract>
return netG
|
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[], n_downsampling=2):
netG = None
use_gpu = len(gpu_ids) > 0
if norm == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm == 'batch_sync':
norm_layer = BatchNorm2d
elif norm == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm)
norm_layer = norm_layer
if use_gpu:
assert torch.cuda.is_available()
if which_model_netG == 'Graph':
assert len(input_nc) == 2
netG = GraphNetwork(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids, n_downsampling=n_downsampling)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
print('initialization method [%s]' % init_type)
if init_type == 'normal':
netG.apply(weights_init_normal)
elif init_type == 'xavier':
netG.apply(weights_init_xavier)
elif init_type == 'kaiming':
netG.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
netG.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
return netG
|
BiGraphGAN
|
positive
|
def __init__(self, pages: Dict[str, list]={}, project_url: Union[str, Dict[str, str]]=None, template_dir=None, examples_dir=None, extra_aliases: Union[List[str], Dict[str, str]]=None, max_signature_line_length: int=110, titles_size='###'):
self.pages = pages
self.project_url = project_url
self.template_dir = template_dir
self.examples_dir = examples_dir
self.class_aliases = {}
<DeepExtract>
for list_elements in self.pages.values():
for element_as_str in list_elements:
element = utils.import_object(element_as_str)
if not isclass(element):
continue
true_dotted_path = utils.get_dotted_path(element)
self.class_aliases[true_dotted_path] = element_as_str
if isinstance(extra_aliases, dict):
self.class_aliases.update(extra_aliases)
elif isinstance(extra_aliases, list):
for alias in extra_aliases:
full_dotted_path = utils.get_dotted_path(utils.import_object(alias))
self.class_aliases[full_dotted_path] = alias
</DeepExtract>
self.max_signature_line_length = max_signature_line_length
self.titles_size = titles_size
|
def __init__(self, pages: Dict[str, list]={}, project_url: Union[str, Dict[str, str]]=None, template_dir=None, examples_dir=None, extra_aliases: Union[List[str], Dict[str, str]]=None, max_signature_line_length: int=110, titles_size='###'):
self.pages = pages
self.project_url = project_url
self.template_dir = template_dir
self.examples_dir = examples_dir
self.class_aliases = {}
for list_elements in self.pages.values():
for element_as_str in list_elements:
element = utils.import_object(element_as_str)
if not isclass(element):
continue
true_dotted_path = utils.get_dotted_path(element)
self.class_aliases[true_dotted_path] = element_as_str
if isinstance(extra_aliases, dict):
self.class_aliases.update(extra_aliases)
elif isinstance(extra_aliases, list):
for alias in extra_aliases:
full_dotted_path = utils.get_dotted_path(utils.import_object(alias))
self.class_aliases[full_dotted_path] = alias
self.max_signature_line_length = max_signature_line_length
self.titles_size = titles_size
|
autokeras
|
positive
|
@add_data('b40')
def b40_toy1d_dataset(key, data_size, batch_size, n_data=40, x_lim=[-4, 4], noise_std=3):
"""x^3 invertible function with added Gaussian noise.
Return:
(inputs, targets) are for plotting/visualization
get_batch: function for getting another randomly selected batch of data
"""
subkeys = jax.random.split(key, n_data)
inputs = np.concatenate([np.linspace(-4, -3, num=n_data // 2), np.linspace(-2.5, -1, num=n_data // 5), np.linspace(-1, 2, num=n_data // 5), np.linspace(2, 4, num=n_data // 2)])
<DeepExtract>
rngs = jax.random.split(key, inputs.shape[0])
noise = vmap(random.normal, (0, None), 0)(rngs, (1,))
assert len(noise) == inputs.shape[0]
noise = noise.squeeze()
</DeepExtract>
true_fn = lambda x: x ** 3
targets = true_fn(inputs) + noise * noise_std
inputs = inputs[..., None]
targets = targets[..., None]
print('plot sample-gap-40 inputs: {} targets: {}'.format(inputs.shape, targets.shape))
D = inputs.shape[-1]
test_x0 = np.repeat(np.expand_dims(np.linspace(x_lim[0] - 2, x_lim[1] + 2, data_size), axis=1), D, axis=1)
test_x1 = np.repeat(np.expand_dims(true_fn(test_x0[:, 0]), axis=1), D, axis=1)
test = (test_x0, test_x1)
def get_batch(key, data_size, batch_size, D=1):
assert data_size % batch_size == 0
num_batches = data_size / batch_size
(key, subkey) = jax.random.split(key)
batch_x = jax.random.uniform(subkey, (batch_size, D), minval=x_lim[0], maxval=x_lim[1])
batch_y = np.repeat(true_fn(batch_x[:, 0])[..., None], D, axis=1)
return (key, (batch_x, batch_y))
return (inputs, targets, test, get_batch, noise_std)
|
@add_data('b40')
def b40_toy1d_dataset(key, data_size, batch_size, n_data=40, x_lim=[-4, 4], noise_std=3):
"""x^3 invertible function with added Gaussian noise.
Return:
(inputs, targets) are for plotting/visualization
get_batch: function for getting another randomly selected batch of data
"""
subkeys = jax.random.split(key, n_data)
inputs = np.concatenate([np.linspace(-4, -3, num=n_data // 2), np.linspace(-2.5, -1, num=n_data // 5), np.linspace(-1, 2, num=n_data // 5), np.linspace(2, 4, num=n_data // 2)])
rngs = jax.random.split(key, inputs.shape[0])
noise = vmap(random.normal, (0, None), 0)(rngs, (1,))
assert len(noise) == inputs.shape[0]
noise = noise.squeeze()
true_fn = lambda x: x ** 3
targets = true_fn(inputs) + noise * noise_std
inputs = inputs[..., None]
targets = targets[..., None]
print('plot sample-gap-40 inputs: {} targets: {}'.format(inputs.shape, targets.shape))
D = inputs.shape[-1]
test_x0 = np.repeat(np.expand_dims(np.linspace(x_lim[0] - 2, x_lim[1] + 2, data_size), axis=1), D, axis=1)
test_x1 = np.repeat(np.expand_dims(true_fn(test_x0[:, 0]), axis=1), D, axis=1)
test = (test_x0, test_x1)
def get_batch(key, data_size, batch_size, D=1):
assert data_size % batch_size == 0
num_batches = data_size / batch_size
(key, subkey) = jax.random.split(key)
batch_x = jax.random.uniform(subkey, (batch_size, D), minval=x_lim[0], maxval=x_lim[1])
batch_y = np.repeat(true_fn(batch_x[:, 0])[..., None], D, axis=1)
return (key, (batch_x, batch_y))
return (inputs, targets, test, get_batch, noise_std)
|
bayesian-sde
|
positive
|
def acc_and_f1(preds, labels):
<DeepExtract>
acc = (preds == labels).mean()
</DeepExtract>
f1 = f1_score(y_true=labels, y_pred=preds)
return {'acc': acc, 'f1': f1, 'acc_and_f1': (acc + f1) / 2}
|
def acc_and_f1(preds, labels):
acc = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
return {'acc': acc, 'f1': f1, 'acc_and_f1': (acc + f1) / 2}
|
ACS-QG
|
positive
|
def _create(self) -> ReleaseManifest:
"""Create the manifest from GitHub asset metadata and file contents."""
assets = {}
for (asset_name, asset_data) in self.assets.items():
entries = {}
data = asset_data['data']
metadata = asset_data['metadata']
for (file_name, file_data) in data.items():
file_metadata = file_data['metadata']
name = Path(file_name).name
file_metadata.update(name=name)
entry = AssetManifestEntry(**file_metadata)
entries[name] = entry
assets[asset_name] = AssetManifestMetadata(metadata['browser_download_url'], entries, metadata['zipped_sha256'], metadata['created_at'], metadata['label'])
<DeepExtract>
ignore = ['assets']
manual_set_keys = ['author', 'description']
keys = [f.name for f in dataclasses.fields(ReleaseManifest) if f.name not in ignore + manual_set_keys]
parsed = {k: self.release.raw_data[k] for k in keys}
parsed.update(description=self.release.raw_data['body'], author=self.release.raw_data['author']['login'])
release_metadata = parsed
</DeepExtract>
release_metadata.update(assets=assets)
release_manifest = ReleaseManifest(**release_metadata)
return release_manifest
|
def _create(self) -> ReleaseManifest:
"""Create the manifest from GitHub asset metadata and file contents."""
assets = {}
for (asset_name, asset_data) in self.assets.items():
entries = {}
data = asset_data['data']
metadata = asset_data['metadata']
for (file_name, file_data) in data.items():
file_metadata = file_data['metadata']
name = Path(file_name).name
file_metadata.update(name=name)
entry = AssetManifestEntry(**file_metadata)
entries[name] = entry
assets[asset_name] = AssetManifestMetadata(metadata['browser_download_url'], entries, metadata['zipped_sha256'], metadata['created_at'], metadata['label'])
ignore = ['assets']
manual_set_keys = ['author', 'description']
keys = [f.name for f in dataclasses.fields(ReleaseManifest) if f.name not in ignore + manual_set_keys]
parsed = {k: self.release.raw_data[k] for k in keys}
parsed.update(description=self.release.raw_data['body'], author=self.release.raw_data['author']['login'])
release_metadata = parsed
release_metadata.update(assets=assets)
release_manifest = ReleaseManifest(**release_metadata)
return release_manifest
|
detection-rules
|
positive
|
def login(webdriver, datasets):
<DeepExtract>
user = random.choice(datasets['users'])
project_with_repo_prs = random.choice(datasets['pull_requests'])
datasets['username'] = user[1]
datasets['password'] = user[2]
datasets['project_key'] = project_with_repo_prs[1]
datasets['repo_slug'] = project_with_repo_prs[0]
datasets['pull_request_branch_from'] = project_with_repo_prs[3]
datasets['pull_request_branch_to'] = project_with_repo_prs[4]
datasets['pull_request_id'] = project_with_repo_prs[2]
</DeepExtract>
client = BitbucketRestClient(BITBUCKET_SETTINGS.server_url, BITBUCKET_SETTINGS.admin_login, BITBUCKET_SETTINGS.admin_password)
webdriver.app_version = version.parse(client.get_bitbucket_version())
login_page = LoginPage(webdriver)
@print_timing('selenium_login')
def measure():
@print_timing('selenium_login:open_login_page')
def sub_measure():
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
<DeepExtract>
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
</DeepExtract>
login_page.set_credentials(datasets['username'], datasets['password'])
@print_timing('selenium_login:login_get_started')
def sub_measure():
login_page.submit_login()
get_started_page = GetStarted(webdriver)
get_started_page.wait_for_page_loaded()
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
<DeepExtract>
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
</DeepExtract>
<DeepExtract>
@print_timing('selenium_login:open_login_page')
def sub_measure():
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
sub_measure()
login_page.set_credentials(datasets['username'], datasets['password'])
@print_timing('selenium_login:login_get_started')
def sub_measure():
login_page.submit_login()
get_started_page = GetStarted(webdriver)
get_started_page.wait_for_page_loaded()
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
sub_measure()
</DeepExtract>
|
def login(webdriver, datasets):
user = random.choice(datasets['users'])
project_with_repo_prs = random.choice(datasets['pull_requests'])
datasets['username'] = user[1]
datasets['password'] = user[2]
datasets['project_key'] = project_with_repo_prs[1]
datasets['repo_slug'] = project_with_repo_prs[0]
datasets['pull_request_branch_from'] = project_with_repo_prs[3]
datasets['pull_request_branch_to'] = project_with_repo_prs[4]
datasets['pull_request_id'] = project_with_repo_prs[2]
client = BitbucketRestClient(BITBUCKET_SETTINGS.server_url, BITBUCKET_SETTINGS.admin_login, BITBUCKET_SETTINGS.admin_password)
webdriver.app_version = version.parse(client.get_bitbucket_version())
login_page = LoginPage(webdriver)
@print_timing('selenium_login')
def measure():
@print_timing('selenium_login:open_login_page')
def sub_measure():
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
login_page.set_credentials(datasets['username'], datasets['password'])
@print_timing('selenium_login:login_get_started')
def sub_measure():
login_page.submit_login()
get_started_page = GetStarted(webdriver)
get_started_page.wait_for_page_loaded()
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
@print_timing('selenium_login:open_login_page')
def sub_measure():
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
sub_measure()
login_page.set_credentials(datasets['username'], datasets['password'])
@print_timing('selenium_login:login_get_started')
def sub_measure():
login_page.submit_login()
get_started_page = GetStarted(webdriver)
get_started_page.wait_for_page_loaded()
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
sub_measure()
|
dc-app-performance-toolkit
|
positive
|
def flatten(self, input_layer=None):
if input_layer is None:
input_layer = self.top_layer
shape = input_layer.get_shape().as_list()
flat_dim = shape[1] * shape[2] * shape[3]
<DeepExtract>
if input_layer is None:
input_layer = self.top_layer
self.top_layer = tf.reshape(input_layer, [-1, flat_dim])
self.top_size = [-1, flat_dim][-1]
self.top_layer = self.top_layer
</DeepExtract>
return self.top_layer
|
def flatten(self, input_layer=None):
if input_layer is None:
input_layer = self.top_layer
shape = input_layer.get_shape().as_list()
flat_dim = shape[1] * shape[2] * shape[3]
if input_layer is None:
input_layer = self.top_layer
self.top_layer = tf.reshape(input_layer, [-1, flat_dim])
self.top_size = [-1, flat_dim][-1]
self.top_layer = self.top_layer
return self.top_layer
|
AOFP
|
positive
|
def sudo(self, params=''):
cmd = 'sudo {}'.format(params)
<DeepExtract>
raise NotImplementedError('Do not call base class!')
</DeepExtract>
return self.get_response()
|
def sudo(self, params=''):
cmd = 'sudo {}'.format(params)
raise NotImplementedError('Do not call base class!')
return self.get_response()
|
beeswarm
|
positive
|
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
<DeepExtract>
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
fname = fpath
if self.cachemode == 'all':
self.cache[name] = None
fname = self.cache[name]
</DeepExtract>
if not fname:
raise IOError('Resource %r not found.' % name)
return self.opener(name, *args, mode=mode, **kwargs)
|
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
fname = fpath
if self.cachemode == 'all':
self.cache[name] = None
fname = self.cache[name]
if not fname:
raise IOError('Resource %r not found.' % name)
return self.opener(name, *args, mode=mode, **kwargs)
|
AndroidGCMTutorial
|
positive
|
@app.function_name(name='put_get_multiple_blobs_as_bytes_return_http_response')
@app.blob_input(arg_name='inputfile1', data_type='BINARY', path='python-worker-tests/shmem-test-bytes-1.txt', connection='AzureWebJobsStorage')
@app.blob_input(arg_name='inputfile2', data_type='BINARY', path='python-worker-tests/shmem-test-bytes-2.txt', connection='AzureWebJobsStorage')
@app.blob_output(arg_name='outputfile1', path='python-worker-tests/shmem-test-bytes-out-1.txt', data_type='BINARY', connection='AzureWebJobsStorage')
@app.blob_output(arg_name='outputfile2', path='python-worker-tests/shmem-test-bytes-out-2.txt', data_type='BINARY', connection='AzureWebJobsStorage')
@app.route(route='put_get_multiple_blobs_as_bytes_return_http_response')
def put_get_multiple_blobs_as_bytes_return_http_response(req: func.HttpRequest, inputfile1: bytes, inputfile2: bytes, outputfile1: func.Out[bytes], outputfile2: func.Out[bytes]) -> func.HttpResponse:
"""
Read two blobs (bytes) and respond back (in HTTP response) with the number
of bytes read from each blob and the MD5 digest of the content of each.
Write two blobs (bytes) and respond back (in HTTP response) with the number
bytes written in each blob and the MD5 digest of the content of each.
The number of bytes to write are specified in the input HTTP request.
"""
input_content_size_1 = len(inputfile1)
input_content_size_2 = len(inputfile2)
input_content_md5_1 = hashlib.md5(inputfile1).hexdigest()
input_content_md5_2 = hashlib.md5(inputfile2).hexdigest()
output_content_size_1 = int(req.params['output_content_size_1'])
output_content_size_2 = int(req.params['output_content_size_2'])
<DeepExtract>
content = bytearray((random.getrandbits(8) for _ in range(output_content_size_1)))
content_md5 = hashlib.md5(content).hexdigest()
(output_content_1, output_content_md5_1) = (content, content_md5)
</DeepExtract>
<DeepExtract>
content = bytearray((random.getrandbits(8) for _ in range(output_content_size_2)))
content_md5 = hashlib.md5(content).hexdigest()
(output_content_2, output_content_md5_2) = (content, content_md5)
</DeepExtract>
outputfile1.set(output_content_1)
outputfile2.set(output_content_2)
response_dict = {'input_content_size_1': input_content_size_1, 'input_content_size_2': input_content_size_2, 'input_content_md5_1': input_content_md5_1, 'input_content_md5_2': input_content_md5_2, 'output_content_size_1': output_content_size_1, 'output_content_size_2': output_content_size_2, 'output_content_md5_1': output_content_md5_1, 'output_content_md5_2': output_content_md5_2}
response_body = json.dumps(response_dict, indent=2)
return func.HttpResponse(body=response_body, mimetype='application/json', status_code=200)
|
@app.function_name(name='put_get_multiple_blobs_as_bytes_return_http_response')
@app.blob_input(arg_name='inputfile1', data_type='BINARY', path='python-worker-tests/shmem-test-bytes-1.txt', connection='AzureWebJobsStorage')
@app.blob_input(arg_name='inputfile2', data_type='BINARY', path='python-worker-tests/shmem-test-bytes-2.txt', connection='AzureWebJobsStorage')
@app.blob_output(arg_name='outputfile1', path='python-worker-tests/shmem-test-bytes-out-1.txt', data_type='BINARY', connection='AzureWebJobsStorage')
@app.blob_output(arg_name='outputfile2', path='python-worker-tests/shmem-test-bytes-out-2.txt', data_type='BINARY', connection='AzureWebJobsStorage')
@app.route(route='put_get_multiple_blobs_as_bytes_return_http_response')
def put_get_multiple_blobs_as_bytes_return_http_response(req: func.HttpRequest, inputfile1: bytes, inputfile2: bytes, outputfile1: func.Out[bytes], outputfile2: func.Out[bytes]) -> func.HttpResponse:
"""
Read two blobs (bytes) and respond back (in HTTP response) with the number
of bytes read from each blob and the MD5 digest of the content of each.
Write two blobs (bytes) and respond back (in HTTP response) with the number
bytes written in each blob and the MD5 digest of the content of each.
The number of bytes to write are specified in the input HTTP request.
"""
input_content_size_1 = len(inputfile1)
input_content_size_2 = len(inputfile2)
input_content_md5_1 = hashlib.md5(inputfile1).hexdigest()
input_content_md5_2 = hashlib.md5(inputfile2).hexdigest()
output_content_size_1 = int(req.params['output_content_size_1'])
output_content_size_2 = int(req.params['output_content_size_2'])
content = bytearray((random.getrandbits(8) for _ in range(output_content_size_1)))
content_md5 = hashlib.md5(content).hexdigest()
(output_content_1, output_content_md5_1) = (content, content_md5)
content = bytearray((random.getrandbits(8) for _ in range(output_content_size_2)))
content_md5 = hashlib.md5(content).hexdigest()
(output_content_2, output_content_md5_2) = (content, content_md5)
outputfile1.set(output_content_1)
outputfile2.set(output_content_2)
response_dict = {'input_content_size_1': input_content_size_1, 'input_content_size_2': input_content_size_2, 'input_content_md5_1': input_content_md5_1, 'input_content_md5_2': input_content_md5_2, 'output_content_size_1': output_content_size_1, 'output_content_size_2': output_content_size_2, 'output_content_md5_1': output_content_md5_1, 'output_content_md5_2': output_content_md5_2}
response_body = json.dumps(response_dict, indent=2)
return func.HttpResponse(body=response_body, mimetype='application/json', status_code=200)
|
azure-functions-python-worker
|
positive
|
def _encode_as_pieces(text):
<DeepExtract>
if six.PY3:
if isinstance(text, str):
text = text
elif isinstance(text, bytes):
text = text.decode('utf-8', 'ignore')
else:
raise ValueError('Unsupported string type: %s' % type(text))
elif six.PY2:
if isinstance(text, str):
text = text.decode('utf-8', 'ignore')
elif isinstance(text, unicode):
text = text
else:
raise ValueError('Unsupported string type: %s' % type(text))
else:
raise ValueError('Not running on Python2 or Python 3?')
</DeepExtract>
if self.split_by_punct:
<DeepExtract>
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
words = [''.join(x) for x in output]
</DeepExtract>
pieces = [self.spm.encode_as_pieces(w) for w in words]
return [p for w in pieces for p in w]
else:
return self.spm.encode_as_pieces(text)
|
def _encode_as_pieces(text):
if six.PY3:
if isinstance(text, str):
text = text
elif isinstance(text, bytes):
text = text.decode('utf-8', 'ignore')
else:
raise ValueError('Unsupported string type: %s' % type(text))
elif six.PY2:
if isinstance(text, str):
text = text.decode('utf-8', 'ignore')
elif isinstance(text, unicode):
text = text
else:
raise ValueError('Unsupported string type: %s' % type(text))
else:
raise ValueError('Not running on Python2 or Python 3?')
if self.split_by_punct:
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
words = [''.join(x) for x in output]
pieces = [self.spm.encode_as_pieces(w) for w in words]
return [p for w in pieces for p in w]
else:
return self.spm.encode_as_pieces(text)
|
DeBERTa
|
positive
|
def observe(self, X, y):
"""Feed the observations back to hyperopt.
Parameters
----------
X : list of dict-like
Places where the objective function has already been evaluated.
Each suggestion is a dictionary where each key corresponds to a
parameter being optimized.
y : array-like, shape (n,)
Corresponding values where objective has been evaluated.
"""
assert len(X) == len(y)
for (x_guess, y_) in zip(X, y):
x_guess_ = HyperoptOptimizer.hashable_dict(x_guess)
assert x_guess_ in self.trial_id_lookup, 'Appears to be guess that did not originate from suggest'
trial_id = self.trial_id_lookup.pop(x_guess_)
<DeepExtract>
for trial in self.trials._dynamic_trials:
if trial['tid'] == trial_id:
assert isinstance(trial, dict)
assert 'state' in trial and 'result' in trial
assert trial['state'] == JOB_STATE_NEW
trial = trial
assert False, 'No matching trial ID'
</DeepExtract>
assert self.cleanup_guess(trial['misc']['vals']) == x_guess, 'trial ID not consistent with x values stored'
result = {'loss': float(y_), 'status': STATUS_OK}
trial['state'] = JOB_STATE_DONE
trial['result'] = result
self.trials.refresh()
|
def observe(self, X, y):
"""Feed the observations back to hyperopt.
Parameters
----------
X : list of dict-like
Places where the objective function has already been evaluated.
Each suggestion is a dictionary where each key corresponds to a
parameter being optimized.
y : array-like, shape (n,)
Corresponding values where objective has been evaluated.
"""
assert len(X) == len(y)
for (x_guess, y_) in zip(X, y):
x_guess_ = HyperoptOptimizer.hashable_dict(x_guess)
assert x_guess_ in self.trial_id_lookup, 'Appears to be guess that did not originate from suggest'
trial_id = self.trial_id_lookup.pop(x_guess_)
for trial in self.trials._dynamic_trials:
if trial['tid'] == trial_id:
assert isinstance(trial, dict)
assert 'state' in trial and 'result' in trial
assert trial['state'] == JOB_STATE_NEW
trial = trial
assert False, 'No matching trial ID'
assert self.cleanup_guess(trial['misc']['vals']) == x_guess, 'trial ID not consistent with x values stored'
result = {'loss': float(y_), 'status': STATUS_OK}
trial['state'] = JOB_STATE_DONE
trial['result'] = result
self.trials.refresh()
|
bayesmark
|
positive
|
def _coco_eval_to_keypoint_results(coco_eval):
<DeepExtract>
res = OrderedDict({'keypoint': OrderedDict([('AP', -1), ('AP50', -1), ('AP75', -1), ('APm', -1), ('APl', -1)])})
</DeepExtract>
if coco_eval is not None:
s = coco_eval.stats
res['keypoint']['AP'] = s[COCO_AP]
res['keypoint']['AP50'] = s[COCO_AP50]
res['keypoint']['AP75'] = s[COCO_AP75]
res['keypoint']['APm'] = s[COCO_KPS_APM]
res['keypoint']['APl'] = s[COCO_KPS_APL]
return res
|
def _coco_eval_to_keypoint_results(coco_eval):
res = OrderedDict({'keypoint': OrderedDict([('AP', -1), ('AP50', -1), ('AP75', -1), ('APm', -1), ('APl', -1)])})
if coco_eval is not None:
s = coco_eval.stats
res['keypoint']['AP'] = s[COCO_AP]
res['keypoint']['AP50'] = s[COCO_AP50]
res['keypoint']['AP75'] = s[COCO_AP75]
res['keypoint']['APm'] = s[COCO_KPS_APM]
res['keypoint']['APl'] = s[COCO_KPS_APL]
return res
|
Context-aware-ZSR
|
positive
|
def utcoffset(self, dt):
<DeepExtract>
if not self._dstmonth:
isdst = False
elif dt is None:
isdst = None
dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek, self._dsthour, self._dstminute, self._dstweeknumber)
dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek, self._stdhour, self._stdminute, self._stdweeknumber)
if dston < dstoff:
isdst = dston <= dt.replace(tzinfo=None) < dstoff
else:
isdst = not dstoff <= dt.replace(tzinfo=None) < dston
</DeepExtract>
if isdst is None:
return None
elif isdst:
return datetime.timedelta(minutes=self._dstoffset)
else:
return datetime.timedelta(minutes=self._stdoffset)
|
def utcoffset(self, dt):
if not self._dstmonth:
isdst = False
elif dt is None:
isdst = None
dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek, self._dsthour, self._dstminute, self._dstweeknumber)
dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek, self._stdhour, self._stdminute, self._stdweeknumber)
if dston < dstoff:
isdst = dston <= dt.replace(tzinfo=None) < dstoff
else:
isdst = not dstoff <= dt.replace(tzinfo=None) < dston
if isdst is None:
return None
elif isdst:
return datetime.timedelta(minutes=self._dstoffset)
else:
return datetime.timedelta(minutes=self._stdoffset)
|
alfred2-hasher
|
positive
|
def get_status(self):
""" Get status
"""
<DeepExtract>
if time.time() - self.pre_time < 1.0:
time.sleep(1.0)
self._intf.write(str('status'))
self.pre_time = time.time()
</DeepExtract>
<DeepExtract>
ret = self._intf.read()
if len(ret) < 2 or ret[-2:] != '\r\n':
logger.warning('read() termination error')
ret = ret[:-2]
</DeepExtract>
logger.debug('status:{:s}'.format(ret))
try:
tmp = ret.split(' ', 1)
status = int(tmp[0])
status_str = tmp[1:]
except (ValueError, AttributeError):
logger.warning('get_status() wrong format: {}'.format(repr(ret)))
status = -99
status_str = ret
return (status, status_str)
|
def get_status(self):
""" Get status
"""
if time.time() - self.pre_time < 1.0:
time.sleep(1.0)
self._intf.write(str('status'))
self.pre_time = time.time()
ret = self._intf.read()
if len(ret) < 2 or ret[-2:] != '\r\n':
logger.warning('read() termination error')
ret = ret[:-2]
logger.debug('status:{:s}'.format(ret))
try:
tmp = ret.split(' ', 1)
status = int(tmp[0])
status_str = tmp[1:]
except (ValueError, AttributeError):
logger.warning('get_status() wrong format: {}'.format(repr(ret)))
status = -99
status_str = ret
return (status, status_str)
|
basil
|
positive
|
def main():
setup_SIGINT()
try:
<DeepExtract>
progname = os.path.basename(sys.argv[0])
parser = CLIParser(prog=progname, description='\n Reshuffle the input CNF. Returns a formula logically\n equivalent to the input with random application of\n (1) Polarity flips (2) Variables permutation (3) Clauses permutation.\n ', epilog="\n For more information type '%s [--help | -h ]'\n " % progname)
parser.add_argument('--output', '-o', type=argparse.FileType('w'), metavar='<output>', default='-', help="Output file. The formula is saved\n on file instead of being sent to standard\n output. Setting '<output>' to '-' is another\n way to send the formula to standard output.\n (default: -)\n ")
parser.add_argument('--seed', '-S', metavar='<seed>', default=None, type=str, action='store', help='Seed for any random process in the\n program. Any python hashable object will\n be fine. (default: current time)\n ')
parser.add_argument('--input', '-i', type=argparse.FileType('r'), metavar='<input>', default='-', help="Input file. A formula in dimacs format. Setting '<input>' to '-' is\n another way to read from standard input.\n (default: -)\n ")
parser.add_argument('--no-polarity-flips', '-p', action='store_true', dest='no_polarity_flips', help='No polarity flips')
parser.add_argument('--no-variables-permutation', '-v', action='store_true', dest='no_variables_permutation', help='No permutation of variables')
parser.add_argument('--no-clauses-permutation', '-c', action='store_true', dest='no_clauses_permutation', help='No permutation of clauses')
parser.add_argument('--quiet', '-q', action='store_false', default=True, dest='verbose', help='Output just the formula with no header.')
sys.argv = [str(x) for x in sys.argv]
args = parser.parse_args(sys.argv[1:])
if hasattr(args, 'seed') and args.seed:
random.seed(args.seed)
msg = "Waiting for a DIMACS formula on <stdin>.\n Alternatively you can feed a formula to <stdin>\n with piping or using '-i' command line argument."
with msg_prefix('c INPUT: '):
interactive_msg(msg, filltext=70)
F = CNF.from_file(args.input)
polarity_flips = 'fixed' if args.no_polarity_flips else 'shuffle'
variables_permutation = 'fixed' if args.no_variables_permutation else 'shuffle'
clauses_permutation = 'fixed' if args.no_clauses_permutation else 'shuffle'
G = Shuffle(F, polarity_flips, variables_permutation, clauses_permutation)
if mode == 'formula':
return G
elif mode == 'string':
return G.to_dimacs()
else:
G.to_file(args.output, fileformat='dimacs')
</DeepExtract>
except ValueError as e:
error_msg('DIMACS ERROR: ' + str(e))
sys.exit(-1)
except CLIError as e:
error_msg(str(e))
sys.exit(-1)
except InternalBug as e:
print(str(e), file=sys.stderr)
sys.exit(-1)
except (BrokenPipeError, IOError):
pass
sys.stderr.close()
|
def main():
setup_SIGINT()
try:
progname = os.path.basename(sys.argv[0])
parser = CLIParser(prog=progname, description='\n Reshuffle the input CNF. Returns a formula logically\n equivalent to the input with random application of\n (1) Polarity flips (2) Variables permutation (3) Clauses permutation.\n ', epilog="\n For more information type '%s [--help | -h ]'\n " % progname)
parser.add_argument('--output', '-o', type=argparse.FileType('w'), metavar='<output>', default='-', help="Output file. The formula is saved\n on file instead of being sent to standard\n output. Setting '<output>' to '-' is another\n way to send the formula to standard output.\n (default: -)\n ")
parser.add_argument('--seed', '-S', metavar='<seed>', default=None, type=str, action='store', help='Seed for any random process in the\n program. Any python hashable object will\n be fine. (default: current time)\n ')
parser.add_argument('--input', '-i', type=argparse.FileType('r'), metavar='<input>', default='-', help="Input file. A formula in dimacs format. Setting '<input>' to '-' is\n another way to read from standard input.\n (default: -)\n ")
parser.add_argument('--no-polarity-flips', '-p', action='store_true', dest='no_polarity_flips', help='No polarity flips')
parser.add_argument('--no-variables-permutation', '-v', action='store_true', dest='no_variables_permutation', help='No permutation of variables')
parser.add_argument('--no-clauses-permutation', '-c', action='store_true', dest='no_clauses_permutation', help='No permutation of clauses')
parser.add_argument('--quiet', '-q', action='store_false', default=True, dest='verbose', help='Output just the formula with no header.')
sys.argv = [str(x) for x in sys.argv]
args = parser.parse_args(sys.argv[1:])
if hasattr(args, 'seed') and args.seed:
random.seed(args.seed)
msg = "Waiting for a DIMACS formula on <stdin>.\n Alternatively you can feed a formula to <stdin>\n with piping or using '-i' command line argument."
with msg_prefix('c INPUT: '):
interactive_msg(msg, filltext=70)
F = CNF.from_file(args.input)
polarity_flips = 'fixed' if args.no_polarity_flips else 'shuffle'
variables_permutation = 'fixed' if args.no_variables_permutation else 'shuffle'
clauses_permutation = 'fixed' if args.no_clauses_permutation else 'shuffle'
G = Shuffle(F, polarity_flips, variables_permutation, clauses_permutation)
if mode == 'formula':
return G
elif mode == 'string':
return G.to_dimacs()
else:
G.to_file(args.output, fileformat='dimacs')
except ValueError as e:
error_msg('DIMACS ERROR: ' + str(e))
sys.exit(-1)
except CLIError as e:
error_msg(str(e))
sys.exit(-1)
except InternalBug as e:
print(str(e), file=sys.stderr)
sys.exit(-1)
except (BrokenPipeError, IOError):
pass
sys.stderr.close()
|
cnfgen
|
positive
|
def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, head_mask=None):
input_ids = input_ids.transpose(0, 1).contiguous()
token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
(qlen, bsz) = (input_ids.shape[0], input_ids.shape[1])
mlen = mems[0].shape[0] if mems is not None else 0
klen = mlen + qlen
dtype_float = next(self.parameters()).dtype
device = next(self.parameters()).device
if self.attn_type == 'uni':
<DeepExtract>
attn_mask = torch.ones([qlen, qlen])
mask_up = torch.triu(attn_mask, diagonal=1)
attn_mask_pad = torch.zeros([qlen, mlen])
ret = torch.cat([attn_mask_pad, mask_up], dim=1)
if self.same_length:
mask_lo = torch.tril(attn_mask, diagonal=-1)
ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
ret = ret.to(next(self.parameters()))
attn_mask = ret
</DeepExtract>
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == 'bi':
attn_mask = None
else:
raise ValueError('Unsupported attention type: {}'.format(self.attn_type))
assert input_mask is None or attention_mask is None, 'You can only use one of input_mask (uses 1 for padding) '
'or attention_mask (uses 0 for padding, added for compatbility with BERT). Please choose one.'
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - attention_mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
data_mask = torch.cat([mems_mask, data_mask], dim=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = (attn_mask > 0).to(dtype_float)
if attn_mask is not None:
non_tgt_mask = -torch.eye(qlen).to(attn_mask)
non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
non_tgt_mask = (attn_mask + non_tgt_mask[:, :, None, None] > 0).to(attn_mask)
else:
non_tgt_mask = None
word_emb_k = self.word_embedding(input_ids)
output_h = self.dropout(word_emb_k)
if target_mapping is not None:
word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
output_g = self.dropout(word_emb_q)
else:
output_g = None
if token_type_ids is not None:
mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
else:
seg_mat = None
<DeepExtract>
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
inv_freq = 1 / torch.pow(10000, freq_seq / self.d_model)
if self.attn_type == 'bi':
(beg, end) = (klen, -qlen)
elif self.attn_type == 'uni':
(beg, end) = (klen, -1)
else:
raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))
if self.bi_data:
fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
if bsz is not None:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
else:
fwd_pos_seq = torch.arange(beg, end, -1.0)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
pos_emb = pos_emb.to(next(self.parameters()))
pos_emb = pos_emb
</DeepExtract>
pos_emb = self.dropout(pos_emb)
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.n_layer
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = []
hidden_states = []
for (i, layer_module) in enumerate(self.layer):
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, head_mask=head_mask[i])
(output_h, output_g) = outputs[:2]
if self.output_attentions:
attentions.append(outputs[2])
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h)
outputs = (output.permute(1, 0, 2).contiguous(), new_mems)
if self.output_hidden_states:
if output_g is not None:
hidden_states = tuple((h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs))
else:
hidden_states = tuple((hs.permute(1, 0, 2).contiguous() for hs in hidden_states))
outputs = outputs + (hidden_states,)
if self.output_attentions:
attentions = tuple((t.permute(2, 3, 0, 1).contiguous() for t in attentions))
outputs = outputs + (attentions,)
return outputs
|
def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, head_mask=None):
input_ids = input_ids.transpose(0, 1).contiguous()
token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
(qlen, bsz) = (input_ids.shape[0], input_ids.shape[1])
mlen = mems[0].shape[0] if mems is not None else 0
klen = mlen + qlen
dtype_float = next(self.parameters()).dtype
device = next(self.parameters()).device
if self.attn_type == 'uni':
attn_mask = torch.ones([qlen, qlen])
mask_up = torch.triu(attn_mask, diagonal=1)
attn_mask_pad = torch.zeros([qlen, mlen])
ret = torch.cat([attn_mask_pad, mask_up], dim=1)
if self.same_length:
mask_lo = torch.tril(attn_mask, diagonal=-1)
ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
ret = ret.to(next(self.parameters()))
attn_mask = ret
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == 'bi':
attn_mask = None
else:
raise ValueError('Unsupported attention type: {}'.format(self.attn_type))
assert input_mask is None or attention_mask is None, 'You can only use one of input_mask (uses 1 for padding) '
'or attention_mask (uses 0 for padding, added for compatbility with BERT). Please choose one.'
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - attention_mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
data_mask = torch.cat([mems_mask, data_mask], dim=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = (attn_mask > 0).to(dtype_float)
if attn_mask is not None:
non_tgt_mask = -torch.eye(qlen).to(attn_mask)
non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
non_tgt_mask = (attn_mask + non_tgt_mask[:, :, None, None] > 0).to(attn_mask)
else:
non_tgt_mask = None
word_emb_k = self.word_embedding(input_ids)
output_h = self.dropout(word_emb_k)
if target_mapping is not None:
word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
output_g = self.dropout(word_emb_q)
else:
output_g = None
if token_type_ids is not None:
mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
else:
seg_mat = None
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
inv_freq = 1 / torch.pow(10000, freq_seq / self.d_model)
if self.attn_type == 'bi':
(beg, end) = (klen, -qlen)
elif self.attn_type == 'uni':
(beg, end) = (klen, -1)
else:
raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))
if self.bi_data:
fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
if bsz is not None:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
else:
fwd_pos_seq = torch.arange(beg, end, -1.0)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
pos_emb = pos_emb.to(next(self.parameters()))
pos_emb = pos_emb
pos_emb = self.dropout(pos_emb)
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.n_layer
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = []
hidden_states = []
for (i, layer_module) in enumerate(self.layer):
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, head_mask=head_mask[i])
(output_h, output_g) = outputs[:2]
if self.output_attentions:
attentions.append(outputs[2])
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h)
outputs = (output.permute(1, 0, 2).contiguous(), new_mems)
if self.output_hidden_states:
if output_g is not None:
hidden_states = tuple((h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs))
else:
hidden_states = tuple((hs.permute(1, 0, 2).contiguous() for hs in hidden_states))
outputs = outputs + (hidden_states,)
if self.output_attentions:
attentions = tuple((t.permute(2, 3, 0, 1).contiguous() for t in attentions))
outputs = outputs + (attentions,)
return outputs
|
DIAC2019-DQD-Based-on-Adversarial-Attack
|
positive
|
def get_kitti_info_path(idx, prefix, info_type='image_2', file_tail='.png', training=True, relative_path=True):
<DeepExtract>
img_idx_str = '{:06d}'.format(idx)
</DeepExtract>
img_idx_str += file_tail
prefix = pathlib.Path(prefix)
if training:
file_path = pathlib.Path('training') / info_type / img_idx_str
else:
file_path = pathlib.Path('testing') / info_type / img_idx_str
if not (prefix / file_path).exists():
raise ValueError('file not exist: {}'.format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
|
def get_kitti_info_path(idx, prefix, info_type='image_2', file_tail='.png', training=True, relative_path=True):
img_idx_str = '{:06d}'.format(idx)
img_idx_str += file_tail
prefix = pathlib.Path(prefix)
if training:
file_path = pathlib.Path('training') / info_type / img_idx_str
else:
file_path = pathlib.Path('testing') / info_type / img_idx_str
if not (prefix / file_path).exists():
raise ValueError('file not exist: {}'.format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
|
CenterPoint-KITTI
|
positive
|
def train(self):
"""
Launches training and stores checkpoints at a frequency defined within params
:return: None
"""
image_paths = get_image_paths(self.params, isTest=False)
if self.params.BLOCK_IMAGES:
train_data = get_train_data(image_paths, self.params)
else:
train_data = []
for imgPath in image_paths:
train_data.append((imgPath, 0, 0))
<DeepExtract>
idx = np.random.permutation(len(train_data))
while True:
batchInds = get_batch_inds(idx, self.params)
for inds in batchInds:
(imgBatch, labelBatch) = load_batch(inds, train_data, self.params, self.mode, self.meanVals)
yield (imgBatch, labelBatch)
</DeepExtract>
<DeepExtract>
if self.params.BLOCK_IMAGES:
imgSz = self.params.BLOCK_SZ
else:
imgSz = self.params.IMG_SZ
input_tensor = Input(shape=(imgSz[0], imgSz[1], self.params.NUM_CHANNELS))
input_shape = (imgSz[0], imgSz[1], self.params.NUM_CHANNELS)
if self.params.CONTINUE_TRAINING:
model = self.get_model(None, input_tensor, input_shape)
if self.mode == self.params.SINGLEVIEW_MODE:
print('Continuing trainining from %s' % self.params.CONTINUE_SINGLEVIEW_MODEL_FILE)
model.load_weights(self.params.CONTINUE_SINGLEVIEW_MODEL_FILE)
elif self.mode == self.params.SEMANTIC_MODE:
print('Continuing trainining from %s' % self.params.CONTINUE_SEMANTIC_MODEL_FILE)
model.load_weights(self.params.CONTINUE_SEMANTIC_MODEL_FILE)
else:
if self.params.NUM_CHANNELS > 3:
input_tensor_rgb = Input(shape=(imgSz[0], imgSz[1], 3))
input_shape_rgb = (imgSz[0], imgSz[1], 3)
model = self.get_model(None, input_tensor, input_shape)
if self.params.ENCODER_WEIGHTS is not None:
baseModel = self.get_model(self.params.ENCODER_WEIGHTS, input_tensor_rgb, input_shape_rgb)
print('Copying %s weights to %d-band model' % (self.params.ENCODER_WEIGHTS, self.params.NUM_CHANNELS))
for i in tqdm(range(len(baseModel.layers))):
if i >= 7:
model.layers[i].set_weights(baseModel.layers[i].get_weights())
else:
model = self.get_model(self.params.ENCODER_WEIGHTS, input_tensor, input_shape)
loss = self.params.SEMANTIC_LOSS
if self.mode == self.params.SINGLEVIEW_MODE:
loss = self.no_nan_mse
model.compile(self.params.OPTIMIZER, loss=loss)
model = model
</DeepExtract>
model.summary()
checkpoint = ModelCheckpoint(filepath=self.params.CHECKPOINT_PATH, monitor='loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=self.params.MODEL_SAVE_PERIOD)
if len(train_data) <= 0:
raise ValueError('No training data found. Update params.py accordingly')
model.fit_generator(train_datagen, steps_per_epoch=int(len(train_data) / self.params.BATCH_SZ), epochs=self.params.NUM_EPOCHS, callbacks=[checkpoint])
|
def train(self):
"""
Launches training and stores checkpoints at a frequency defined within params
:return: None
"""
image_paths = get_image_paths(self.params, isTest=False)
if self.params.BLOCK_IMAGES:
train_data = get_train_data(image_paths, self.params)
else:
train_data = []
for imgPath in image_paths:
train_data.append((imgPath, 0, 0))
idx = np.random.permutation(len(train_data))
while True:
batchInds = get_batch_inds(idx, self.params)
for inds in batchInds:
(imgBatch, labelBatch) = load_batch(inds, train_data, self.params, self.mode, self.meanVals)
yield (imgBatch, labelBatch)
if self.params.BLOCK_IMAGES:
imgSz = self.params.BLOCK_SZ
else:
imgSz = self.params.IMG_SZ
input_tensor = Input(shape=(imgSz[0], imgSz[1], self.params.NUM_CHANNELS))
input_shape = (imgSz[0], imgSz[1], self.params.NUM_CHANNELS)
if self.params.CONTINUE_TRAINING:
model = self.get_model(None, input_tensor, input_shape)
if self.mode == self.params.SINGLEVIEW_MODE:
print('Continuing trainining from %s' % self.params.CONTINUE_SINGLEVIEW_MODEL_FILE)
model.load_weights(self.params.CONTINUE_SINGLEVIEW_MODEL_FILE)
elif self.mode == self.params.SEMANTIC_MODE:
print('Continuing trainining from %s' % self.params.CONTINUE_SEMANTIC_MODEL_FILE)
model.load_weights(self.params.CONTINUE_SEMANTIC_MODEL_FILE)
else:
if self.params.NUM_CHANNELS > 3:
input_tensor_rgb = Input(shape=(imgSz[0], imgSz[1], 3))
input_shape_rgb = (imgSz[0], imgSz[1], 3)
model = self.get_model(None, input_tensor, input_shape)
if self.params.ENCODER_WEIGHTS is not None:
baseModel = self.get_model(self.params.ENCODER_WEIGHTS, input_tensor_rgb, input_shape_rgb)
print('Copying %s weights to %d-band model' % (self.params.ENCODER_WEIGHTS, self.params.NUM_CHANNELS))
for i in tqdm(range(len(baseModel.layers))):
if i >= 7:
model.layers[i].set_weights(baseModel.layers[i].get_weights())
else:
model = self.get_model(self.params.ENCODER_WEIGHTS, input_tensor, input_shape)
loss = self.params.SEMANTIC_LOSS
if self.mode == self.params.SINGLEVIEW_MODE:
loss = self.no_nan_mse
model.compile(self.params.OPTIMIZER, loss=loss)
model = model
model.summary()
checkpoint = ModelCheckpoint(filepath=self.params.CHECKPOINT_PATH, monitor='loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=self.params.MODEL_SAVE_PERIOD)
if len(train_data) <= 0:
raise ValueError('No training data found. Update params.py accordingly')
model.fit_generator(train_datagen, steps_per_epoch=int(len(train_data) / self.params.BATCH_SZ), epochs=self.params.NUM_EPOCHS, callbacks=[checkpoint])
|
dfc2019
|
positive
|