Columns: before (stringlengths 0-955k), after (stringlengths 0-877k), repo (stringlengths 1-74), type (stringclasses, 1 value)
def __init__(self, num_layers, heads, head_convs, _): assert head_convs['hm'][0] in [64, 256] super(PoseResDCN, self).__init__(heads, head_convs, 1, head_convs['hm'][0], opt=_) (block, layers) = resnet_spec[num_layers] self.inplanes = 64 self.deconv_with_bias = False self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) <DeepExtract> downsample = None if stride != 1 or self.inplanes != 64 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(64 * block.expansion, momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, 64, stride, downsample)) self.inplanes = 64 * block.expansion for i in range(1, layers[0]): layers.append(block(self.inplanes, 64)) self.layer1 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 128 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128 * block.expansion, momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, 128, 2, downsample)) self.inplanes = 128 * block.expansion for i in range(1, layers[1]): layers.append(block(self.inplanes, 128)) self.layer2 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 256 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(256 * block.expansion, momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, 256, 2, downsample)) self.inplanes = 256 * block.expansion for i in range(1, layers[2]): layers.append(block(self.inplanes, 256)) self.layer3 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 512 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(512 * block.expansion, momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, 512, 2, downsample)) self.inplanes = 512 * block.expansion for i in range(1, layers[3]): layers.append(block(self.inplanes, 512)) self.layer4 = nn.Sequential(*layers) </DeepExtract> if head_convs['hm'][0] == 64: print('Using slimed resnet: 256 128 64 up channels.') <DeepExtract> assert 3 == len([256, 128, 64]), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' assert 3 == len([4, 4, 4]), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' layers = [] for i in range(3): (kernel, padding, output_padding) = self._get_deconv_cfg([4, 4, 4][i], i) planes = [256, 128, 64][i] fc = DCN(self.inplanes, planes, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1) up = nn.ConvTranspose2d(in_channels=planes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias) fill_up_weights(up) layers.append(fc) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) layers.append(up) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) self.inplanes = planes self.deconv_layers = nn.Sequential(*layers) </DeepExtract> else: print('Using original resnet: 256 256 256 up channels.') print('Using 256 deconvs') <DeepExtract> assert 3 == len([256, 256, 256]), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' assert 3 == len([4, 4, 4]), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' layers = [] for i in range(3): (kernel, padding, output_padding) = self._get_deconv_cfg([4, 4, 4][i], i) planes = [256, 256, 256][i] fc = DCN(self.inplanes, planes, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1) up = nn.ConvTranspose2d(in_channels=planes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias) fill_up_weights(up) layers.append(fc) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) layers.append(up) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) self.inplanes = planes self.deconv_layers = nn.Sequential(*layers) </DeepExtract> <DeepExtract> url = model_urls['resnet{}'.format(num_layers)] pretrained_state_dict = model_zoo.load_url(url) print('=> loading pretrained model {}'.format(url)) self.load_state_dict(pretrained_state_dict, strict=False) if _.rgb: print('shuffle ImageNet pretrained model from RGB to BGR') (self.base.base_layer[0].weight.data[:, 0], self.base.base_layer[0].weight.data[:, 2]) = (self.base.base_layer[0].weight.data[:, 2].clone(), self.base.base_layer[0].weight.data[:, 0].clone()) print('=> init deconv weights from normal distribution') for (name, m) in self.deconv_layers.named_modules(): if isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) </DeepExtract>
def __init__(self, num_layers, heads, head_convs, _): assert head_convs['hm'][0] in [64, 256] super(PoseResDCN, self).__init__(heads, head_convs, 1, head_convs['hm'][0], opt=_) (block, layers) = resnet_spec[num_layers] self.inplanes = 64 self.deconv_with_bias = False self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) downsample = None if stride != 1 or self.inplanes != 64 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(64 * block.expansion, momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, 64, stride, downsample)) self.inplanes = 64 * block.expansion for i in range(1, layers[0]): layers.append(block(self.inplanes, 64)) self.layer1 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 128 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128 * block.expansion, momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, 128, 2, downsample)) self.inplanes = 128 * block.expansion for i in range(1, layers[1]): layers.append(block(self.inplanes, 128)) self.layer2 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 256 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(256 * block.expansion, momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, 256, 2, downsample)) self.inplanes = 256 * block.expansion for i in range(1, layers[2]): layers.append(block(self.inplanes, 256)) self.layer3 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 512 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(512 * block.expansion, momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, 512, 2, downsample)) self.inplanes = 512 * block.expansion for i in range(1, layers[3]): layers.append(block(self.inplanes, 512)) self.layer4 = nn.Sequential(*layers) if head_convs['hm'][0] == 64: print('Using slimed resnet: 256 128 64 up channels.') assert 3 == len([256, 128, 64]), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' assert 3 == len([4, 4, 4]), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' layers = [] for i in range(3): (kernel, padding, output_padding) = self._get_deconv_cfg([4, 4, 4][i], i) planes = [256, 128, 64][i] fc = DCN(self.inplanes, planes, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1) up = nn.ConvTranspose2d(in_channels=planes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias) fill_up_weights(up) layers.append(fc) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) layers.append(up) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) self.inplanes = planes self.deconv_layers = nn.Sequential(*layers) else: print('Using original resnet: 256 256 256 up channels.') print('Using 256 deconvs') assert 3 == len([256, 256, 256]), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' assert 3 == len([4, 4, 4]), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' layers = [] for i in range(3): (kernel, padding, output_padding) = self._get_deconv_cfg([4, 4, 4][i], i) planes = [256, 256, 256][i] fc = DCN(self.inplanes, planes, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1) up = nn.ConvTranspose2d(in_channels=planes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias) fill_up_weights(up) layers.append(fc) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) layers.append(up) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) self.inplanes = planes self.deconv_layers = nn.Sequential(*layers) url = model_urls['resnet{}'.format(num_layers)] pretrained_state_dict = model_zoo.load_url(url) print('=> loading pretrained model {}'.format(url)) self.load_state_dict(pretrained_state_dict, strict=False) if _.rgb: print('shuffle ImageNet pretrained model from RGB to BGR') (self.base.base_layer[0].weight.data[:, 0], self.base.base_layer[0].weight.data[:, 2]) = (self.base.base_layer[0].weight.data[:, 2].clone(), self.base.base_layer[0].weight.data[:, 0].clone()) print('=> init deconv weights from normal distribution') for (name, m) in self.deconv_layers.named_modules(): if isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0)
CenterFusion
positive
def taxonomy_hierarchy(tid, verbose=False): """ Get the taxonomical hierarchy for a tax id. Yields so you can call this in a while loop Note we just yield the id :param tid: taxonomy ID :param verbose: More output """ global data global conn while tid != 1: if tid == 1: if verbose: sys.stderr.write(f'{bcolors.RED}Taxonomy ID 1 found{bcolors.ENDC}\n') return if not tid: if verbose: sys.stderr.write(f'{bcolors.RED}No tid{bcolors.ENDC}\n') return if tid not in data['node']: try: <DeepExtract> global data cur = conn.cursor() if tid in data['node']: return (data['node'][tid], data['name'][tid]) cur.execute('select * from nodes where tax_id = ?', [tid]) p = cur.fetchone() if not p: cur.execute('select new_tax_id from merged where old_tax_id = ?', [tid]) newid = cur.fetchone() if newid and newid[0]: cur.execute('select * from nodes where tax_id = ?', [newid[0]]) p = cur.fetchone() else: cur.execute('select tax_id from deleted where tax_id = ?', [tid]) newid = cur.fetchone() if newid and newid[0]: sys.stderr.write(f'{bcolors.PINK}Node {tid} has been deleted\n') return (TaxonNode(t=-1, p=1), TaxonName(t=-1, u='Deleted')) raise EntryNotInDatabaseError(f'ERROR: {tid} is not in the database and not merged\n') t = TaxonNode(*p) data['node'][tid] = t cur.execute('select * from names where tax_id = ?', [tid]) n = TaxonName(tid) for p in cur.fetchall(): if p[2]: n.unique = p[2] n.set_name(p[3], p[1]) data['name'][tid] = n return (t, n) </DeepExtract> except EntryNotInDatabaseError: if verbose: sys.stderr.write(f'{bcolors.RED}{tid} is not in database. Can not continue{bcolors.ENDC}\n') return if verbose: sys.stderr.write(f"{bcolors.GREEN}tid: {tid} parent: {data['node'][tid].parent}{bcolors.ENDC}\n") yield data['node'][tid].parent tid = data['node'][tid].parent
def taxonomy_hierarchy(tid, verbose=False): """ Get the taxonomical hierarchy for a tax id. Yields so you can call this in a while loop Note we just yield the id :param tid: taxonomy ID :param verbose: More output """ global data global conn while tid != 1: if tid == 1: if verbose: sys.stderr.write(f'{bcolors.RED}Taxonomy ID 1 found{bcolors.ENDC}\n') return if not tid: if verbose: sys.stderr.write(f'{bcolors.RED}No tid{bcolors.ENDC}\n') return if tid not in data['node']: try: global data cur = conn.cursor() if tid in data['node']: return (data['node'][tid], data['name'][tid]) cur.execute('select * from nodes where tax_id = ?', [tid]) p = cur.fetchone() if not p: cur.execute('select new_tax_id from merged where old_tax_id = ?', [tid]) newid = cur.fetchone() if newid and newid[0]: cur.execute('select * from nodes where tax_id = ?', [newid[0]]) p = cur.fetchone() else: cur.execute('select tax_id from deleted where tax_id = ?', [tid]) newid = cur.fetchone() if newid and newid[0]: sys.stderr.write(f'{bcolors.PINK}Node {tid} has been deleted\n') return (TaxonNode(t=-1, p=1), TaxonName(t=-1, u='Deleted')) raise EntryNotInDatabaseError(f'ERROR: {tid} is not in the database and not merged\n') t = TaxonNode(*p) data['node'][tid] = t cur.execute('select * from names where tax_id = ?', [tid]) n = TaxonName(tid) for p in cur.fetchall(): if p[2]: n.unique = p[2] n.set_name(p[3], p[1]) data['name'][tid] = n return (t, n) except EntryNotInDatabaseError: if verbose: sys.stderr.write(f'{bcolors.RED}{tid} is not in database. Can not continue{bcolors.ENDC}\n') return if verbose: sys.stderr.write(f"{bcolors.GREEN}tid: {tid} parent: {data['node'][tid].parent}{bcolors.ENDC}\n") yield data['node'][tid].parent tid = data['node'][tid].parent
EdwardsLab
positive
def visit_line(self, node): if len(node.astext()) == 0: self.sp() <DeepExtract> if self.vspace == 1999: self.vspace = 0 if self.vspace: self.nl() self.cmd('\\vs{%d}\n' % self.vspace) self.vspace = 0 self.noindent() </DeepExtract> else: <DeepExtract> self.context.append('{') self.last_output_char = '{' </DeepExtract> <DeepExtract> if isinstance('\\vl ', six.string_types): '\\vl ' = ['\\vl '] for c in '\\vl ': if c: self.context.append(c) self.last_output_char = c[-1] </DeepExtract>
def visit_line(self, node): if len(node.astext()) == 0: self.sp() if self.vspace == 1999: self.vspace = 0 if self.vspace: self.nl() self.cmd('\\vs{%d}\n' % self.vspace) self.vspace = 0 self.noindent() else: self.context.append('{') self.last_output_char = '{' if isinstance('\\vl ', six.string_types): '\\vl ' = ['\\vl '] for c in '\\vl ': if c: self.context.append(c) self.last_output_char = c[-1]
ebookmaker
positive
def get_histogram(data): """Return the histogram relative to the given data Assume that the data are already sorted """ count = len(data) if count < 2: raise StatisticsError('Too few data points ({}) for get_histogram'.format(count)) min_ = data[0] max_ = data[-1] <DeepExtract> var = variance(data, xbar) try: std = var.sqrt() except AttributeError: std = math.sqrt(var) </DeepExtract> <DeepExtract> width = _get_bin_width(std, count) count = int(round((max_ - min_) / width) + 1) if count: bins = [i * width + min_ for i in xrange(1, count + 1)] else: bins = [min_] bins = bins </DeepExtract> res = {x: 0 for x in bins} for value in data: for bin_ in bins: if value <= bin_: res[bin_] += 1 break return sorted(iteritems(res))
def get_histogram(data): """Return the histogram relative to the given data Assume that the data are already sorted """ count = len(data) if count < 2: raise StatisticsError('Too few data points ({}) for get_histogram'.format(count)) min_ = data[0] max_ = data[-1] var = variance(data, xbar) try: std = var.sqrt() except AttributeError: std = math.sqrt(var) width = _get_bin_width(std, count) count = int(round((max_ - min_) / width) + 1) if count: bins = [i * width + min_ for i in xrange(1, count + 1)] else: bins = [min_] bins = bins res = {x: 0 for x in bins} for value in data: for bin_ in bins: if value <= bin_: res[bin_] += 1 break return sorted(iteritems(res))
appmetrics
positive
def _ord_6(self, q1, p1, q2, p2, delta): """ Order 6 Integration Scheme References ---------- .. [1] Yoshida, Haruo, "Construction of higher order symplectic integrators"; Physics Letters A, vol. 150, no. 5-7, pp. 262-268, 1990. `DOI: <https://doi.org/10.1016/0375-9601(90)90092-3>`__ """ dl = delta (Z0, Z1) = _Z(self.order) <DeepExtract> dl = dl * Z1 (Z0, Z1) = _Z(self.order) step1 = self._ord_2(q1, p1, q2, p2, dl * Z1) step2 = self._ord_2(step1[0], step1[1], step1[2], step1[3], dl * Z0) step3 = self._ord_2(step2[0], step2[1], step2[2], step2[3], dl * Z1) step1 = step3 </DeepExtract> <DeepExtract> dl = dl * Z0 (Z0, Z1) = _Z(self.order) step1 = self._ord_2(step1[0], step1[1], step1[2], step1[3], dl * Z1) step2 = self._ord_2(step1[0], step1[1], step1[2], step1[3], dl * Z0) step3 = self._ord_2(step2[0], step2[1], step2[2], step2[3], dl * Z1) step2 = step3 </DeepExtract> <DeepExtract> dl = dl * Z1 (Z0, Z1) = _Z(self.order) step1 = self._ord_2(step2[0], step2[1], step2[2], step2[3], dl * Z1) step2 = self._ord_2(step1[0], step1[1], step1[2], step1[3], dl * Z0) step3 = self._ord_2(step2[0], step2[1], step2[2], step2[3], dl * Z1) step3 = step3 </DeepExtract> return step3
def _ord_6(self, q1, p1, q2, p2, delta): """ Order 6 Integration Scheme References ---------- .. [1] Yoshida, Haruo, "Construction of higher order symplectic integrators"; Physics Letters A, vol. 150, no. 5-7, pp. 262-268, 1990. `DOI: <https://doi.org/10.1016/0375-9601(90)90092-3>`__ """ dl = delta (Z0, Z1) = _Z(self.order) dl = dl * Z1 (Z0, Z1) = _Z(self.order) step1 = self._ord_2(q1, p1, q2, p2, dl * Z1) step2 = self._ord_2(step1[0], step1[1], step1[2], step1[3], dl * Z0) step3 = self._ord_2(step2[0], step2[1], step2[2], step2[3], dl * Z1) step1 = step3 dl = dl * Z0 (Z0, Z1) = _Z(self.order) step1 = self._ord_2(step1[0], step1[1], step1[2], step1[3], dl * Z1) step2 = self._ord_2(step1[0], step1[1], step1[2], step1[3], dl * Z0) step3 = self._ord_2(step2[0], step2[1], step2[2], step2[3], dl * Z1) step2 = step3 dl = dl * Z1 (Z0, Z1) = _Z(self.order) step1 = self._ord_2(step2[0], step2[1], step2[2], step2[3], dl * Z1) step2 = self._ord_2(step1[0], step1[1], step1[2], step1[3], dl * Z0) step3 = self._ord_2(step2[0], step2[1], step2[2], step2[3], dl * Z1) step3 = step3 return step3
einsteinpy
positive
def main(): loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict] for logger in loggers: if 'cosypose' in logger.name: logger.setLevel(logging.DEBUG) parser = argparse.ArgumentParser('Evaluation') parser.add_argument('--debug', action='store_true') parser.add_argument('--skip_predictions', action='store_true') parser.add_argument('--comment', default='', type=str) parser.add_argument('--id', default=-1, type=int) parser.add_argument('--config', default='', type=str) parser.add_argument('--models', default='', type=str) args = parser.parse_args() init_distributed_mode() cfg = argparse.ArgumentParser('').parse_args([]) cfg.n_workers = 8 cfg.pred_bsz = 8 cfg.eval_bsz = 8 cfg.n_frames = None cfg.skip_evaluation = False cfg.skip_model_predictions = args.skip_predictions cfg.external_predictions = True cfg.detector = None if args.debug: cfg.n_frames = 10 if args.config == 'bop': ds_names = ['itodd.val', 'hb.val'] else: raise ValueError detector_run_ids = {'ycbv.bop19': 'ycbv--377940', 'hb.val': 'detector-bop-hb--497808', 'itodd.val': 'detector-bop-itodd--509908'} if args.id < 0: n_rand = np.random.randint(1000000.0) args.id = n_rand save_dir = RESULTS_DIR / f'{args.config}-{args.models}-{args.comment}-{args.id}' logger.info(f'Save dir: {save_dir}') for ds_name in ds_names: this_cfg = deepcopy(cfg) this_cfg.ds_name = ds_name this_cfg.save_dir = save_dir / f'dataset={ds_name}' logger.info(f'DATASET: {ds_name}') if ds_name in detector_run_ids: this_cfg.detector_run_id = detector_run_ids[ds_name] else: this_cfg.skip_model_predictions = True logger.info(f'No model provided for dataset: {ds_name}.') <DeepExtract> logger.info(f"{'-' * 80}") for (k, v) in this_cfg.__dict__.items(): logger.info(f'{k}: {v}') logger.info(f"{'-' * 80}") scene_ds = make_scene_dataset(this_cfg.ds_name, n_frames=this_cfg.n_frames) pred_kwargs = dict() pred_runner = DetectionRunner(scene_ds, batch_size=this_cfg.pred_bsz, cache_data=len(pred_kwargs) > 1, n_workers=this_cfg.n_workers) if not this_cfg.skip_model_predictions: if detector is not None: model = detector else: model = load_detector(this_cfg.detector_run_id) pred_kwargs.update({'model': dict(detector=model, gt_detections=False)}) all_predictions = dict() if this_cfg.external_predictions: if 'ycbv' in this_cfg.ds_name: all_predictions['posecnn'] = load_posecnn_results().cpu() elif 'tless' in this_cfg.ds_name: all_predictions['retinanet/pix2pose'] = load_pix2pose_results(all_detections=True).cpu() else: pass for (pred_prefix, pred_kwargs_n) in pred_kwargs.items(): logger.info(f'Prediction: {pred_prefix}') preds = pred_runner.get_predictions(**pred_kwargs_n) for (preds_name, preds_n) in preds.items(): all_predictions[f'{pred_prefix}/{preds_name}'] = preds_n logger.info('Done with predictions') torch.distributed.barrier() meters = get_meters(scene_ds) logger.info(f'Meters: {meters}') eval_runner = DetectionEvaluation(scene_ds, meters, batch_size=this_cfg.eval_bsz, cache_data=len(all_predictions) > 1, n_workers=this_cfg.n_workers, sampler=pred_runner.sampler) (eval_metrics, eval_dfs) = (dict(), dict()) if not this_cfg.skip_evaluation: for (preds_k, preds) in all_predictions.items(): do_eval = True if do_eval: logger.info(f'Evaluation of predictions: {preds_k}') if len(preds) == 0: preds = eval_runner.make_empty_predictions() (eval_metrics[preds_k], eval_dfs[preds_k]) = eval_runner.evaluate(preds) else: logger.info(f'Skipped: {preds_k}') for (k, v) in all_predictions.items(): all_predictions[k] = v.gather_distributed(tmp_dir=get_tmp_dir()).cpu() results = None if get_rank() == 0: save_dir = Path(this_cfg.save_dir) save_dir.mkdir(exist_ok=True, parents=True) logger.info(f'Finished evaluation on {this_cfg.ds_name}') results = format_results(all_predictions, eval_metrics, eval_dfs) torch.save(results, save_dir / 'results.pth.tar') (save_dir / 'summary.txt').write_text(results.get('summary_txt', '')) (save_dir / 'config.yaml').write_text(yaml.dump(this_cfg)) logger.info(f'Saved predictions+metrics in {save_dir}') logger.info('Done with evaluation') torch.distributed.barrier() return results </DeepExtract> logger.info('')
def main(): loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict] for logger in loggers: if 'cosypose' in logger.name: logger.setLevel(logging.DEBUG) parser = argparse.ArgumentParser('Evaluation') parser.add_argument('--debug', action='store_true') parser.add_argument('--skip_predictions', action='store_true') parser.add_argument('--comment', default='', type=str) parser.add_argument('--id', default=-1, type=int) parser.add_argument('--config', default='', type=str) parser.add_argument('--models', default='', type=str) args = parser.parse_args() init_distributed_mode() cfg = argparse.ArgumentParser('').parse_args([]) cfg.n_workers = 8 cfg.pred_bsz = 8 cfg.eval_bsz = 8 cfg.n_frames = None cfg.skip_evaluation = False cfg.skip_model_predictions = args.skip_predictions cfg.external_predictions = True cfg.detector = None if args.debug: cfg.n_frames = 10 if args.config == 'bop': ds_names = ['itodd.val', 'hb.val'] else: raise ValueError detector_run_ids = {'ycbv.bop19': 'ycbv--377940', 'hb.val': 'detector-bop-hb--497808', 'itodd.val': 'detector-bop-itodd--509908'} if args.id < 0: n_rand = np.random.randint(1000000.0) args.id = n_rand save_dir = RESULTS_DIR / f'{args.config}-{args.models}-{args.comment}-{args.id}' logger.info(f'Save dir: {save_dir}') for ds_name in ds_names: this_cfg = deepcopy(cfg) this_cfg.ds_name = ds_name this_cfg.save_dir = save_dir / f'dataset={ds_name}' logger.info(f'DATASET: {ds_name}') if ds_name in detector_run_ids: this_cfg.detector_run_id = detector_run_ids[ds_name] else: this_cfg.skip_model_predictions = True logger.info(f'No model provided for dataset: {ds_name}.') logger.info(f"{'-' * 80}") for (k, v) in this_cfg.__dict__.items(): logger.info(f'{k}: {v}') logger.info(f"{'-' * 80}") scene_ds = make_scene_dataset(this_cfg.ds_name, n_frames=this_cfg.n_frames) pred_kwargs = dict() pred_runner = DetectionRunner(scene_ds, batch_size=this_cfg.pred_bsz, cache_data=len(pred_kwargs) > 1, n_workers=this_cfg.n_workers) if not this_cfg.skip_model_predictions: if detector is not None: model = detector else: model = load_detector(this_cfg.detector_run_id) pred_kwargs.update({'model': dict(detector=model, gt_detections=False)}) all_predictions = dict() if this_cfg.external_predictions: if 'ycbv' in this_cfg.ds_name: all_predictions['posecnn'] = load_posecnn_results().cpu() elif 'tless' in this_cfg.ds_name: all_predictions['retinanet/pix2pose'] = load_pix2pose_results(all_detections=True).cpu() else: pass for (pred_prefix, pred_kwargs_n) in pred_kwargs.items(): logger.info(f'Prediction: {pred_prefix}') preds = pred_runner.get_predictions(**pred_kwargs_n) for (preds_name, preds_n) in preds.items(): all_predictions[f'{pred_prefix}/{preds_name}'] = preds_n logger.info('Done with predictions') torch.distributed.barrier() meters = get_meters(scene_ds) logger.info(f'Meters: {meters}') eval_runner = DetectionEvaluation(scene_ds, meters, batch_size=this_cfg.eval_bsz, cache_data=len(all_predictions) > 1, n_workers=this_cfg.n_workers, sampler=pred_runner.sampler) (eval_metrics, eval_dfs) = (dict(), dict()) if not this_cfg.skip_evaluation: for (preds_k, preds) in all_predictions.items(): do_eval = True if do_eval: logger.info(f'Evaluation of predictions: {preds_k}') if len(preds) == 0: preds = eval_runner.make_empty_predictions() (eval_metrics[preds_k], eval_dfs[preds_k]) = eval_runner.evaluate(preds) else: logger.info(f'Skipped: {preds_k}') for (k, v) in all_predictions.items(): all_predictions[k] = v.gather_distributed(tmp_dir=get_tmp_dir()).cpu() results = None if get_rank() == 0: save_dir = Path(this_cfg.save_dir) save_dir.mkdir(exist_ok=True, parents=True) logger.info(f'Finished evaluation on {this_cfg.ds_name}') results = format_results(all_predictions, eval_metrics, eval_dfs) torch.save(results, save_dir / 'results.pth.tar') (save_dir / 'summary.txt').write_text(results.get('summary_txt', '')) (save_dir / 'config.yaml').write_text(yaml.dump(this_cfg)) logger.info(f'Saved predictions+metrics in {save_dir}') logger.info('Done with evaluation') torch.distributed.barrier() return results logger.info('')
cosypose
positive
def interpret_command(command_str, quiet=False): """ Interprets the command `command_str` and executes it. Returns `True` if the interactive mode should be exited. """ if command_str == '' or command_str.isspace(): return False command = CommandLineInterpreter.get_command(command_str, quiet) if command is None: return False if command.should_exit(): return True result = command.execute() if isinstance(command, execute_command.ExecuteCommand): <DeepExtract> if result is None: return False for cmd in result: stop = self.interpret_command(cmd) if stop: return True return False </DeepExtract> command.flush_output() return False
def interpret_command(command_str, quiet=False): """ Interprets the command `command_str` and executes it. Returns `True` if the interactive mode should be exited. """ if command_str == '' or command_str.isspace(): return False command = CommandLineInterpreter.get_command(command_str, quiet) if command is None: return False if command.should_exit(): return True result = command.execute() if isinstance(command, execute_command.ExecuteCommand): if result is None: return False for cmd in result: stop = self.interpret_command(cmd) if stop: return True return False command.flush_output() return False
Chatette
positive
def global_grid(self, scale=None): """Global grid.""" if scale == None: scale = 1 <DeepExtract> raise NotImplementedError </DeepExtract> problem_grid = self.COV.problem_coord(native_grid) return reshape_vector(problem_grid, dim=self.dist.dim, axis=self.axis)
def global_grid(self, scale=None): """Global grid.""" if scale == None: scale = 1 raise NotImplementedError problem_grid = self.COV.problem_coord(native_grid) return reshape_vector(problem_grid, dim=self.dist.dim, axis=self.axis)
dedalus
positive
@string.setter def string(self, string): <DeepExtract> if decompose: for element in self.contents[:]: if isinstance(element, Tag): element.decompose() else: element.extract() else: for element in self.contents[:]: element.extract() </DeepExtract> <DeepExtract> self.insert(len(self.contents), string.__class__(string)) </DeepExtract>
@string.setter def string(self, string): if decompose: for element in self.contents[:]: if isinstance(element, Tag): element.decompose() else: element.extract() else: for element in self.contents[:]: element.extract() self.insert(len(self.contents), string.__class__(string))
coursera-python-for-everybody-specialization
positive
def _reopen(self, append=True, **kwargs): <DeepExtract> global _singletons for v in _singletons.itervalues(): v._closeall() </DeepExtract> <DeepExtract> self._db = substage.SubstagesInfo(self.stage) self._db.open_dbs(trace) </DeepExtract>
def _reopen(self, append=True, **kwargs): global _singletons for v in _singletons.itervalues(): v._closeall() self._db = substage.SubstagesInfo(self.stage) self._db.open_dbs(trace)
bootloader_instrumentation_suite
positive
def rollback(self): """Roll back the current transaction""" <DeepExtract> if not self._sock: raise err.InterfaceError("(0, '')") if self._result is not None: if self._result.unbuffered_active: warnings.warn('Previous unbuffered result was left incomplete') self._result._finish_unbuffered_query() while self._result.has_next: self.next_result() self._result = None if isinstance('ROLLBACK', text_type): 'ROLLBACK' = 'ROLLBACK'.encode(self.encoding) packet_size = min(MAX_PACKET_LEN, len('ROLLBACK') + 1) prelude = struct.pack('<iB', packet_size, COMMAND.COM_QUERY) packet = prelude + 'ROLLBACK'[:packet_size - 1] self._write_bytes(packet) if DEBUG: dump_packet(packet) self._next_seq_id = 1 if packet_size < MAX_PACKET_LEN: return 'ROLLBACK' = 'ROLLBACK'[packet_size - 1:] while True: packet_size = min(MAX_PACKET_LEN, len('ROLLBACK')) self.write_packet('ROLLBACK'[:packet_size]) 'ROLLBACK' = 'ROLLBACK'[packet_size:] if not 'ROLLBACK' and packet_size < MAX_PACKET_LEN: break </DeepExtract> <DeepExtract> pkt = self._read_packet() if not pkt.is_ok_packet(): raise err.OperationalError(2014, 'Command Out of Sync') ok = OKPacketWrapper(pkt) self.server_status = ok.server_status return ok </DeepExtract>
def rollback(self): """Roll back the current transaction""" if not self._sock: raise err.InterfaceError("(0, '')") if self._result is not None: if self._result.unbuffered_active: warnings.warn('Previous unbuffered result was left incomplete') self._result._finish_unbuffered_query() while self._result.has_next: self.next_result() self._result = None if isinstance('ROLLBACK', text_type): 'ROLLBACK' = 'ROLLBACK'.encode(self.encoding) packet_size = min(MAX_PACKET_LEN, len('ROLLBACK') + 1) prelude = struct.pack('<iB', packet_size, COMMAND.COM_QUERY) packet = prelude + 'ROLLBACK'[:packet_size - 1] self._write_bytes(packet) if DEBUG: dump_packet(packet) self._next_seq_id = 1 if packet_size < MAX_PACKET_LEN: return 'ROLLBACK' = 'ROLLBACK'[packet_size - 1:] while True: packet_size = min(MAX_PACKET_LEN, len('ROLLBACK')) self.write_packet('ROLLBACK'[:packet_size]) 'ROLLBACK' = 'ROLLBACK'[packet_size:] if not 'ROLLBACK' and packet_size < MAX_PACKET_LEN: break pkt = self._read_packet() if not pkt.is_ok_packet(): raise err.OperationalError(2014, 'Command Out of Sync') ok = OKPacketWrapper(pkt) self.server_status = ok.server_status return ok
aws-servicebroker
positive
def start_rest_app(self): <DeepExtract> app = Application([('/config', ConfigHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict)), ('/control', ControlHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict)), ('/health', HealthHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict)), ('/loadAsSets', LoadAsSetsHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict)), ('/hijackLearnRule', HijackLearnRuleHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict))]) </DeepExtract> app.listen(REST_PORT) log.info('REST worker started and listening to port {}'.format(REST_PORT)) IOLoop.current().start()
def start_rest_app(self): app = Application([('/config', ConfigHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict)), ('/control', ControlHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict)), ('/health', HealthHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict)), ('/loadAsSets', LoadAsSetsHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict)), ('/hijackLearnRule', HijackLearnRuleHandler, dict(shared_memory_manager_dict=self.shared_memory_manager_dict))]) app.listen(REST_PORT) log.info('REST worker started and listening to port {}'.format(REST_PORT)) IOLoop.current().start()
artemis
positive
def restore_models(model_dir, models, global_step): <DeepExtract> if isinstance(models, dict): name_to_model = {name: m for (name, m) in models.items()} else: _check_model_names(models) name_to_model = {m.name: m for m in models} name_to_model = name_to_model </DeepExtract> for (name, model) in name_to_model.items(): ckpt_filename = '{}-{}.tckpt'.format(name, global_step) ckpt_path = model_dir + '/' + ckpt_filename <DeepExtract> if not Path(ckpt_path).is_file(): raise ValueError('checkpoint {} not exist.'.format(ckpt_path)) model.load_state_dict(torch.load(ckpt_path)) print('Restoring parameters from {}'.format(ckpt_path)) </DeepExtract>
def restore_models(model_dir, models, global_step): if isinstance(models, dict): name_to_model = {name: m for (name, m) in models.items()} else: _check_model_names(models) name_to_model = {m.name: m for m in models} name_to_model = name_to_model for (name, model) in name_to_model.items(): ckpt_filename = '{}-{}.tckpt'.format(name, global_step) ckpt_path = model_dir + '/' + ckpt_filename if not Path(ckpt_path).is_file(): raise ValueError('checkpoint {} not exist.'.format(ckpt_path)) model.load_state_dict(torch.load(ckpt_path)) print('Restoring parameters from {}'.format(ckpt_path))
alfred
positive
def load_policy(filename): with open(filename, 'rb') as f: data = pickle.loads(f.read()) nonlin_type = data['nonlin_type'] policy_type = [k for k in data.keys() if k != 'nonlin_type'][0] assert policy_type == 'GaussianPolicy', 'Policy type {} not supported'.format(policy_type) policy_params = data[policy_type] assert set(policy_params.keys()) == {'logstdevs_1_Da', 'hidden', 'obsnorm', 'out'} def build_policy(obs_bo): def read_layer(l): assert list(l.keys()) == ['AffineLayer'] assert sorted(l['AffineLayer'].keys()) == ['W', 'b'] return (l['AffineLayer']['W'].astype(np.float32), l['AffineLayer']['b'].astype(np.float32)) def apply_nonlin(x): if nonlin_type == 'lrelu': return tf_util.lrelu(x, leak=0.01) elif nonlin_type == 'tanh': return tf.tanh(x) else: raise NotImplementedError(nonlin_type) assert list(policy_params['obsnorm'].keys()) == ['Standardizer'] obsnorm_mean = policy_params['obsnorm']['Standardizer']['mean_1_D'] obsnorm_meansq = policy_params['obsnorm']['Standardizer']['meansq_1_D'] obsnorm_stdev = np.sqrt(np.maximum(0, obsnorm_meansq - np.square(obsnorm_mean))) print('obs', obsnorm_mean.shape, obsnorm_stdev.shape) normedobs_bo = (obs_bo - obsnorm_mean) / (obsnorm_stdev + 1e-06) curr_activations_bd = normedobs_bo assert list(policy_params['hidden'].keys()) == ['FeedforwardNet'] layer_params = policy_params['hidden']['FeedforwardNet'] for layer_name in sorted(layer_params.keys()): l = layer_params[layer_name] <DeepExtract> assert list(l.keys()) == ['AffineLayer'] assert sorted(l['AffineLayer'].keys()) == ['W', 'b'] (W, b) = (l['AffineLayer']['W'].astype(np.float32), l['AffineLayer']['b'].astype(np.float32)) </DeepExtract> <DeepExtract> if nonlin_type == 'lrelu': curr_activations_bd = tf_util.lrelu(tf.matmul(curr_activations_bd, W) + b, leak=0.01) elif nonlin_type == 'tanh': curr_activations_bd = tf.tanh(tf.matmul(curr_activations_bd, W) + b) else: raise NotImplementedError(nonlin_type) </DeepExtract> <DeepExtract> assert list(policy_params['out'].keys()) == ['AffineLayer'] assert sorted(policy_params['out']['AffineLayer'].keys()) == ['W', 'b'] (W, b) = (policy_params['out']['AffineLayer']['W'].astype(np.float32), policy_params['out']['AffineLayer']['b'].astype(np.float32)) </DeepExtract> output_bo = tf.matmul(curr_activations_bd, W) + b return output_bo obs_bo = tf.placeholder(tf.float32, [None, None]) <DeepExtract> def read_layer(l): assert list(l.keys()) == ['AffineLayer'] assert sorted(l['AffineLayer'].keys()) == ['W', 'b'] a_ba = (l['AffineLayer']['W'].astype(np.float32), l['AffineLayer']['b'].astype(np.float32)) def apply_nonlin(x): if nonlin_type == 'lrelu': a_ba = tf_util.lrelu(x, leak=0.01) elif nonlin_type == 'tanh': a_ba = tf.tanh(x) else: raise NotImplementedError(nonlin_type) assert list(policy_params['obsnorm'].keys()) == ['Standardizer'] obsnorm_mean = policy_params['obsnorm']['Standardizer']['mean_1_D'] obsnorm_meansq = policy_params['obsnorm']['Standardizer']['meansq_1_D'] obsnorm_stdev = np.sqrt(np.maximum(0, obsnorm_meansq - np.square(obsnorm_mean))) print('obs', obsnorm_mean.shape, obsnorm_stdev.shape) normedobs_bo = (obs_bo - obsnorm_mean) / (obsnorm_stdev + 1e-06) curr_activations_bd = normedobs_bo assert list(policy_params['hidden'].keys()) == ['FeedforwardNet'] layer_params = policy_params['hidden']['FeedforwardNet'] for layer_name in sorted(layer_params.keys()): l = layer_params[layer_name] (W, b) = read_layer(l) curr_activations_bd = apply_nonlin(tf.matmul(curr_activations_bd, W) + b) (W, b) = read_layer(policy_params['out']) output_bo = tf.matmul(curr_activations_bd, W) + b a_ba = output_bo </DeepExtract> policy_fn = tf_util.function([obs_bo], a_ba) return policy_fn
def load_policy(filename): with open(filename, 'rb') as f: data = pickle.loads(f.read()) nonlin_type = data['nonlin_type'] policy_type = [k for k in data.keys() if k != 'nonlin_type'][0] assert policy_type == 'GaussianPolicy', 'Policy type {} not supported'.format(policy_type) policy_params = data[policy_type] assert set(policy_params.keys()) == {'logstdevs_1_Da', 'hidden', 'obsnorm', 'out'} def build_policy(obs_bo): def read_layer(l): assert list(l.keys()) == ['AffineLayer'] assert sorted(l['AffineLayer'].keys()) == ['W', 'b'] return (l['AffineLayer']['W'].astype(np.float32), l['AffineLayer']['b'].astype(np.float32)) def apply_nonlin(x): if nonlin_type == 'lrelu': return tf_util.lrelu(x, leak=0.01) elif nonlin_type == 'tanh': return tf.tanh(x) else: raise NotImplementedError(nonlin_type) assert list(policy_params['obsnorm'].keys()) == ['Standardizer'] obsnorm_mean = policy_params['obsnorm']['Standardizer']['mean_1_D'] obsnorm_meansq = policy_params['obsnorm']['Standardizer']['meansq_1_D'] obsnorm_stdev = np.sqrt(np.maximum(0, obsnorm_meansq - np.square(obsnorm_mean))) print('obs', obsnorm_mean.shape, obsnorm_stdev.shape) normedobs_bo = (obs_bo - obsnorm_mean) / (obsnorm_stdev + 1e-06) curr_activations_bd = normedobs_bo assert list(policy_params['hidden'].keys()) == ['FeedforwardNet'] layer_params = policy_params['hidden']['FeedforwardNet'] for layer_name in sorted(layer_params.keys()): l = layer_params[layer_name] assert list(l.keys()) == ['AffineLayer'] assert sorted(l['AffineLayer'].keys()) == ['W', 'b'] (W, b) = (l['AffineLayer']['W'].astype(np.float32), l['AffineLayer']['b'].astype(np.float32)) if nonlin_type == 'lrelu': curr_activations_bd = tf_util.lrelu(tf.matmul(curr_activations_bd, W) + b, leak=0.01) elif nonlin_type == 'tanh': curr_activations_bd = tf.tanh(tf.matmul(curr_activations_bd, W) + b) else: raise NotImplementedError(nonlin_type) assert list(policy_params['out'].keys()) == ['AffineLayer'] assert sorted(policy_params['out']['AffineLayer'].keys()) == ['W', 'b'] (W, b) = (policy_params['out']['AffineLayer']['W'].astype(np.float32), policy_params['out']['AffineLayer']['b'].astype(np.float32)) output_bo = tf.matmul(curr_activations_bd, W) + b return output_bo obs_bo = tf.placeholder(tf.float32, [None, None]) def read_layer(l): assert list(l.keys()) == ['AffineLayer'] assert sorted(l['AffineLayer'].keys()) == ['W', 'b'] a_ba = (l['AffineLayer']['W'].astype(np.float32), l['AffineLayer']['b'].astype(np.float32)) def apply_nonlin(x): if nonlin_type == 'lrelu': a_ba = tf_util.lrelu(x, leak=0.01) elif nonlin_type == 'tanh': a_ba = tf.tanh(x) else: raise NotImplementedError(nonlin_type) assert list(policy_params['obsnorm'].keys()) == ['Standardizer'] obsnorm_mean = policy_params['obsnorm']['Standardizer']['mean_1_D'] obsnorm_meansq = policy_params['obsnorm']['Standardizer']['meansq_1_D'] obsnorm_stdev = np.sqrt(np.maximum(0, obsnorm_meansq - np.square(obsnorm_mean))) print('obs', obsnorm_mean.shape, obsnorm_stdev.shape) normedobs_bo = (obs_bo - obsnorm_mean) / (obsnorm_stdev + 1e-06) curr_activations_bd = normedobs_bo assert list(policy_params['hidden'].keys()) == ['FeedforwardNet'] layer_params = policy_params['hidden']['FeedforwardNet'] for layer_name in sorted(layer_params.keys()): l = layer_params[layer_name] (W, b) = read_layer(l) curr_activations_bd = apply_nonlin(tf.matmul(curr_activations_bd, W) + b) (W, b) = read_layer(policy_params['out']) output_bo = tf.matmul(curr_activations_bd, W) + b a_ba = output_bo policy_fn = tf_util.function([obs_bo], a_ba) return policy_fn
cs294-112_hws
positive
def get_uami_vault_access_token(): """ Get the vault access token to get all the other passwords/secrets. """ hdrs = {'Metadata': 'true', 'Cache-Control': 'no-cache'} url = 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fvault.azure.net' <DeepExtract> if not url: (status, data) = (None, None) urlreq = request.Request(url, headers=get_request_headers(hdrs), method=name) (status, data) = urlopen_request(urlreq, name) </DeepExtract> if status and isinstance(status, int) and (status == 200): vaulttoken = data['access_token'] else: pass return vaulttoken
def get_uami_vault_access_token(): """ Get the vault access token to get all the other passwords/secrets. """ hdrs = {'Metadata': 'true', 'Cache-Control': 'no-cache'} url = 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fvault.azure.net' if not url: (status, data) = (None, None) urlreq = request.Request(url, headers=get_request_headers(hdrs), method=name) (status, data) = urlopen_request(urlreq, name) if status and isinstance(status, int) and (status == 200): vaulttoken = data['access_token'] else: pass return vaulttoken
cloud-validation-framework
positive
def build_vocab(self): if self.vocab_file: print('building vocab from {}'.format(self.vocab_file)) <DeepExtract> self.idx2sym = [] self.sym2idx = OrderedDict() with open(self.vocab_file, 'r', encoding='utf-8') as f: for line in f: symb = line.strip().split()[0] self.add_symbol(symb) if '<UNK>' in self.sym2idx: self.unk_idx = self.sym2idx['<UNK>'] elif '<unk>' in self.sym2idx: self.unk_idx = self.sym2idx['<unk>'] else: raise ValueError('No <unkown> token in vocabulary') </DeepExtract> print('final vocab size {}'.format(len(self))) else: print('building vocab with min_freq={}, max_size={}'.format(self.min_freq, self.max_size)) self.idx2sym = [] self.sym2idx = OrderedDict() for sym in self.special: <DeepExtract> if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym]) </DeepExtract> for (sym, cnt) in self.counter.most_common(self.max_size): if cnt < self.min_freq: break <DeepExtract> if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 </DeepExtract> print('final vocab size {} from {} unique tokens'.format(len(self), len(self.counter)))
def build_vocab(self): if self.vocab_file: print('building vocab from {}'.format(self.vocab_file)) self.idx2sym = [] self.sym2idx = OrderedDict() with open(self.vocab_file, 'r', encoding='utf-8') as f: for line in f: symb = line.strip().split()[0] self.add_symbol(symb) if '<UNK>' in self.sym2idx: self.unk_idx = self.sym2idx['<UNK>'] elif '<unk>' in self.sym2idx: self.unk_idx = self.sym2idx['<unk>'] else: raise ValueError('No <unkown> token in vocabulary') print('final vocab size {}'.format(len(self))) else: print('building vocab with min_freq={}, max_size={}'.format(self.min_freq, self.max_size)) self.idx2sym = [] self.sym2idx = OrderedDict() for sym in self.special: if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym]) for (sym, cnt) in self.counter.most_common(self.max_size): if cnt < self.min_freq: break if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 print('final vocab size {} from {} unique tokens'.format(len(self), len(self.counter)))
bert-jointly-relation-entity-extraciton
positive
def log_prob(self, hidden): """ Computes log probabilities for all :math:`n\\_classes` From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py Args: hidden (Tensor): a minibatch of examples Returns: log-probabilities of for each class :math:`c` in range :math:`0 <= c <= n\\_classes`, where :math:`n\\_classes` is a parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor. Shape: - Input: :math:`(N, in\\_features)` - Output: :math:`(N, n\\_classes)` """ if self.n_clusters == 0: <DeepExtract> if self.out_projs[0] is None: logit = F.linear(hidden, self.out_layers[0].weight, bias=self.out_layers[0].bias) else: proj_hid = F.linear(hidden, self.out_projs[0].t().contiguous()) logit = F.linear(proj_hid, self.out_layers[0].weight, bias=self.out_layers[0].bias) logit = logit </DeepExtract> return F.log_softmax(logit, dim=-1) else: (weights, biases) = ([], []) for i in range(len(self.cutoffs)): if self.div_val == 1: (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i = torch.cat([bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) (head_weight, head_bias, head_proj) = (weights[0], biases[0], self.out_projs[0]) <DeepExtract> if head_proj is None: logit = F.linear(hidden, head_weight, bias=head_bias) else: proj_hid = F.linear(hidden, head_proj.t().contiguous()) logit = F.linear(proj_hid, head_weight, bias=head_bias) head_logit = logit </DeepExtract> out = hidden.new_empty((head_logit.size(0), self.n_token)) head_logprob = F.log_softmax(head_logit, dim=1) cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): (start_idx, stop_idx) = (cutoff_values[i], cutoff_values[i + 1]) if i == 0: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: (weight_i, bias_i, proj_i) = (weights[i], biases[i], self.out_projs[i]) <DeepExtract> if proj_i is None: logit = F.linear(hidden, weight_i, bias=bias_i) else: proj_hid = F.linear(hidden, proj_i.t().contiguous()) logit = F.linear(proj_hid, weight_i, bias=bias_i) tail_logit_i = logit </DeepExtract> tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob[:, -i] + tail_logprob_i out[:, start_idx, stop_idx] = logprob_i return out
def log_prob(self, hidden): """ Computes log probabilities for all :math:`n\\_classes` From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py Args: hidden (Tensor): a minibatch of examples Returns: log-probabilities of for each class :math:`c` in range :math:`0 <= c <= n\\_classes`, where :math:`n\\_classes` is a parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor. Shape: - Input: :math:`(N, in\\_features)` - Output: :math:`(N, n\\_classes)` """ if self.n_clusters == 0: if self.out_projs[0] is None: logit = F.linear(hidden, self.out_layers[0].weight, bias=self.out_layers[0].bias) else: proj_hid = F.linear(hidden, self.out_projs[0].t().contiguous()) logit = F.linear(proj_hid, self.out_layers[0].weight, bias=self.out_layers[0].bias) logit = logit return F.log_softmax(logit, dim=-1) else: (weights, biases) = ([], []) for i in range(len(self.cutoffs)): if self.div_val == 1: (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i = torch.cat([bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) (head_weight, head_bias, head_proj) = (weights[0], biases[0], self.out_projs[0]) if head_proj is None: logit = F.linear(hidden, head_weight, bias=head_bias) else: proj_hid = F.linear(hidden, head_proj.t().contiguous()) logit = F.linear(proj_hid, head_weight, bias=head_bias) head_logit = logit out = hidden.new_empty((head_logit.size(0), self.n_token)) head_logprob = F.log_softmax(head_logit, dim=1) cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): (start_idx, stop_idx) = (cutoff_values[i], cutoff_values[i + 1]) if i == 0: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: (weight_i, bias_i, proj_i) = (weights[i], biases[i], self.out_projs[i]) if proj_i is None: logit = F.linear(hidden, weight_i, bias=bias_i) else: proj_hid = F.linear(hidden, proj_i.t().contiguous()) logit = F.linear(proj_hid, weight_i, bias=bias_i) tail_logit_i = logit tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob[:, -i] + tail_logprob_i out[:, start_idx, stop_idx] = logprob_i return out
Bert_sentence_similarity
positive
def __backend_runner_wrapper(self, backend_name): <DeepExtract> try: backend = self._backends[backend_name] except KeyError: backend = BACKENDS.get(backend_name) if backend is None: raise ValueError(f"Backend {backend_name} doesn't exist.") self._backends[backend_name] = backend() backend = self._backends[backend_name] </DeepExtract> def runner(*args, **kwargs): return backend.run(self.ops, self.n_qubits, *args, **kwargs) return runner
def __backend_runner_wrapper(self, backend_name): try: backend = self._backends[backend_name] except KeyError: backend = BACKENDS.get(backend_name) if backend is None: raise ValueError(f"Backend {backend_name} doesn't exist.") self._backends[backend_name] = backend() backend = self._backends[backend_name] def runner(*args, **kwargs): return backend.run(self.ops, self.n_qubits, *args, **kwargs) return runner
Blueqat
positive
def train(self, epoch, fields, report_func=None): """ Called for each epoch to train. """ total_stats = Statistics(0, {}) report_stats = Statistics(0, {}) for (i, batch) in enumerate(self.train_iter): self.model.zero_grad() <DeepExtract> (q, q_len) = batch.src (lay, lay_len) = batch.lay (lay_bpe, lay_bpe_len) = batch.lay_bpe (lay_out, tgt_out, token_out, loss_coverage) = self.model(q, q_len, None, lay, batch.lay_e, lay_len, lay_bpe, lay_bpe_len, batch.lay_index, batch.tgt_mask, batch.tgt, batch.lay_parent_index, batch.tgt_parent_index) pred = {'lay': lay_out, 'tgt': tgt_out, 'token': token_out} gold = {} mask_loss = {} if self.model.opt.bpe: gold['lay'] = lay_bpe[1:] else: gold['lay'] = lay[1:] if self.model.opt.mask_target_loss: gold['tgt'] = batch.tgt_loss_masked[1:] else: gold['tgt'] = batch.tgt_loss[1:] if self.model.opt.layout_token_prune: batch_size = lay.size(1) token_loss = torch.FloatTensor(len(fields['lay'].vocab), batch_size).zero_() lay_cpu = lay.data.clone().cpu() for b in range(batch_size): for i in range(1, lay_len[b]): token_loss[lay_cpu[i, b], b] = 1 gold['token'] = Variable(token_loss[len(table.IO.special_token_list):].cuda(), requires_grad=False) m_list = [] for tk_idx in range(len(table.IO.special_token_list), len(fields['lay'].vocab)): w = fields['lay'].vocab.itos[tk_idx] if w.startswith('(') or w in (')', table.IO.TOK_WORD): m_list.append(1) else: m_list.append(0) mask_loss['token'] = m_list if self.model.opt.coverage_loss > 0 and epoch > 10: gold['cover'] = loss_coverage * self.model.opt.coverage_loss loss = self.train_loss.compute_loss(pred, gold, mask_loss) r_dict = {} for metric_name in ('lay', 'tgt'): p = pred[metric_name].data g = gold[metric_name].data r_dict[metric_name + '-token'] = count_accuracy(p, g, mask=g.eq(table.IO.PAD), row=False) r_dict[metric_name] = count_accuracy(p, g, mask=g.eq(table.IO.PAD), row=True) if self.model.opt.layout_token_prune: metric_name = 'token' p = pred[metric_name].data g = gold[metric_name].data r_dict[metric_name + '-token'] = count_token_prune_accuracy(p, g, mask_loss['token'], row=False) r_dict[metric_name] = count_token_prune_accuracy(p, g, mask_loss['token'], row=True) st = dict([(k, (v[0].sum(), v[1])) for (k, v) in r_dict.items()]) st['all'] = aggregate_accuracy(r_dict, ('lay', 'tgt')) if self.model.opt.coverage_loss > 0 and epoch > 10: st['attn_impor_loss'] = (gold['cover'].data[0], 1) batch_stats = Statistics(loss.data[0], st) (loss, batch_stats) = (loss, batch_stats) </DeepExtract> loss.backward() self.optim.step() total_stats.update(batch_stats) report_stats.update(batch_stats) if report_func is not None: report_stats = report_func(epoch, i, len(self.train_iter), total_stats.start_time, self.optim.lr, report_stats) if self.model.opt.moving_avg > 0: decay_rate = min(self.model.opt.moving_avg, (1 + epoch) / (1.5 + epoch)) for (p, avg_p) in zip(self.model.parameters(), self.moving_avg): avg_p.mul_(decay_rate).add_(1.0 - decay_rate, p.data) return total_stats
def train(self, epoch, fields, report_func=None): """ Called for each epoch to train. """ total_stats = Statistics(0, {}) report_stats = Statistics(0, {}) for (i, batch) in enumerate(self.train_iter): self.model.zero_grad() (q, q_len) = batch.src (lay, lay_len) = batch.lay (lay_bpe, lay_bpe_len) = batch.lay_bpe (lay_out, tgt_out, token_out, loss_coverage) = self.model(q, q_len, None, lay, batch.lay_e, lay_len, lay_bpe, lay_bpe_len, batch.lay_index, batch.tgt_mask, batch.tgt, batch.lay_parent_index, batch.tgt_parent_index) pred = {'lay': lay_out, 'tgt': tgt_out, 'token': token_out} gold = {} mask_loss = {} if self.model.opt.bpe: gold['lay'] = lay_bpe[1:] else: gold['lay'] = lay[1:] if self.model.opt.mask_target_loss: gold['tgt'] = batch.tgt_loss_masked[1:] else: gold['tgt'] = batch.tgt_loss[1:] if self.model.opt.layout_token_prune: batch_size = lay.size(1) token_loss = torch.FloatTensor(len(fields['lay'].vocab), batch_size).zero_() lay_cpu = lay.data.clone().cpu() for b in range(batch_size): for i in range(1, lay_len[b]): token_loss[lay_cpu[i, b], b] = 1 gold['token'] = Variable(token_loss[len(table.IO.special_token_list):].cuda(), requires_grad=False) m_list = [] for tk_idx in range(len(table.IO.special_token_list), len(fields['lay'].vocab)): w = fields['lay'].vocab.itos[tk_idx] if w.startswith('(') or w in (')', table.IO.TOK_WORD): m_list.append(1) else: m_list.append(0) mask_loss['token'] = m_list if self.model.opt.coverage_loss > 0 and epoch > 10: gold['cover'] = loss_coverage * self.model.opt.coverage_loss loss = self.train_loss.compute_loss(pred, gold, mask_loss) r_dict = {} for metric_name in ('lay', 'tgt'): p = pred[metric_name].data g = gold[metric_name].data r_dict[metric_name + '-token'] = count_accuracy(p, g, mask=g.eq(table.IO.PAD), row=False) r_dict[metric_name] = count_accuracy(p, g, mask=g.eq(table.IO.PAD), row=True) if self.model.opt.layout_token_prune: metric_name = 'token' p = pred[metric_name].data g = gold[metric_name].data r_dict[metric_name + '-token'] = count_token_prune_accuracy(p, g, mask_loss['token'], row=False) r_dict[metric_name] = count_token_prune_accuracy(p, g, mask_loss['token'], row=True) st = dict([(k, (v[0].sum(), v[1])) for (k, v) in r_dict.items()]) st['all'] = aggregate_accuracy(r_dict, ('lay', 'tgt')) if self.model.opt.coverage_loss > 0 and epoch > 10: st['attn_impor_loss'] = (gold['cover'].data[0], 1) batch_stats = Statistics(loss.data[0], st) (loss, batch_stats) = (loss, batch_stats) loss.backward() self.optim.step() total_stats.update(batch_stats) report_stats.update(batch_stats) if report_func is not None: report_stats = report_func(epoch, i, len(self.train_iter), total_stats.start_time, self.optim.lr, report_stats) if self.model.opt.moving_avg > 0: decay_rate = min(self.model.opt.moving_avg, (1 + epoch) / (1.5 + epoch)) for (p, avg_p) in zip(self.model.parameters(), self.moving_avg): avg_p.mul_(decay_rate).add_(1.0 - decay_rate, p.data) return total_stats
coarse2fine
positive
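A minimal sketch of the parameter moving average applied at the end of the trainer above; the decay schedule min(moving_avg, (1 + epoch) / (1.5 + epoch)) is taken straight from the record, while the plain-float parameter list is an illustrative stand-in for the model's tensors.

def update_moving_average(params, avg_params, epoch, moving_avg=0.999):
    # Early epochs weight new values heavily; later epochs approach the cap.
    decay_rate = min(moving_avg, (1 + epoch) / (1.5 + epoch))
    for i, p in enumerate(params):
        avg_params[i] = decay_rate * avg_params[i] + (1.0 - decay_rate) * p
    return avg_params

# At epoch 0 the decay rate is 1/1.5, so the average moves a third of the way:
print(update_moving_average([1.0], [0.0], epoch=0))  # [0.333...]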
def test_get_aws_security_credentials_webidentity(mocker): <DeepExtract> try: config = ConfigParser.SafeConfigParser() except AttributeError: config = ConfigParser() if add_test_profile: config.add_section(AWSPROFILE) config = config </DeepExtract> credentials_source = 'webidentity:' + ','.join([ROLE_ARN, WEB_IDENTITY_TOKEN_FILE]) mock_response = {'AccessKeyId': ACCESS_KEY_ID_VAL, 'SecretAccessKey': SECRET_ACCESS_KEY_VAL, 'Token': SESSION_TOKEN_VAL} mocker.patch('watchdog.get_aws_security_credentials_from_webidentity', return_value=mock_response) credentials = watchdog.get_aws_security_credentials(config, credentials_source, 'us-east-1') assert credentials['AccessKeyId'] == ACCESS_KEY_ID_VAL assert credentials['SecretAccessKey'] == SECRET_ACCESS_KEY_VAL assert credentials['Token'] == SESSION_TOKEN_VAL
def test_get_aws_security_credentials_webidentity(mocker): try: config = ConfigParser.SafeConfigParser() except AttributeError: config = ConfigParser() if add_test_profile: config.add_section(AWSPROFILE) config = config credentials_source = 'webidentity:' + ','.join([ROLE_ARN, WEB_IDENTITY_TOKEN_FILE]) mock_response = {'AccessKeyId': ACCESS_KEY_ID_VAL, 'SecretAccessKey': SECRET_ACCESS_KEY_VAL, 'Token': SESSION_TOKEN_VAL} mocker.patch('watchdog.get_aws_security_credentials_from_webidentity', return_value=mock_response) credentials = watchdog.get_aws_security_credentials(config, credentials_source, 'us-east-1') assert credentials['AccessKeyId'] == ACCESS_KEY_ID_VAL assert credentials['SecretAccessKey'] == SECRET_ACCESS_KEY_VAL assert credentials['Token'] == SESSION_TOKEN_VAL
efs-utils
positive
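The test above leans on pytest-mock's mocker.patch to substitute a canned credential response for a network call; a self-contained sketch of that pattern, with a hypothetical target function standing in for the watchdog helper.

def fetch_credentials():
    # Hypothetical stand-in for watchdog.get_aws_security_credentials_from_webidentity.
    raise RuntimeError('would hit AWS STS in real life')

def test_fetch_credentials_is_patched(mocker):
    # mocker.patch mirrors unittest.mock.patch and is undone automatically per test.
    mocker.patch(__name__ + '.fetch_credentials',
                 return_value={'AccessKeyId': 'FAKE_KEY_ID'})
    assert fetch_credentials()['AccessKeyId'] == 'FAKE_KEY_ID'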
def format_dict(pd): ret = {} ret['pid'] = pd['pid'] ret['time'] = '' if pd['cpu_times'] is not None: <DeepExtract> ctime = timedelta(seconds=sum(pd['cpu_times'])) ret['time'] = '%s:%s.%s' % (ctime.seconds // 60 % 60, str(ctime.seconds % 60).zfill(2), str(ctime.microseconds)[:2]) </DeepExtract> ret['user'] = pd['username'][:12] ret['ni'] = pd['nice'] mem_info = pd['memory_info'] ret['virt'] = bytes2human(getattr(mem_info, 'vms', 0)) ret['res'] = bytes2human(getattr(mem_info, 'rss', 0)) ret['cpu'] = -0.0 if pd['cpu_percent'] is None else pd['cpu_percent'] ret['mem'] = -0.0 if pd['memory_percent'] is not None: ret['mem'] = round(pd['memory_percent'], 1) ret['name'] = pd['name'] or '' return ret
def format_dict(pd): ret = {} ret['pid'] = pd['pid'] ret['time'] = '' if pd['cpu_times'] is not None: ctime = timedelta(seconds=sum(pd['cpu_times'])) ret['time'] = '%s:%s.%s' % (ctime.seconds // 60 % 60, str(ctime.seconds % 60).zfill(2), str(ctime.microseconds)[:2]) ret['user'] = pd['username'][:12] ret['ni'] = pd['nice'] mem_info = pd['memory_info'] ret['virt'] = bytes2human(getattr(mem_info, 'vms', 0)) ret['res'] = bytes2human(getattr(mem_info, 'rss', 0)) ret['cpu'] = -0.0 if pd['cpu_percent'] is None else pd['cpu_percent'] ret['mem'] = -0.0 if pd['memory_percent'] is not None: ret['mem'] = round(pd['memory_percent'], 1) ret['name'] = pd['name'] or '' return ret
clastic
positive
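A standalone sketch of the CPU-time formatting inlined above; it renders a seconds count as minutes:seconds.hundredths, exactly as format_dict does for pd['cpu_times'].

from datetime import timedelta

def format_cpu_time(seconds):
    ctime = timedelta(seconds=seconds)
    return '%s:%s.%s' % (ctime.seconds // 60 % 60,
                         str(ctime.seconds % 60).zfill(2),
                         str(ctime.microseconds)[:2])

print(format_cpu_time(125.5))  # -> 2:05.50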
def __init__(self, num_classes, loss, fc_dims=None, dropout_p=None, **kwargs): super(Xception, self).__init__() self.loss = loss self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, 3, bias=False) self.bn2 = nn.BatchNorm2d(64) self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True) self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True) self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True) self.block4 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block12 = Block(728, 1024, 2, 2, start_with_relu=True, grow_first=False) self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) self.bn3 = nn.BatchNorm2d(1536) self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1) self.bn4 = nn.BatchNorm2d(2048) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.feature_dim = 2048 <DeepExtract> if fc_dims is None: self.feature_dim = 2048 self.fc = None assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims)) layers = [] input_dim = 2048 for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] self.fc = nn.Sequential(*layers) </DeepExtract> self.classifier = nn.Linear(self.feature_dim, num_classes) <DeepExtract> for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) </DeepExtract>
def __init__(self, num_classes, loss, fc_dims=None, dropout_p=None, **kwargs): super(Xception, self).__init__() self.loss = loss self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, 3, bias=False) self.bn2 = nn.BatchNorm2d(64) self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True) self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True) self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True) self.block4 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True) self.block12 = Block(728, 1024, 2, 2, start_with_relu=True, grow_first=False) self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) self.bn3 = nn.BatchNorm2d(1536) self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1) self.bn4 = nn.BatchNorm2d(2048) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.feature_dim = 2048 if fc_dims is None: self.feature_dim = 2048 self.fc = None assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims)) layers = [] input_dim = 2048 for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] self.fc = nn.Sequential(*layers) self.classifier = nn.Linear(self.feature_dim, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0)
deep-person-reid
positive
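With the input_dim fix applied in the record above, the fc-stack construction reduces to the following sketch: each Linear consumes the running dimension, which then advances to the width of the layer just built. Names mirror the record; the standalone function form is illustrative.

import torch.nn as nn

def construct_fc_layer(fc_dims, input_dim, dropout_p=None):
    layers = []
    for dim in fc_dims:
        layers.append(nn.Linear(input_dim, dim))
        layers.append(nn.BatchNorm1d(dim))
        layers.append(nn.ReLU(inplace=True))
        if dropout_p is not None:
            layers.append(nn.Dropout(p=dropout_p))
        input_dim = dim  # the next Linear consumes this layer's width
    return nn.Sequential(*layers)

fc = construct_fc_layer([512, 256], input_dim=2048, dropout_p=0.5)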
def generate_je_number(self, commit: bool=False) -> str: """ Atomic Transaction. Generates the next Journal Entry document number available. The operation will result in two additional queries if the Journal Entry LedgerModel & EntityUnitModel are not cached in QuerySet via select_related('ledger', 'entity_unit'). Parameters ---------- commit: bool Commits transaction into JournalEntryModel when function is called. Returns ------- str A String, representing the new or current JournalEntryModel instance Document Number. """ if self.can_generate_je_number(): with transaction.atomic(durable=True): state_model = None while not state_model: <DeepExtract> entity_model = EntityModel.objects.get(uuid__exact=self.ledger.entity_id) fy_key = entity_model.get_fy_for_date(dt=self.timestamp) try: LOOKUP = {'entity_model_id__exact': self.ledger.entity_id, 'entity_unit_id__exact': self.entity_unit_id, 'fiscal_year': fy_key, 'key__exact': EntityStateModel.KEY_JOURNAL_ENTRY} state_model_qs = EntityStateModel.objects.filter(**LOOKUP).select_related('entity_model').select_for_update() state_model = state_model_qs.get() state_model.sequence = F('sequence') + 1 state_model.save() state_model.refresh_from_db() state_model = state_model except ObjectDoesNotExist: LOOKUP = {'entity_model_id': entity_model.uuid, 'entity_unit_id': self.entity_unit_id, 'fiscal_year': fy_key, 'key': EntityStateModel.KEY_JOURNAL_ENTRY, 'sequence': 1} state_model = EntityStateModel.objects.create(**LOOKUP) state_model = state_model except IntegrityError as e: if False: raise e </DeepExtract> if self.entity_unit_id: unit_prefix = self.entity_unit.document_prefix else: unit_prefix = DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX seq = str(state_model.sequence).zfill(DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING) self.je_number = f'{DJANGO_LEDGER_JE_NUMBER_PREFIX}-{state_model.fiscal_year}-{unit_prefix}-{seq}' if commit: <DeepExtract> try: self.generate_je_number(commit=False) if verify: (txs_qs, is_verified) = self.clean(verify=True) if self.is_verified() and post_on_verify: self.mark_as_posted(commit=False, verify=False, raise_exception=True) except ValidationError as e: if self.can_unpost(): self.mark_as_unposted(raise_exception=True) raise JournalEntryValidationError(f'Something went wrong validating journal entry ID: {self.uuid}: {e.message}') except Exception as e: self.posted = False self._verified = False self.save(update_fields=['posted', 'updated'], verify=False) raise JournalEntryValidationError(e) if not self.is_verified() and verify: raise JournalEntryValidationError(message='Cannot save an unverified Journal Entry.') super(JournalEntryModelAbstract, self).save(*args, **kwargs) </DeepExtract> return self.je_number
def generate_je_number(self, commit: bool=False) -> str: """ Atomic Transaction. Generates the next Journal Entry document number available. The operation will result in two additional queries if the Journal Entry LedgerModel & EntityUnitModel are not cached in QuerySet via select_related('ledger', 'entity_unit'). Parameters ---------- commit: bool Commits transaction into JournalEntryModel when function is called. Returns ------- str A String, representing the new or current JournalEntryModel instance Document Number. """ if self.can_generate_je_number(): with transaction.atomic(durable=True): state_model = None while not state_model: entity_model = EntityModel.objects.get(uuid__exact=self.ledger.entity_id) fy_key = entity_model.get_fy_for_date(dt=self.timestamp) try: LOOKUP = {'entity_model_id__exact': self.ledger.entity_id, 'entity_unit_id__exact': self.entity_unit_id, 'fiscal_year': fy_key, 'key__exact': EntityStateModel.KEY_JOURNAL_ENTRY} state_model_qs = EntityStateModel.objects.filter(**LOOKUP).select_related('entity_model').select_for_update() state_model = state_model_qs.get() state_model.sequence = F('sequence') + 1 state_model.save() state_model.refresh_from_db() state_model = state_model except ObjectDoesNotExist: LOOKUP = {'entity_model_id': entity_model.uuid, 'entity_unit_id': self.entity_unit_id, 'fiscal_year': fy_key, 'key': EntityStateModel.KEY_JOURNAL_ENTRY, 'sequence': 1} state_model = EntityStateModel.objects.create(**LOOKUP) state_model = state_model except IntegrityError as e: if False: raise e if self.entity_unit_id: unit_prefix = self.entity_unit.document_prefix else: unit_prefix = DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX seq = str(state_model.sequence).zfill(DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING) self.je_number = f'{DJANGO_LEDGER_JE_NUMBER_PREFIX}-{state_model.fiscal_year}-{unit_prefix}-{seq}' if commit: try: self.generate_je_number(commit=False) if verify: (txs_qs, is_verified) = self.clean(verify=True) if self.is_verified() and post_on_verify: self.mark_as_posted(commit=False, verify=False, raise_exception=True) except ValidationError as e: if self.can_unpost(): self.mark_as_unposted(raise_exception=True) raise JournalEntryValidationError(f'Something went wrong validating journal entry ID: {self.uuid}: {e.message}') except Exception as e: self.posted = False self._verified = False self.save(update_fields=['posted', 'updated'], verify=False) raise JournalEntryValidationError(e) if not self.is_verified() and verify: raise JournalEntryValidationError(message='Cannot save an unverified Journal Entry.') super(JournalEntryModelAbstract, self).save(*args, **kwargs) return self.je_number
django-ledger
positive
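The document numbering above relies on a lock-and-increment idiom; a minimal sketch of that pattern, under the assumption that the queryset resolves to exactly one EntityStateModel row.

from django.db import transaction
from django.db.models import F

def next_sequence(state_model_qs):
    with transaction.atomic():
        # select_for_update() locks the row until the transaction commits.
        state_model = state_model_qs.select_for_update().get()
        state_model.sequence = F('sequence') + 1  # increment happens in SQL
        state_model.save(update_fields=['sequence'])
        state_model.refresh_from_db()  # resolve the F() expression to a value
        return state_model.sequence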
def test_fail_in_restricted_arch(self): <DeepExtract> config_env = {} if restrict_arch_env: config_env['CLICKABLE_ARCH'] = restrict_arch_env config_json = {} if arch_agnostic_builder: config_json['builder'] = 'pure' else: config_json['builder'] = 'cmake' if 'armhf': config_json['restrict_arch'] = 'armhf' cli_args = [] if 'amd64': cli_args += ['--arch', 'amd64'] parser = Clickable.create_parser('Unit Test Call') run_args = parser.parse_args(cli_args) commands = ['no_command'] if build_cmd: commands.append('build') self.setUpConfig(expect_exception=True, mock_config_json=config_json, mock_config_env=config_env, args=run_args, commands=commands) if 'amd64': expected_arch = 'amd64' elif 'armhf': expected_arch = 'armhf' elif arch_agnostic_builder: expected_arch = 'all' elif restrict_arch_env: expected_arch = restrict_arch_env else: expected_arch = 'armhf' if not True: self.assertEqual(expected_arch, self.config.arch) </DeepExtract> <DeepExtract> config_env = {} if restrict_arch_env: config_env['CLICKABLE_ARCH'] = restrict_arch_env config_json = {} if arch_agnostic_builder: config_json['builder'] = 'pure' else: config_json['builder'] = 'cmake' if 'armhf': config_json['restrict_arch'] = 'armhf' cli_args = [] if 'amd64': cli_args += ['--arch', 'amd64'] parser = Clickable.create_parser('Unit Test Call') run_args = parser.parse_args(cli_args) commands = ['no_command'] if False: commands.append('build') self.setUpConfig(expect_exception=True, mock_config_json=config_json, mock_config_env=config_env, args=run_args, commands=commands) if 'amd64': expected_arch = 'amd64' elif 'armhf': expected_arch = 'armhf' elif arch_agnostic_builder: expected_arch = 'all' elif restrict_arch_env: expected_arch = restrict_arch_env else: expected_arch = 'armhf' if not True: self.assertEqual(expected_arch, self.config.arch) </DeepExtract> <DeepExtract> config_env = {} if restrict_arch_env: config_env['CLICKABLE_ARCH'] = restrict_arch_env config_json = {} if arch_agnostic_builder: config_json['builder'] = 'pure' else: config_json['builder'] = 'cmake' if 'arm64': config_json['restrict_arch'] = 'arm64' cli_args = [] if 'all': cli_args += ['--arch', 'all'] parser = Clickable.create_parser('Unit Test Call') run_args = parser.parse_args(cli_args) commands = ['no_command'] if False: commands.append('build') self.setUpConfig(expect_exception=True, mock_config_json=config_json, mock_config_env=config_env, args=run_args, commands=commands) if 'all': expected_arch = 'all' elif 'arm64': expected_arch = 'arm64' elif arch_agnostic_builder: expected_arch = 'all' elif restrict_arch_env: expected_arch = restrict_arch_env else: expected_arch = 'armhf' if not True: self.assertEqual(expected_arch, self.config.arch) </DeepExtract> <DeepExtract> config_env = {} if restrict_arch_env: config_env['CLICKABLE_ARCH'] = restrict_arch_env config_json = {} if arch_agnostic_builder: config_json['builder'] = 'pure' else: config_json['builder'] = 'cmake' if 'all': config_json['restrict_arch'] = 'all' cli_args = [] if 'arm64': cli_args += ['--arch', 'arm64'] parser = Clickable.create_parser('Unit Test Call') run_args = parser.parse_args(cli_args) commands = ['no_command'] if False: commands.append('build') self.setUpConfig(expect_exception=True, mock_config_json=config_json, mock_config_env=config_env, args=run_args, commands=commands) if 'arm64': expected_arch = 'arm64' elif 'all': expected_arch = 'all' elif arch_agnostic_builder: expected_arch = 'all' elif restrict_arch_env: expected_arch = restrict_arch_env else: expected_arch = 'armhf' if not True: 
self.assertEqual(expected_arch, self.config.arch) </DeepExtract>
def test_fail_in_restricted_arch(self): config_env = {} if restrict_arch_env: config_env['CLICKABLE_ARCH'] = restrict_arch_env config_json = {} if arch_agnostic_builder: config_json['builder'] = 'pure' else: config_json['builder'] = 'cmake' if 'armhf': config_json['restrict_arch'] = 'armhf' cli_args = [] if 'amd64': cli_args += ['--arch', 'amd64'] parser = Clickable.create_parser('Unit Test Call') run_args = parser.parse_args(cli_args) commands = ['no_command'] if build_cmd: commands.append('build') self.setUpConfig(expect_exception=True, mock_config_json=config_json, mock_config_env=config_env, args=run_args, commands=commands) if 'amd64': expected_arch = 'amd64' elif 'armhf': expected_arch = 'armhf' elif arch_agnostic_builder: expected_arch = 'all' elif restrict_arch_env: expected_arch = restrict_arch_env else: expected_arch = 'armhf' if not True: self.assertEqual(expected_arch, self.config.arch) config_env = {} if restrict_arch_env: config_env['CLICKABLE_ARCH'] = restrict_arch_env config_json = {} if arch_agnostic_builder: config_json['builder'] = 'pure' else: config_json['builder'] = 'cmake' if 'armhf': config_json['restrict_arch'] = 'armhf' cli_args = [] if 'amd64': cli_args += ['--arch', 'amd64'] parser = Clickable.create_parser('Unit Test Call') run_args = parser.parse_args(cli_args) commands = ['no_command'] if False: commands.append('build') self.setUpConfig(expect_exception=True, mock_config_json=config_json, mock_config_env=config_env, args=run_args, commands=commands) if 'amd64': expected_arch = 'amd64' elif 'armhf': expected_arch = 'armhf' elif arch_agnostic_builder: expected_arch = 'all' elif restrict_arch_env: expected_arch = restrict_arch_env else: expected_arch = 'armhf' if not True: self.assertEqual(expected_arch, self.config.arch) config_env = {} if restrict_arch_env: config_env['CLICKABLE_ARCH'] = restrict_arch_env config_json = {} if arch_agnostic_builder: config_json['builder'] = 'pure' else: config_json['builder'] = 'cmake' if 'arm64': config_json['restrict_arch'] = 'arm64' cli_args = [] if 'all': cli_args += ['--arch', 'all'] parser = Clickable.create_parser('Unit Test Call') run_args = parser.parse_args(cli_args) commands = ['no_command'] if False: commands.append('build') self.setUpConfig(expect_exception=True, mock_config_json=config_json, mock_config_env=config_env, args=run_args, commands=commands) if 'all': expected_arch = 'all' elif 'arm64': expected_arch = 'arm64' elif arch_agnostic_builder: expected_arch = 'all' elif restrict_arch_env: expected_arch = restrict_arch_env else: expected_arch = 'armhf' if not True: self.assertEqual(expected_arch, self.config.arch) config_env = {} if restrict_arch_env: config_env['CLICKABLE_ARCH'] = restrict_arch_env config_json = {} if arch_agnostic_builder: config_json['builder'] = 'pure' else: config_json['builder'] = 'cmake' if 'all': config_json['restrict_arch'] = 'all' cli_args = [] if 'arm64': cli_args += ['--arch', 'arm64'] parser = Clickable.create_parser('Unit Test Call') run_args = parser.parse_args(cli_args) commands = ['no_command'] if False: commands.append('build') self.setUpConfig(expect_exception=True, mock_config_json=config_json, mock_config_env=config_env, args=run_args, commands=commands) if 'arm64': expected_arch = 'arm64' elif 'all': expected_arch = 'all' elif arch_agnostic_builder: expected_arch = 'all' elif restrict_arch_env: expected_arch = restrict_arch_env else: expected_arch = 'armhf' if not True: self.assertEqual(expected_arch, self.config.arch)
clickable
positive
@pytest.mark.parametrize('items', (get_test_items(standard_dict_format=True), get_test_items(standard_dict_format=False), get_test_items(standard_dict_format=True, with_sort_keys=True), get_test_items(standard_dict_format=False, with_sort_keys=True))) def test_encrypt_batch_write_returns_plaintext_unprocessed_items_with_unknown_keys(items): <DeepExtract> context = EncryptionContext(partition_key_name=None, sort_key_name=None) actions = AttributeActions(default_action=CryptoAction.DO_NOTHING, attribute_actions={'encrypt-me': CryptoAction.ENCRYPT_AND_SIGN}) if sign_keys: actions.attribute_actions['partition-key'] = CryptoAction.SIGN_ONLY actions.attribute_actions['sort-key'] = CryptoAction.SIGN_ONLY materials = Mock(spec=CryptographicMaterialsProvider) crypto_config = CryptoConfig(materials_provider=materials, encryption_context=context, attribute_actions=actions) </DeepExtract> <DeepExtract> def dummy_encrypt(item, **kwargs): result = item.copy() result['encrypt-me'] = 'pretend Im encrypted' return result result = encrypt_batch_write_item(encrypt_method=dummy_encrypt, write_method=lambda **kwargs: {'UnprocessedItems': kwargs['RequestItems']}, crypto_config_method=lambda **kwargs: crypto_config, RequestItems=copy.deepcopy(items)) unprocessed = result['UnprocessedItems'] assert unprocessed == items </DeepExtract>
@pytest.mark.parametrize('items', (get_test_items(standard_dict_format=True), get_test_items(standard_dict_format=False), get_test_items(standard_dict_format=True, with_sort_keys=True), get_test_items(standard_dict_format=False, with_sort_keys=True))) def test_encrypt_batch_write_returns_plaintext_unprocessed_items_with_unknown_keys(items): context = EncryptionContext(partition_key_name=None, sort_key_name=None) actions = AttributeActions(default_action=CryptoAction.DO_NOTHING, attribute_actions={'encrypt-me': CryptoAction.ENCRYPT_AND_SIGN}) if sign_keys: actions.attribute_actions['partition-key'] = CryptoAction.SIGN_ONLY actions.attribute_actions['sort-key'] = CryptoAction.SIGN_ONLY materials = Mock(spec=CryptographicMaterialsProvider) crypto_config = CryptoConfig(materials_provider=materials, encryption_context=context, attribute_actions=actions) def dummy_encrypt(item, **kwargs): result = item.copy() result['encrypt-me'] = 'pretend Im encrypted' return result result = encrypt_batch_write_item(encrypt_method=dummy_encrypt, write_method=lambda **kwargs: {'UnprocessedItems': kwargs['RequestItems']}, crypto_config_method=lambda **kwargs: crypto_config, RequestItems=copy.deepcopy(items)) unprocessed = result['UnprocessedItems'] assert unprocessed == items
aws-dynamodb-encryption-python
positive
def subtest_list_all_packages(self): ls = self.loader.list_all_packages() <DeepExtract> ls = [] for name in collection: versls = collection[name].keys() versls.sort() versls.reverse() ls.append((name, versls)) ls2 = ls </DeepExtract> ls.sort() ls2.sort() self.assertEqual(ls, ls2)
def subtest_list_all_packages(self): ls = self.loader.list_all_packages() ls = [] for name in collection: versls = collection[name].keys() versls.sort() versls.reverse() ls.append((name, versls)) ls2 = ls ls.sort() ls2.sort() self.assertEqual(ls, ls2)
boodler
positive
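A Python 3 sketch of the listing the test above reconstructs: versions sorted newest-first within each package, packages sorted by name. The Python 2 idiom in the record (sorting a keys() list in place, then reversing) is replaced here by sorted(..., reverse=True).

def list_all_packages(collection):
    return sorted(
        (name, sorted(versions.keys(), reverse=True))
        for name, versions in collection.items()
    )

print(list_all_packages({'beep': {1: None, 2: None}, 'amb': {3: None}}))
# [('amb', [3]), ('beep', [2, 1])]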
def test_menu_prologue_unicodelight_long_text_set_via_property(self): <DeepExtract> msg = '{title:{fill}^{width}}'.format(title='simulate screen edges', fill='-', width=width - 2) print('{edge}{msg}{edge}'.format(edge='|', msg=msg)) </DeepExtract> pro = MenuTextSection(MenuStyle(border_style=LightBorderStyle())) pro.text = self.long_text for line in pro.generate(): print(line)
def test_menu_prologue_unicodelight_long_text_set_via_property(self): msg = '{title:{fill}^{width}}'.format(title='simulate screen edges', fill='-', width=width - 2) print('{edge}{msg}{edge}'.format(edge='|', msg=msg)) pro = MenuTextSection(MenuStyle(border_style=LightBorderStyle())) pro.text = self.long_text for line in pro.generate(): print(line)
console-menu
positive
def infer(self, mix: th.Tensor, mode: str='time') -> Union[th.Tensor, List[th.Tensor]]: """ Args: mix (Tensor): N x S, mixture signals Return: [Tensor, ...]: enhanced signals or TF masks """ self.check_args(mix, training=False, valid_dim=[1]) with th.no_grad(): mix = mix[None, :] <DeepExtract> (stft, _) = self.enh_transform.encode(mix, None) feats = self.enh_transform(stft) feats = tf.pad(feats, (0, 0, self.lctx, self.rctx), 'constant', 0) masks = self._tf_mask(feats) if self.complex_mask: masks = [th.stack(th.chunk(m, 2, 1), -1) for m in masks] if mode == 'time': bss_stft = [tf_masking(stft, m) for m in masks] packed = self.enh_transform.decode(bss_stft) else: packed = masks ret = packed[0] if self.num_branchs == 1 else packed </DeepExtract> return ret[0] if self.num_branchs == 1 else [r[0] for r in ret]
def infer(self, mix: th.Tensor, mode: str='time') -> Union[th.Tensor, List[th.Tensor]]: """ Args: mix (Tensor): N x S, mixture signals Return: [Tensor, ...]: enhanced signals or TF masks """ self.check_args(mix, training=False, valid_dim=[1]) with th.no_grad(): mix = mix[None, :] (stft, _) = self.enh_transform.encode(mix, None) feats = self.enh_transform(stft) feats = tf.pad(feats, (0, 0, self.lctx, self.rctx), 'constant', 0) masks = self._tf_mask(feats) if self.complex_mask: masks = [th.stack(th.chunk(m, 2, 1), -1) for m in masks] if mode == 'time': bss_stft = [tf_masking(stft, m) for m in masks] packed = self.enh_transform.decode(bss_stft) else: packed = masks ret = packed[0] if self.num_branchs == 1 else packed return ret[0] if self.num_branchs == 1 else [r[0] for r in ret]
aps
positive
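The context padding in the record above uses torch.nn.functional.pad, whose pad tuple works backwards from the last dimension; a small sketch of how (0, 0, lctx, rctx) adds frames along the time axis while leaving the feature axis alone.

import torch
import torch.nn.functional as F

feats = torch.zeros(1, 10, 80)       # batch x frames x features
# (0, 0): feature dim untouched; (3, 2): 3 left-context and 2 right-context frames.
padded = F.pad(feats, (0, 0, 3, 2))
assert padded.shape == (1, 15, 80)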
def build_ucx(): ucx_tarball_path = os.getenv('BYTEPS_UCX_TARBALL_PATH', '') if not ucx_tarball_path and with_pre_setup() and hasattr(pre_setup, 'ucx_tarball_path'): ucx_tarball_path = pre_setup.ucx_tarball_path.strip() if not ucx_tarball_path: if os.path.exists('./ucx.tar.gz'): ucx_tarball_path = os.path.join(here, './ucx.tar.gz') if not ucx_tarball_path: cmd = 'curl -kL {} -o ucx.tar.gz'.format('https://github.com/openucx/ucx/archive/refs/tags/v1.11.0.tar.gz') subprocess.run(cmd, shell=True) ucx_tarball_path = os.path.join(here, './ucx.tar.gz') print('ucx_tarball_path is', ucx_tarball_path) <DeepExtract> ucx_prefix = os.getenv('BYTEPS_UCX_PREFIX', ucx_default_home) ucx_prefix = ucx_prefix </DeepExtract> sudo_str = '' if os.access(ucx_prefix, os.W_OK) else 'sudo' cmd = 'mkdir -p tmp; tar xzf {} -C tmp; '.format(ucx_tarball_path) + 'rm -rf ucx-build; mkdir -p ucx-build; mv tmp/ucx-*/* ucx-build/; ' + 'cd ucx-build; pwd; which libtoolize; ' + './autogen.sh; ./autogen.sh && ' + './contrib/configure-release --enable-mt --prefix={0} && make -j && {1} make install -j'.format(ucx_prefix, sudo_str) make_process = subprocess.Popen(cmd, cwd='3rdparty', stdout=sys.stdout, stderr=sys.stderr, shell=True) make_process.communicate() if make_process.returncode: raise DistutilsSetupError('An ERROR occured while running the Makefile for the ucx library. Exit code: {0}'.format(make_process.returncode))
def build_ucx(): ucx_tarball_path = os.getenv('BYTEPS_UCX_TARBALL_PATH', '') if not ucx_tarball_path and with_pre_setup() and hasattr(pre_setup, 'ucx_tarball_path'): ucx_tarball_path = pre_setup.ucx_tarball_path.strip() if not ucx_tarball_path: if os.path.exists('./ucx.tar.gz'): ucx_tarball_path = os.path.join(here, './ucx.tar.gz') if not ucx_tarball_path: cmd = 'curl -kL {} -o ucx.tar.gz'.format('https://github.com/openucx/ucx/archive/refs/tags/v1.11.0.tar.gz') subprocess.run(cmd, shell=True) ucx_tarball_path = os.path.join(here, './ucx.tar.gz') print('ucx_tarball_path is', ucx_tarball_path) ucx_prefix = os.getenv('BYTEPS_UCX_PREFIX', ucx_default_home) ucx_prefix = ucx_prefix sudo_str = '' if os.access(ucx_prefix, os.W_OK) else 'sudo' cmd = 'mkdir -p tmp; tar xzf {} -C tmp; '.format(ucx_tarball_path) + 'rm -rf ucx-build; mkdir -p ucx-build; mv tmp/ucx-*/* ucx-build/; ' + 'cd ucx-build; pwd; which libtoolize; ' + './autogen.sh; ./autogen.sh && ' + './contrib/configure-release --enable-mt --prefix={0} && make -j && {1} make install -j'.format(ucx_prefix, sudo_str) make_process = subprocess.Popen(cmd, cwd='3rdparty', stdout=sys.stdout, stderr=sys.stderr, shell=True) make_process.communicate() if make_process.returncode: raise DistutilsSetupError('An ERROR occured while running the Makefile for the ucx library. Exit code: {0}'.format(make_process.returncode))
byteps
positive
def __set__(self, instance, value): <DeepExtract> self._set_default(instance) if self._needs_to_track_change(instance, value): instance.__dict__[self.trace_attribute_name].append(value) </DeepExtract> instance.__dict__[self._name] = value
def __set__(self, instance, value): self._set_default(instance) if self._needs_to_track_change(instance, value): instance.__dict__[self.trace_attribute_name].append(value) instance.__dict__[self._name] = value
Clean-code-in-Python
positive
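A self-contained sketch of the change-tracing descriptor whose __set__ appears above; the class and attribute names are illustrative, and the only-append-on-change test stands in for the record's _needs_to_track_change.

class Traced:
    def __set_name__(self, owner, name):
        self._name = name
        self.trace_attribute_name = f'{name}_trace'

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return instance.__dict__.get(self._name)

    def __set__(self, instance, value):
        trace = instance.__dict__.setdefault(self.trace_attribute_name, [])
        if not trace or trace[-1] != value:  # stand-in for _needs_to_track_change
            trace.append(value)
        instance.__dict__[self._name] = value

class Traveler:
    current_city = Traced()

t = Traveler()
t.current_city = 'Lima'
t.current_city = 'Quito'
print(t.current_city_trace)  # ['Lima', 'Quito']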
def forward(self, images, features, targets=None, return_maps=False): """ Arguments: images (ImageList): images for which we want to compute the predictions features (list[Tensor]): features computed from the images that are used for computing the predictions. Each tensor in the list correspond to different feature levels targets (list[BoxList): ground-truth boxes present in the image (optional) Returns: boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per image. losses (dict[Tensor]): the losses for the model during training. During testing, it is an empty dict. """ (box_cls, box_regression, centerness) = self.head(features) <DeepExtract> locations = [] for (level, feature) in enumerate(features): (h, w) = feature.size()[-2:] locations_per_level = self.compute_locations_per_level(h, w, self.fpn_strides[level], feature.device) locations.append(locations_per_level) locations = locations </DeepExtract> if self.training: return self._forward_train(locations, box_cls, box_regression, centerness, targets, return_maps) else: return self._forward_test(locations, box_cls, box_regression, centerness, images.image_sizes)
def forward(self, images, features, targets=None, return_maps=False): """ Arguments: images (ImageList): images for which we want to compute the predictions features (list[Tensor]): features computed from the images that are used for computing the predictions. Each tensor in the list correspond to different feature levels targets (list[BoxList): ground-truth boxes present in the image (optional) Returns: boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per image. losses (dict[Tensor]): the losses for the model during training. During testing, it is an empty dict. """ (box_cls, box_regression, centerness) = self.head(features) locations = [] for (level, feature) in enumerate(features): (h, w) = feature.size()[-2:] locations_per_level = self.compute_locations_per_level(h, w, self.fpn_strides[level], feature.device) locations.append(locations_per_level) locations = locations if self.training: return self._forward_train(locations, box_cls, box_regression, centerness, targets, return_maps) else: return self._forward_test(locations, box_cls, box_regression, centerness, images.image_sizes)
EveryPixelMatters
positive
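compute_locations_per_level, called per FPN level above, is not shown in the record; sketched here, as an assumption about what it does, is the standard FCOS version, which builds one (x, y) center per feature cell spaced by that level's stride.

import torch

def compute_locations_per_level(h, w, stride, device='cpu'):
    shifts_x = torch.arange(0, w * stride, step=stride, dtype=torch.float32, device=device)
    shifts_y = torch.arange(0, h * stride, step=stride, dtype=torch.float32, device=device)
    shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing='ij')
    # Offset by stride // 2 so each location sits at the center of its cell.
    return torch.stack((shift_x.reshape(-1), shift_y.reshape(-1)), dim=1) + stride // 2

print(compute_locations_per_level(2, 2, stride=8).shape)  # torch.Size([4, 2])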
def sort_shell(shell, use_copy=True): """ Sort a basis set shell into a standard order If use_copy is True, the input shells are not modified. """ if use_copy: shell = copy.deepcopy(shell) tmp_c = shell['coefficients'] tmp_z = shell['exponents'] zidx = [x for (x, y) in sorted(enumerate(tmp_z), key=lambda x: -float(x[1]))] if len(shell['angular_momentum']) == 1: <DeepExtract> rsq = [] if shell['function_type'][:3] == 'gto': if len(shell['angular_momentum']) == 1: rsq_mat = gto_Rsq_contr(shell['exponents'], shell['coefficients'], shell['angular_momentum'][0]) rsq = [rsq_mat[i][i] for i in range(len(rsq_mat))] else: for iam in range(len(shell['angular_momentum'])): rsq_mat = gto_Rsq_contr(shell['exponents'], [shell['coefficients'][iam]], shell['angular_momentum'][iam]) assert len(rsq_mat) == 1 and len(rsq_mat[0]) == 1 rsq.append(rsq_mat[0][0]) else: raise RuntimeError('Function type {} not handled'.format(shell['function_type'])) rsq_vec = rsq </DeepExtract> cidx = sorted(range(len(rsq_vec)), key=rsq_vec.__getitem__) else: cidx = range(len(tmp_c)) newexp = [tmp_z[i] for i in zidx] newcoef = [[tmp_c[i][j] for j in zidx] for i in cidx] shell['exponents'] = newexp shell['coefficients'] = newcoef return shell
def sort_shell(shell, use_copy=True): """ Sort a basis set shell into a standard order If use_copy is True, the input shells are not modified. """ if use_copy: shell = copy.deepcopy(shell) tmp_c = shell['coefficients'] tmp_z = shell['exponents'] zidx = [x for (x, y) in sorted(enumerate(tmp_z), key=lambda x: -float(x[1]))] if len(shell['angular_momentum']) == 1: rsq = [] if shell['function_type'][:3] == 'gto': if len(shell['angular_momentum']) == 1: rsq_mat = gto_Rsq_contr(shell['exponents'], shell['coefficients'], shell['angular_momentum'][0]) rsq = [rsq_mat[i][i] for i in range(len(rsq_mat))] else: for iam in range(len(shell['angular_momentum'])): rsq_mat = gto_Rsq_contr(shell['exponents'], [shell['coefficients'][iam]], shell['angular_momentum'][iam]) assert len(rsq_mat) == 1 and len(rsq_mat[0]) == 1 rsq.append(rsq_mat[0][0]) else: raise RuntimeError('Function type {} not handled'.format(shell['function_type'])) rsq_vec = rsq cidx = sorted(range(len(rsq_vec)), key=rsq_vec.__getitem__) else: cidx = range(len(tmp_c)) newexp = [tmp_z[i] for i in zidx] newcoef = [[tmp_c[i][j] for j in zidx] for i in cidx] shell['exponents'] = newexp shell['coefficients'] = newcoef return shell
basis_set_exchange
positive
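The exponent reordering above is an argsort over strings parsed as floats; a small sketch of the same idiom, showing how zidx permutes both the exponents and every coefficient row.

exponents = ['1.0', '100.0', '10.0']
coefficients = [['a', 'b', 'c']]

# Indices of exponents sorted largest-first (exponents are stored as strings).
zidx = [x for x, y in sorted(enumerate(exponents), key=lambda t: -float(t[1]))]

print([exponents[i] for i in zidx])                      # ['100.0', '10.0', '1.0']
print([[row[i] for i in zidx] for row in coefficients])  # [['b', 'c', 'a']]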
def downloadParaview(self): <DeepExtract> self.signals.status.emit('Downloading {}, please wait...'.format('ParaView')) try: if hasattr(ssl, 'create_default_context'): context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) else: context = None (filename, header) = self.downloadFile(self.paraview_url, suffix=PARAVIEW_FILE_EXT, reporthook=self.downloadStatus, context=context) except Exception as ex: raise Exception('Error downloading {}: {}'.format('ParaView', str(ex))) self.signals.status.emit('{} downloaded to {}'.format('ParaView', filename)) filename = filename </DeepExtract> if QtCore.QProcess().startDetached(filename): self.signals.status.emit('ParaView installer launched - please complete the installation') else: raise Exception('Failed to launch ParaView installer')
def downloadParaview(self): self.signals.status.emit('Downloading {}, please wait...'.format('ParaView')) try: if hasattr(ssl, 'create_default_context'): context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) else: context = None (filename, header) = self.downloadFile(self.paraview_url, suffix=PARAVIEW_FILE_EXT, reporthook=self.downloadStatus, context=context) except Exception as ex: raise Exception('Error downloading {}: {}'.format('ParaView', str(ex))) self.signals.status.emit('{} downloaded to {}'.format('ParaView', filename)) filename = filename if QtCore.QProcess().startDetached(filename): self.signals.status.emit('ParaView installer launched - please complete the installation') else: raise Exception('Failed to launch ParaView installer')
CfdOF
positive
def uniform_init(layer, a=0, b=1, bias=0): <DeepExtract> temp_value = paddle.uniform(min=a, max=b, shape=layer.weight.shape) layer.weight.set_value(temp_value) </DeepExtract> if hasattr(layer, 'bias') and layer.bias is not None: <DeepExtract> temp_value = paddle.full(layer.bias.shape, bias, layer.bias.dtype) layer.bias.set_value(temp_value) </DeepExtract>
def uniform_init(layer, a=0, b=1, bias=0): temp_value = paddle.uniform(min=a, max=b, shape=layer.weight.shape) layer.weight.set_value(temp_value) if hasattr(layer, 'bias') and layer.bias is not None: temp_value = paddle.full(layer.bias.shape, bias, layer.bias.dtype) layer.bias.set_value(temp_value)
-AI-emmmm
positive
def compute_segmentation(binary, scale, csminheight, maxcolseps, blackseps, maxseps, sepwiden, usegauss, hscale, vscale, threshold, debug=False, verbose=False): """Given a binary image, compute a complete segmentation into lines, computing both columns and text lines.""" binary = np.array(binary, 'B') <DeepExtract> (labels, _) = morph.label(binary) objects = morph.find_objects(labels) for (i, b) in enumerate(objects): if sl.width(b) > maxsize * scale: labels[b][labels[b] == i + 1] = 0 binary = np.array(labels != 0, 'B') </DeepExtract> if verbose: print('computing column separators') <DeepExtract> print('considering at most %g whitespace column separators' % maxcolseps) colseps = compute_colseps_conv(binary=binary, csminheight=csminheight, maxcolseps=maxcolseps, scale=scale, debug=debug) if debug: debug_show(0.7 * colseps + 0.3 * binary, 'compute_colseps colwsseps') if blackseps and maxseps == 0: maxseps = 2 if maxseps > 0: print('considering at most %g black column separators' % maxseps) seps = compute_separators_morph(binary, scale, sepwiden, maxseps) if debug: debug_show(0.7 * colseps + 0.3 * binary, 'compute_colseps colseps') colseps = np.maximum(colseps, seps) binary = np.minimum(binary, 1 - seps) (colseps, binary) = (colseps, binary) </DeepExtract> if verbose: print('computing lines') <DeepExtract> boxmap = psegutils.compute_boxmap(binary, scale) cleaned = boxmap * binary if debug: debug_show(cleaned, 'cleaned') if usegauss: grad = gaussian_filter(1.0 * cleaned, (vscale * 0.3 * scale, hscale * 6 * scale), order=(1, 0)) else: grad = gaussian_filter(1.0 * cleaned, (max(4, vscale * 0.3 * scale), hscale * scale), order=(1, 0)) grad = uniform_filter(grad, (vscale, hscale * 6 * scale)) if debug: debug_show(grad, 'compute_gradmaps grad') bottom = ocrolib.norm_max((grad < 0) * -grad) top = ocrolib.norm_max((grad > 0) * grad) if debug: debug_show(bottom, 'compute_gradmaps bottom') debug_show(top, 'compute_gradmaps top') (bottom, top, boxmap) = (bottom, top, boxmap) </DeepExtract> <DeepExtract> t = threshold vrange = int(vscale * scale) bmarked = maximum_filter(bottom == maximum_filter(bottom, (vrange, 0)), (2, 2)) bmarked = bmarked * (bottom > t * np.amax(bottom) * t) * (1 - colseps) tmarked = maximum_filter(top == maximum_filter(top, (vrange, 0)), (2, 2)) tmarked = tmarked * (top > t * np.amax(top) * t / 2) * (1 - colseps) tmarked = maximum_filter(tmarked, (1, 20)) seeds = np.zeros(binary.shape, 'i') delta = max(3, int(scale / 2)) for x in range(bmarked.shape[1]): transitions = sorted([(y, 1) for y in np.where(bmarked[:, x])[0]] + [(y, 0) for y in np.where(tmarked[:, x][0])])[::-1] transitions += [(0, 0)] for l in range(len(transitions) - 1): (y0, s0) = transitions[l] if s0 == 0: continue seeds[y0 - delta:y0, x] = 1 (y1, s1) = transitions[l + 1] if s1 == 0 and y0 - y1 < 5 * scale: seeds[y1:y0, x] = 1 seeds = maximum_filter(seeds, (1, int(1 + scale))) seeds = seeds * (1 - colseps) if debug: debug_show([seeds, 0.3 * tmarked + 0.7 * bmarked, binary], 'lineseeds') (seeds, _) = morph.label(seeds) seeds = seeds </DeepExtract> if debug: <DeepExtract> if type([bottom, top, boxmap]) == list: assert len([bottom, top, boxmap]) == 3 [bottom, top, boxmap] = np.transpose(np.array([bottom, top, boxmap]), [1, 2, 0]) plt.clf() plt.title('seeds') plt.imshow([bottom, top, boxmap]) raw_input('PRESS ANY KEY TO CONTINUE.') </DeepExtract> if verbose: print('propagating labels') llabels = morph.propagate_labels(boxmap, seeds, conflict=0) if verbose: print('spreading labels') spread = morph.spread_labels(seeds, 
maxdist=scale) llabels = np.where(llabels > 0, llabels, spread * binary) segmentation = llabels * binary return segmentation
def compute_segmentation(binary, scale, csminheight, maxcolseps, blackseps, maxseps, sepwiden, usegauss, hscale, vscale, threshold, debug=False, verbose=False): """Given a binary image, compute a complete segmentation into lines, computing both columns and text lines.""" binary = np.array(binary, 'B') (labels, _) = morph.label(binary) objects = morph.find_objects(labels) for (i, b) in enumerate(objects): if sl.width(b) > maxsize * scale: labels[b][labels[b] == i + 1] = 0 binary = np.array(labels != 0, 'B') if verbose: print('computing column separators') print('considering at most %g whitespace column separators' % maxcolseps) colseps = compute_colseps_conv(binary=binary, csminheight=csminheight, maxcolseps=maxcolseps, scale=scale, debug=debug) if debug: debug_show(0.7 * colseps + 0.3 * binary, 'compute_colseps colwsseps') if blackseps and maxseps == 0: maxseps = 2 if maxseps > 0: print('considering at most %g black column separators' % maxseps) seps = compute_separators_morph(binary, scale, sepwiden, maxseps) if debug: debug_show(0.7 * colseps + 0.3 * binary, 'compute_colseps colseps') colseps = np.maximum(colseps, seps) binary = np.minimum(binary, 1 - seps) (colseps, binary) = (colseps, binary) if verbose: print('computing lines') boxmap = psegutils.compute_boxmap(binary, scale) cleaned = boxmap * binary if debug: debug_show(cleaned, 'cleaned') if usegauss: grad = gaussian_filter(1.0 * cleaned, (vscale * 0.3 * scale, hscale * 6 * scale), order=(1, 0)) else: grad = gaussian_filter(1.0 * cleaned, (max(4, vscale * 0.3 * scale), hscale * scale), order=(1, 0)) grad = uniform_filter(grad, (vscale, hscale * 6 * scale)) if debug: debug_show(grad, 'compute_gradmaps grad') bottom = ocrolib.norm_max((grad < 0) * -grad) top = ocrolib.norm_max((grad > 0) * grad) if debug: debug_show(bottom, 'compute_gradmaps bottom') debug_show(top, 'compute_gradmaps top') (bottom, top, boxmap) = (bottom, top, boxmap) t = threshold vrange = int(vscale * scale) bmarked = maximum_filter(bottom == maximum_filter(bottom, (vrange, 0)), (2, 2)) bmarked = bmarked * (bottom > t * np.amax(bottom) * t) * (1 - colseps) tmarked = maximum_filter(top == maximum_filter(top, (vrange, 0)), (2, 2)) tmarked = tmarked * (top > t * np.amax(top) * t / 2) * (1 - colseps) tmarked = maximum_filter(tmarked, (1, 20)) seeds = np.zeros(binary.shape, 'i') delta = max(3, int(scale / 2)) for x in range(bmarked.shape[1]): transitions = sorted([(y, 1) for y in np.where(bmarked[:, x])[0]] + [(y, 0) for y in np.where(tmarked[:, x][0])])[::-1] transitions += [(0, 0)] for l in range(len(transitions) - 1): (y0, s0) = transitions[l] if s0 == 0: continue seeds[y0 - delta:y0, x] = 1 (y1, s1) = transitions[l + 1] if s1 == 0 and y0 - y1 < 5 * scale: seeds[y1:y0, x] = 1 seeds = maximum_filter(seeds, (1, int(1 + scale))) seeds = seeds * (1 - colseps) if debug: debug_show([seeds, 0.3 * tmarked + 0.7 * bmarked, binary], 'lineseeds') (seeds, _) = morph.label(seeds) seeds = seeds if debug: if type([bottom, top, boxmap]) == list: assert len([bottom, top, boxmap]) == 3 [bottom, top, boxmap] = np.transpose(np.array([bottom, top, boxmap]), [1, 2, 0]) plt.clf() plt.title('seeds') plt.imshow([bottom, top, boxmap]) raw_input('PRESS ANY KEY TO CONTINUE.') if verbose: print('propagating labels') llabels = morph.propagate_labels(boxmap, seeds, conflict=0) if verbose: print('spreading labels') spread = morph.spread_labels(seeds, maxdist=scale) llabels = np.where(llabels > 0, llabels, spread * binary) segmentation = llabels * binary return segmentation
deep_ocr
positive
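The line-seed logic above repeatedly uses the equality-with-maximum_filter trick to mark local maxima; a compact sketch of that step in isolation, on random data.

import numpy as np
from scipy.ndimage import maximum_filter

signal = np.random.rand(32, 8)
vrange = 5
# A pixel is marked when it equals the maximum of its (vrange x 1) vertical
# neighborhood, i.e. it is a local peak along the column direction.
peaks = (signal == maximum_filter(signal, (vrange, 1)))
print(peaks.sum(), 'local column peaks')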
def buildtest_help(command): """Entry point for ``buildtest help`` which display a summary of how to use buildtest commands Args: command (str): Name of buildtest command specified by ``buildtest help <command>`` """ if command in ['build', 'bd']: <DeepExtract> table = Table(title='Building buildspecs', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest build -b <file>', 'Build a single buildspec file') table.add_row('buildtest build -b <dir>', 'Build all buildspecs recursively in a given directory') table.add_row('buildtest build -b <file> -b <dir>', 'Build buildspecs by file and directory') table.add_row('buildtest build -b <file> -b <dir> -x <file> -x <dir>', 'Exclude files and directory when building buildspecs') table.add_row('buildtest build -t pass -t python', "Build buildspecs by tagname 'pass' and 'python'") table.add_row('buildtest build -t pass,fail', 'Build buildspecs by tagnames that are specified as comma sperated list') table.add_row('buildtest build -e <executor1> -e <executor2>', 'Building buildspecs by executor') table.add_row('buildtest build -b <file> -t <tagname1> -e <executor1>', 'Building buildspecs with file, directory, tags, and executors') table.add_row('buildtest build -b tutorials --filter type=script', "Build all tests in directory 'tutorials' and filter tests by type='script'") table.add_row('buildtest build -b tutorials --filter tags=pass', "Build all tests in directory 'tutorials' and filter tests by tags='pass'") table.add_row('buildtest build -b tutorials --filter maintainers=@bob', "Build all tests in directory 'tutorials' and filter tests by maintainers='@bob'") table.add_row('buildtest build --helpfilter', 'Show list of filter fields used with --filter option') table.add_row('buildtest -c config.yml build -b <file>', "Use buildtest configuration file 'config.yml' ") table.add_row('buildtest build -b <file> --rebuild 5', 'Rebuild a test 5 times') table.add_row('buildtest build -b <file> --testdir /tmp', 'Write tests in /tmp') table.add_row('buildtest build --rerun', "Run last successful 'buildtest build' command") table.add_row('buildtest -r $HOME/python.json build -t python', "Write test to report file $HOME/python.json for all test run via 'python' tag") table.add_row('buildtest build -b <file> --module-purge --modules gcc,python', "For every test run 'module purge' and then load 'gcc' and 'python' module") table.add_row('buildtest build -b <file> --unload-modules gcc/9.3.0 --modules gcc/10.3.0', "For every test run 'module unload gcc/9.3.0' and then load 'gcc/10.3.0'") table.add_row('buildtest build -b /tmp/hostname.yml --maxpendtime 120 --pollinterval 10', 'Poll jobs every 10 seconds and maximum pending time for jobs to 120 sec when submitting batch job. 
Job will be cancelled after 120sec if job is pending') table.add_row('buildtest build -b <file> --account dev', "Use project 'dev' when submitting batch jobs") table.add_row('buildtest build -b <file> --timeout 60', 'Test will run till it reaches timeout of 60sec and then it will be cancelled if it exceeds the limit.') table.add_row('buildtest build -t python --limit=5', 'Limit number of test runs to 5') console.print(table) </DeepExtract> elif command in ['buildspec', 'bc']: <DeepExtract> table = Table(title='Finding Buildspecs', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest buildspec find', 'Discover and validate all buildspecs and load all validated buildspecs in cache') table.add_row('buildtest buildspec find --rebuild', 'Rebuild cache file') table.add_row('buildtest buildspec find --pager', 'Paginate output of buildspec cache') table.add_row('buildtest buildspec find --root /tmp --rebuild', 'Discover buildspecs in /tmp and rebuild buildspec cache') table.add_row('buildtest buildspec find --quiet --rebuild', "Rebuild cache file but don't display output of cache") table.add_row('buildtest buildspec find --paths', 'Print all root directories for buildspecs') table.add_row('buildtest buildspec find --buildspec', 'List all available buildspecs from cache') table.add_row('buildtest buildspec find --executors', 'List all unique executors from cache') table.add_row('buildtest buildspec find --filter type=script,tags=pass', "Filter buildspec cache based on type=script and tags='pass'") table.add_row('buildtest buildspec find --filter buildspec=<path>', 'Filter cache by buildspec file') table.add_row('buildtest buildspec find --format name,description', "Format table columns by field: 'name', and 'description'") table.add_row('buildtest buildspec find --group-by-tags', 'Group tests by tag name') table.add_row('buildtest buildspec find --group-by-executor', 'Group tests by executor name') table.add_row('buildtest buildspec find --helpfilter', 'Show all filter fields') table.add_row('buildtest buildspec find --helpformat', 'Show all format fields') table.add_row('buildtest buildspec find --terse', 'Display output in terse format') table.add_row('buildtest buildspec find --row-count', 'Print total count of records from the table') table.add_row('buildtest buildspec find --count=5', 'Limit output of buildspec cache to 5 elements') table.add_row('buildtest buildspec find invalid', 'Show invalid buildspecs') table.add_row('buildtest buildspec find invalid --error', 'Show invalid buildspecs with error messages') console.print(table) table = Table(title='Validating Buildspecs', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest buildspec validate -b <file>', 'Validate a buildspec with JSON Schema') table.add_row('buildtest buildspec validate -b /tmp/ -x /tmp/network', 'Validate all buildspecs in directory /tmp but exclude /tmp/network') table.add_row('buildtest buildspec validate -t python -t mac', "Validate all buildspecs for tagname 'python' and 'mac'") table.add_row('buildtest buildspec validate -e generic.local.bash', "Validate all buildspecs for executor 'generic.local.bash'") console.print(table) table = Table(title='Additional Features of Buildspecs', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', 
justify='left', style='magenta') table.add_row('buildtest buildspec summary', 'Show summary of buildspec cache file') table.add_row('buildtest buildspec summary --pager', 'Paginate the output of summary for buildspec cache') table.add_row('buildtest buildspec show python_hello', "Show content of buildspec based on test name 'python_hello'") table.add_row('buildtest buildspec show python_hello --theme emacs', "Use color theme 'emacs' for showing content of test") table.add_row('buildtest buildspec show-fail', 'Show content of buildspec on all failed tests') table.add_row('buildtest buildspec show-fail exit1_fail', "Show content of test 'exit1_fail'") table.add_row('buildtest buildspec edit-test python_hello', "Open test 'python_hello' in editor and validate file upon closing") table.add_row('buildtest buildspec edit-file $BUILDTEST_ROOT/tutorials/sleep.yml', 'Open file $BUILDTEST_ROOT/tutorials/sleep.yml in editor and validate file upon closing') table.add_row('buildtest buildspec maintainers find johndoe', "Find buildspec with maintainer name 'johndoe'") table.add_row('buildtest buildspec maintainers --list', 'List all maintainers from buildspec cache') table.add_row('buildtest buildspec maintainers --list --terse --no-header', 'List all maintainers in machine readable format without header') table.add_row('buildtest buildspec maintainers --breakdown', 'Show breakdown of maintainers by buildspecs') console.print(table) </DeepExtract> elif command in ['config', 'cg']: <DeepExtract> table = Table(title='Configuring Buildtest', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest config view', 'View content of configuration file') table.add_row('buildtest config view --pager', 'Paginate output of configuration file') table.add_row('buildtest config validate', 'Validate configuration file with JSON schema') table.add_row('buildtest config edit', 'Edit configuration file in your preferred editor') table.add_row('buildtest config executors', 'List all executors in flat listing from configuration file') table.add_row('buildtest config executors --yaml', 'Show executor configuration in YAML format') table.add_row('buildtest config executors --json', 'Show executor configuration in JSON format') table.add_row('buildtest config executors --disabled', 'List all disabled executors') table.add_row('buildtest config executors --json', 'List all invalid executors') table.add_row('buildtest config path', 'Show path to configuration file') table.add_row('buildtest config systems', 'List all available system entries in configuration file') table.add_row('buildtest -c /tmp/config.yml config validate', 'Validate configuration file /tmp/config.yml') table.add_row('buildtest config compilers', 'List all compilers from configuration file in flat listing') table.add_row('buildtest config compilers find', 'Detect compilers and update configuration file') table.add_row('buildtest config compilers find --detailed --update', 'Show detailed output when finding compiler and update configuration file with new compilers') table.add_row('buildtest config compilers test', 'Test each compiler instance by performing module load test') console.print(table) </DeepExtract> elif command in ['inspect', 'it']: <DeepExtract> table = Table(title='Inspecting a test', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') 
table.add_row('buildtest inspect list', 'Display all test names, ids, and corresponding buildspec file') table.add_row('buildtest inspect list -t', 'Show output in terse format') table.add_row('buildtest inspect list --pager', 'Paginate output of inspect list') table.add_row('buildtest inspect name hello', "Display last run for test name 'hello'") table.add_row('buildtest inspect name hello/9ac bar/ac9', "Display record for test 'hello/9ac' and 'bar/ac9'. Will find first match for each test ID") table.add_row('buildtest inspect buildspec tutorials/vars.yml', "Fetch latest runs for all tests in buildspec file 'tutorials/vars.yml'") table.add_row('buildtest inspect query -o exit1_fail', "Display content of output file for latest run for test name 'exit1_fail'") table.add_row('buildtest inspect query -e hello', "Display content of error file for test name 'hello'") table.add_row('buildtest inspect query exit1_fail/', "Display all runs for tests 'exit1_fail'") table.add_row("buildtest inspect query 'exit1_fail/(24|52)'", "Use regular expression when searching for test via 'buildtest inspect query'") console.print(table) </DeepExtract> elif command in ['report', 'rt']: <DeepExtract> table = Table(title='Viewing Test Report', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest report', 'Display all test results') table.add_row('buildtest report --pager', 'Paginate output of test results') table.add_row('buildtest report --filter returncode=0', 'Filter test results by returncode=0') table.add_row('buildtest report --filter state=PASS,tags=python', "Filter test by filter fields 'state', 'tags'.") table.add_row('buildtest report --filter buildspec=tutorials/vars.yml', "Filter report by buildspec file 'tutorials/vars.yml") table.add_row('buildtest report --format name,state,buildspec', "Format report table by field 'name', 'state', 'buildspec'") table.add_row('buildtest report --helpfilter', 'List all filter fields') table.add_row('buildtest report --helpformat', 'List all format fields') table.add_row('buildtest report --latest', 'Retrieve latest record for all tests') table.add_row('buildtest report --count=5', 'Retrieve 5 records from report file') table.add_row('buildtest -r /tmp/result.json report', 'Read report file /tmp/result.json and display result') table.add_row('buildtest report --fail', 'Show all test failures') table.add_row('buildtest report --pass', 'Show all test passed') table.add_row('buildtest report --start 2022-01-01 --end 2022-01-05', 'Show all test records in the date range from [2022-01-01, 2022-01-05]') table.add_row('buildtest report --terse', 'Print report in terse format') table.add_row('buildtest report --row-count', 'Print total count of records from the table') table.add_row('buildtest report list', 'List all report files') table.add_row('buildtest report clear', 'Remove content of default report file') table.add_row('buildtest report path', 'Print full path to the report file being used') table.add_row('buildtest report summary', 'Show summary of test report') table.add_row('buildtest report summary --detailed', 'Show detailed summary of test report') table.add_row('buildtest report summary --pager', 'Paginate output of report summary') console.print(table) </DeepExtract> elif command == 'path': <DeepExtract> table = Table(title='Get Path to Test', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', 
justify='left', style='magenta') table.add_row('buildtest path circle_area', "Get test root for test name 'circle_area'") table.add_row('buildtest path -t circle_area', "Get test script for test name 'circle_area'") table.add_row('buildtest path -o circle_area', "Get output file for test name 'circle_area'") table.add_row('buildtest path -e circle_area', "Get error file for test name 'circle_area'") table.add_row('buildtest path -b circle_area', "Get build script for test name 'circle area'") table.add_row('buildtest path --stagedir circle_area', "Get stage directory for test name 'circle_area'") table.add_row('buildtest path circle_area/abc', "Get test root for test name 'circle_area' starting with test ID 'abc'") console.print(table) </DeepExtract> elif command in ['history', 'hy']: <DeepExtract> table = Table(title='Editing buildspec', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest history list', 'List all build history files') table.add_row('buildtest history list --terse', 'Print output in terse format') table.add_row('buildtest history list --pager', 'Paginate output of the history list') table.add_row('buildtest history query 0', "Query content of history build identifier '0'") table.add_row('buildtest history query 0 --pager', "Paginate the query content of history build identifier '0'") table.add_row('buildtest history query 0 --log', "Open logfile for build identifier '0'") console.print(table) </DeepExtract> elif command == 'cdash': <DeepExtract> table = Table(title='Editing buildspec', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest cdash upload DEMO', "Upload all tests to cdash with build name 'DEMO'") table.add_row('buildtest cdash upload DEMO --open', 'Upload test results to CDASH and open results in web browser') table.add_row('buildtest --report /tmp/result.json cdash upload DAILY_CHECK ', "Upload all tests from report file '/tmp/result.json' with build name DAILY_CHECK") table.add_row('buildtest cdash upload --site laptop DEMO', "Upload tests to CDASH with site named called 'laptop'") table.add_row('buildtest cdash view', 'Open CDASH project in web-browser') console.print(table) </DeepExtract> elif command == 'schema': <DeepExtract> table = Table(title='Buildtest Schemas', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest schema', 'Report all buildtest schema files') table.add_row('buildtest schema -n script.schema.json -e ', 'Show example for schema type script-v1.0-schema.json') table.add_row('buildtest schema -n script.schema.json -j', 'Show content of JSON schema for script.schema.json') console.print(table) </DeepExtract> elif command in ['stylecheck', 'style']: <DeepExtract> table = Table(title='Buildtest stylecheck', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest stylecheck', 'Run all style check without applying changes to codebase') table.add_row('buildtest stylecheck -a', 'Run all style check and apply changes to codebase') table.add_row('buildtest stylecheck --no-black', 'Disable black style check') table.add_row('buildtest stylecheck --no-isort', 'Disable isort check') table.add_row('buildtest 
stylecheck --no-pyflakes', 'Disable pyflakes check') console.print(table) </DeepExtract> elif command in ['unittests', 'test']: <DeepExtract> table = Table(title='Buildtest unittests', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest unittests', 'Run all unittests, tests are executed via pytest') table.add_row('buildtest unittests --coverage', 'Enable coverage reporting when running unittests') table.add_row("buildtest unittests --pytestopts '-vra'", "Pass pytest options '-vra' when running test") table.add_row("buildtest unittests --pytestopts '-m schema'", "Run all tests with marker name 'schema'. This is equivalent to 'pytest -m schema' ") table.add_row('buildtest unittests -s $BUILDTEST_ROOT/tests/cli/test_config.py', 'Specify a list of files to run unittests instead of running all tests') console.print(table) </DeepExtract>
def buildtest_help(command): """Entry point for ``buildtest help`` which display a summary of how to use buildtest commands Args: command (str): Name of buildtest command specified by ``buildtest help <command>`` """ if command in ['build', 'bd']: table = Table(title='Building buildspecs', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest build -b <file>', 'Build a single buildspec file') table.add_row('buildtest build -b <dir>', 'Build all buildspecs recursively in a given directory') table.add_row('buildtest build -b <file> -b <dir>', 'Build buildspecs by file and directory') table.add_row('buildtest build -b <file> -b <dir> -x <file> -x <dir>', 'Exclude files and directory when building buildspecs') table.add_row('buildtest build -t pass -t python', "Build buildspecs by tagname 'pass' and 'python'") table.add_row('buildtest build -t pass,fail', 'Build buildspecs by tagnames that are specified as comma sperated list') table.add_row('buildtest build -e <executor1> -e <executor2>', 'Building buildspecs by executor') table.add_row('buildtest build -b <file> -t <tagname1> -e <executor1>', 'Building buildspecs with file, directory, tags, and executors') table.add_row('buildtest build -b tutorials --filter type=script', "Build all tests in directory 'tutorials' and filter tests by type='script'") table.add_row('buildtest build -b tutorials --filter tags=pass', "Build all tests in directory 'tutorials' and filter tests by tags='pass'") table.add_row('buildtest build -b tutorials --filter maintainers=@bob', "Build all tests in directory 'tutorials' and filter tests by maintainers='@bob'") table.add_row('buildtest build --helpfilter', 'Show list of filter fields used with --filter option') table.add_row('buildtest -c config.yml build -b <file>', "Use buildtest configuration file 'config.yml' ") table.add_row('buildtest build -b <file> --rebuild 5', 'Rebuild a test 5 times') table.add_row('buildtest build -b <file> --testdir /tmp', 'Write tests in /tmp') table.add_row('buildtest build --rerun', "Run last successful 'buildtest build' command") table.add_row('buildtest -r $HOME/python.json build -t python', "Write test to report file $HOME/python.json for all test run via 'python' tag") table.add_row('buildtest build -b <file> --module-purge --modules gcc,python', "For every test run 'module purge' and then load 'gcc' and 'python' module") table.add_row('buildtest build -b <file> --unload-modules gcc/9.3.0 --modules gcc/10.3.0', "For every test run 'module unload gcc/9.3.0' and then load 'gcc/10.3.0'") table.add_row('buildtest build -b /tmp/hostname.yml --maxpendtime 120 --pollinterval 10', 'Poll jobs every 10 seconds and maximum pending time for jobs to 120 sec when submitting batch job. 
Job will be cancelled after 120sec if job is pending') table.add_row('buildtest build -b <file> --account dev', "Use project 'dev' when submitting batch jobs") table.add_row('buildtest build -b <file> --timeout 60', 'Test will run till it reaches timeout of 60sec and then it will be cancelled if it exceeds the limit.') table.add_row('buildtest build -t python --limit=5', 'Limit number of test runs to 5') console.print(table) elif command in ['buildspec', 'bc']: table = Table(title='Finding Buildspecs', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest buildspec find', 'Discover and validate all buildspecs and load all validated buildspecs in cache') table.add_row('buildtest buildspec find --rebuild', 'Rebuild cache file') table.add_row('buildtest buildspec find --pager', 'Paginate output of buildspec cache') table.add_row('buildtest buildspec find --root /tmp --rebuild', 'Discover buildspecs in /tmp and rebuild buildspec cache') table.add_row('buildtest buildspec find --quiet --rebuild', "Rebuild cache file but don't display output of cache") table.add_row('buildtest buildspec find --paths', 'Print all root directories for buildspecs') table.add_row('buildtest buildspec find --buildspec', 'List all available buildspecs from cache') table.add_row('buildtest buildspec find --executors', 'List all unique executors from cache') table.add_row('buildtest buildspec find --filter type=script,tags=pass', "Filter buildspec cache based on type=script and tags='pass'") table.add_row('buildtest buildspec find --filter buildspec=<path>', 'Filter cache by buildspec file') table.add_row('buildtest buildspec find --format name,description', "Format table columns by field: 'name', and 'description'") table.add_row('buildtest buildspec find --group-by-tags', 'Group tests by tag name') table.add_row('buildtest buildspec find --group-by-executor', 'Group tests by executor name') table.add_row('buildtest buildspec find --helpfilter', 'Show all filter fields') table.add_row('buildtest buildspec find --helpformat', 'Show all format fields') table.add_row('buildtest buildspec find --terse', 'Display output in terse format') table.add_row('buildtest buildspec find --row-count', 'Print total count of records from the table') table.add_row('buildtest buildspec find --count=5', 'Limit output of buildspec cache to 5 elements') table.add_row('buildtest buildspec find invalid', 'Show invalid buildspecs') table.add_row('buildtest buildspec find invalid --error', 'Show invalid buildspecs with error messages') console.print(table) table = Table(title='Validating Buildspecs', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest buildspec validate -b <file>', 'Validate a buildspec with JSON Schema') table.add_row('buildtest buildspec validate -b /tmp/ -x /tmp/network', 'Validate all buildspecs in directory /tmp but exclude /tmp/network') table.add_row('buildtest buildspec validate -t python -t mac', "Validate all buildspecs for tagname 'python' and 'mac'") table.add_row('buildtest buildspec validate -e generic.local.bash', "Validate all buildspecs for executor 'generic.local.bash'") console.print(table) table = Table(title='Additional Features of Buildspecs', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') 
table.add_row('buildtest buildspec summary', 'Show summary of buildspec cache file') table.add_row('buildtest buildspec summary --pager', 'Paginate the output of summary for buildspec cache') table.add_row('buildtest buildspec show python_hello', "Show content of buildspec based on test name 'python_hello'") table.add_row('buildtest buildspec show python_hello --theme emacs', "Use color theme 'emacs' for showing content of test") table.add_row('buildtest buildspec show-fail', 'Show content of buildspec on all failed tests') table.add_row('buildtest buildspec show-fail exit1_fail', "Show content of test 'exit1_fail'") table.add_row('buildtest buildspec edit-test python_hello', "Open test 'python_hello' in editor and validate file upon closing") table.add_row('buildtest buildspec edit-file $BUILDTEST_ROOT/tutorials/sleep.yml', 'Open file $BUILDTEST_ROOT/tutorials/sleep.yml in editor and validate file upon closing') table.add_row('buildtest buildspec maintainers find johndoe', "Find buildspec with maintainer name 'johndoe'") table.add_row('buildtest buildspec maintainers --list', 'List all maintainers from buildspec cache') table.add_row('buildtest buildspec maintainers --list --terse --no-header', 'List all maintainers in machine readable format without header') table.add_row('buildtest buildspec maintainers --breakdown', 'Show breakdown of maintainers by buildspecs') console.print(table) elif command in ['config', 'cg']: table = Table(title='Configuring Buildtest', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest config view', 'View content of configuration file') table.add_row('buildtest config view --pager', 'Paginate output of configuration file') table.add_row('buildtest config validate', 'Validate configuration file with JSON schema') table.add_row('buildtest config edit', 'Edit configuration file in your preferred editor') table.add_row('buildtest config executors', 'List all executors in flat listing from configuration file') table.add_row('buildtest config executors --yaml', 'Show executor configuration in YAML format') table.add_row('buildtest config executors --json', 'Show executor configuration in JSON format') table.add_row('buildtest config executors --disabled', 'List all disabled executors') table.add_row('buildtest config executors --json', 'List all invalid executors') table.add_row('buildtest config path', 'Show path to configuration file') table.add_row('buildtest config systems', 'List all available system entries in configuration file') table.add_row('buildtest -c /tmp/config.yml config validate', 'Validate configuration file /tmp/config.yml') table.add_row('buildtest config compilers', 'List all compilers from configuration file in flat listing') table.add_row('buildtest config compilers find', 'Detect compilers and update configuration file') table.add_row('buildtest config compilers find --detailed --update', 'Show detailed output when finding compiler and update configuration file with new compilers') table.add_row('buildtest config compilers test', 'Test each compiler instance by performing module load test') console.print(table) elif command in ['inspect', 'it']: table = Table(title='Inspecting a test', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest inspect list', 'Display all test names, ids, and corresponding buildspec file') 
table.add_row('buildtest inspect list -t', 'Show output in terse format') table.add_row('buildtest inspect list --pager', 'Paginate output of inspect list') table.add_row('buildtest inspect name hello', "Display last run for test name 'hello'") table.add_row('buildtest inspect name hello/9ac bar/ac9', "Display record for test 'hello/9ac' and 'bar/ac9'. Will find first match for each test ID") table.add_row('buildtest inspect buildspec tutorials/vars.yml', "Fetch latest runs for all tests in buildspec file 'tutorials/vars.yml'") table.add_row('buildtest inspect query -o exit1_fail', "Display content of output file for latest run for test name 'exit1_fail'") table.add_row('buildtest inspect query -e hello', "Display content of error file for test name 'hello'") table.add_row('buildtest inspect query exit1_fail/', "Display all runs for tests 'exit1_fail'") table.add_row("buildtest inspect query 'exit1_fail/(24|52)'", "Use regular expression when searching for test via 'buildtest inspect query'") console.print(table) elif command in ['report', 'rt']: table = Table(title='Viewing Test Report', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest report', 'Display all test results') table.add_row('buildtest report --pager', 'Paginate output of test results') table.add_row('buildtest report --filter returncode=0', 'Filter test results by returncode=0') table.add_row('buildtest report --filter state=PASS,tags=python', "Filter test by filter fields 'state', 'tags'.") table.add_row('buildtest report --filter buildspec=tutorials/vars.yml', "Filter report by buildspec file 'tutorials/vars.yml") table.add_row('buildtest report --format name,state,buildspec', "Format report table by field 'name', 'state', 'buildspec'") table.add_row('buildtest report --helpfilter', 'List all filter fields') table.add_row('buildtest report --helpformat', 'List all format fields') table.add_row('buildtest report --latest', 'Retrieve latest record for all tests') table.add_row('buildtest report --count=5', 'Retrieve 5 records from report file') table.add_row('buildtest -r /tmp/result.json report', 'Read report file /tmp/result.json and display result') table.add_row('buildtest report --fail', 'Show all test failures') table.add_row('buildtest report --pass', 'Show all test passed') table.add_row('buildtest report --start 2022-01-01 --end 2022-01-05', 'Show all test records in the date range from [2022-01-01, 2022-01-05]') table.add_row('buildtest report --terse', 'Print report in terse format') table.add_row('buildtest report --row-count', 'Print total count of records from the table') table.add_row('buildtest report list', 'List all report files') table.add_row('buildtest report clear', 'Remove content of default report file') table.add_row('buildtest report path', 'Print full path to the report file being used') table.add_row('buildtest report summary', 'Show summary of test report') table.add_row('buildtest report summary --detailed', 'Show detailed summary of test report') table.add_row('buildtest report summary --pager', 'Paginate output of report summary') console.print(table) elif command == 'path': table = Table(title='Get Path to Test', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest path circle_area', "Get test root for test name 'circle_area'") table.add_row('buildtest path -t 
circle_area', "Get test script for test name 'circle_area'") table.add_row('buildtest path -o circle_area', "Get output file for test name 'circle_area'") table.add_row('buildtest path -e circle_area', "Get error file for test name 'circle_area'") table.add_row('buildtest path -b circle_area', "Get build script for test name 'circle area'") table.add_row('buildtest path --stagedir circle_area', "Get stage directory for test name 'circle_area'") table.add_row('buildtest path circle_area/abc', "Get test root for test name 'circle_area' starting with test ID 'abc'") console.print(table) elif command in ['history', 'hy']: table = Table(title='Editing buildspec', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest history list', 'List all build history files') table.add_row('buildtest history list --terse', 'Print output in terse format') table.add_row('buildtest history list --pager', 'Paginate output of the history list') table.add_row('buildtest history query 0', "Query content of history build identifier '0'") table.add_row('buildtest history query 0 --pager', "Paginate the query content of history build identifier '0'") table.add_row('buildtest history query 0 --log', "Open logfile for build identifier '0'") console.print(table) elif command == 'cdash': table = Table(title='Editing buildspec', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest cdash upload DEMO', "Upload all tests to cdash with build name 'DEMO'") table.add_row('buildtest cdash upload DEMO --open', 'Upload test results to CDASH and open results in web browser') table.add_row('buildtest --report /tmp/result.json cdash upload DAILY_CHECK ', "Upload all tests from report file '/tmp/result.json' with build name DAILY_CHECK") table.add_row('buildtest cdash upload --site laptop DEMO', "Upload tests to CDASH with site named called 'laptop'") table.add_row('buildtest cdash view', 'Open CDASH project in web-browser') console.print(table) elif command == 'schema': table = Table(title='Buildtest Schemas', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest schema', 'Report all buildtest schema files') table.add_row('buildtest schema -n script.schema.json -e ', 'Show example for schema type script-v1.0-schema.json') table.add_row('buildtest schema -n script.schema.json -j', 'Show content of JSON schema for script.schema.json') console.print(table) elif command in ['stylecheck', 'style']: table = Table(title='Buildtest stylecheck', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', justify='left', style='magenta') table.add_row('buildtest stylecheck', 'Run all style check without applying changes to codebase') table.add_row('buildtest stylecheck -a', 'Run all style check and apply changes to codebase') table.add_row('buildtest stylecheck --no-black', 'Disable black style check') table.add_row('buildtest stylecheck --no-isort', 'Disable isort check') table.add_row('buildtest stylecheck --no-pyflakes', 'Disable pyflakes check') console.print(table) elif command in ['unittests', 'test']: table = Table(title='Buildtest unittests', show_lines=False) table.add_column('Command', justify='left', style='cyan') table.add_column('Description', 
justify='left', style='magenta') table.add_row('buildtest unittests', 'Run all unittests, tests are executed via pytest') table.add_row('buildtest unittests --coverage', 'Enable coverage reporting when running unittests') table.add_row("buildtest unittests --pytestopts '-vra'", "Pass pytest options '-vra' when running test") table.add_row("buildtest unittests --pytestopts '-m schema'", "Run all tests with marker name 'schema'. This is equivalent to 'pytest -m schema' ") table.add_row('buildtest unittests -s $BUILDTEST_ROOT/tests/cli/test_config.py', 'Specify a list of files to run unittests instead of running all tests') console.print(table) </DeepExtract>
buildtest
positive
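Every branch of `buildtest_help` in the record above repeats one Rich idiom: build a two-column table, add one row per sub-command, and print it on a shared console. A minimal standalone sketch of that idiom, assuming the `rich` package and that the record's `console` is a module-level `rich.console.Console` (the sample row is illustrative):

from rich.console import Console
from rich.table import Table

console = Console()

def print_command_table(title, rows):
    # Two-column layout matching the record: cyan commands, magenta descriptions.
    table = Table(title=title, show_lines=False)
    table.add_column('Command', justify='left', style='cyan')
    table.add_column('Description', justify='left', style='magenta')
    for command, description in rows:
        table.add_row(command, description)
    console.print(table)

print_command_table('Building buildspecs', [
    ('buildtest build -b <file>', 'Build a single buildspec file'),
])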
def merge(dict1: MutableMapping[Any, Any], dict2: Mapping[Any, Any]) -> None:
    """Merge the data from `dict2` into the `dict1` dictionary, making copies
    of nested dictionaries.

    >>> d = {1: 'foo', 3: 'baz'}
    >>> merge(d, {1: 'Foo', 2: 'Bar'})
    >>> sorted(d.items())
    [(1, 'Foo'), (2, 'Bar'), (3, 'baz')]

    :param dict1: the dictionary to merge into
    :param dict2: the dictionary containing the data that should be merged
    """
    for (key, val2) in dict2.items():
        if val2 is not None:
            val1 = dict1.get(key)
            if isinstance(val2, dict):
                if val1 is None:
                    val1 = {}
                if isinstance(val1, Alias):
                    val1 = (val1, val2)
                elif isinstance(val1, tuple):
                    (alias, others) = val1
                    others = others.copy()
                    <DeepExtract>
                    for (key, val2) in val2.items():
                        if val2 is not None:
                            val1 = others.get(key)
                            if isinstance(val2, dict):
                                if val1 is None:
                                    val1 = {}
                                if isinstance(val1, Alias):
                                    val1 = (val1, val2)
                                elif isinstance(val1, tuple):
                                    (alias, others) = val1
                                    others = others.copy()
                                    merge(others, val2)
                                    val1 = (alias, others)
                                else:
                                    val1 = val1.copy()
                                    merge(val1, val2)
                            else:
                                val1 = val2
                            others[key] = val1
                    </DeepExtract>
                    val1 = (alias, others)
                else:
                    val1 = val1.copy()
                    <DeepExtract>
                    for (key, val2) in val2.items():
                        if val2 is not None:
                            val1 = val1.get(key)
                            if isinstance(val2, dict):
                                if val1 is None:
                                    val1 = {}
                                if isinstance(val1, Alias):
                                    val1 = (val1, val2)
                                elif isinstance(val1, tuple):
                                    (alias, others) = val1
                                    others = others.copy()
                                    merge(others, val2)
                                    val1 = (alias, others)
                                else:
                                    val1 = val1.copy()
                                    merge(val1, val2)
                            else:
                                val1 = val2
                            val1[key] = val1
                    </DeepExtract>
            else:
                val1 = val2
            dict1[key] = val1
def merge(dict1: MutableMapping[Any, Any], dict2: Mapping[Any, Any]) -> None:
    """Merge the data from `dict2` into the `dict1` dictionary, making copies
    of nested dictionaries.

    >>> d = {1: 'foo', 3: 'baz'}
    >>> merge(d, {1: 'Foo', 2: 'Bar'})
    >>> sorted(d.items())
    [(1, 'Foo'), (2, 'Bar'), (3, 'baz')]

    :param dict1: the dictionary to merge into
    :param dict2: the dictionary containing the data that should be merged
    """
    for (key, val2) in dict2.items():
        if val2 is not None:
            val1 = dict1.get(key)
            if isinstance(val2, dict):
                if val1 is None:
                    val1 = {}
                if isinstance(val1, Alias):
                    val1 = (val1, val2)
                elif isinstance(val1, tuple):
                    (alias, others) = val1
                    others = others.copy()
                    for (key, val2) in val2.items():
                        if val2 is not None:
                            val1 = others.get(key)
                            if isinstance(val2, dict):
                                if val1 is None:
                                    val1 = {}
                                if isinstance(val1, Alias):
                                    val1 = (val1, val2)
                                elif isinstance(val1, tuple):
                                    (alias, others) = val1
                                    others = others.copy()
                                    merge(others, val2)
                                    val1 = (alias, others)
                                else:
                                    val1 = val1.copy()
                                    merge(val1, val2)
                            else:
                                val1 = val2
                            others[key] = val1
                    val1 = (alias, others)
                else:
                    val1 = val1.copy()
                    for (key, val2) in val2.items():
                        if val2 is not None:
                            val1 = val1.get(key)
                            if isinstance(val2, dict):
                                if val1 is None:
                                    val1 = {}
                                if isinstance(val1, Alias):
                                    val1 = (val1, val2)
                                elif isinstance(val1, tuple):
                                    (alias, others) = val1
                                    others = others.copy()
                                    merge(others, val2)
                                    val1 = (alias, others)
                                else:
                                    val1 = val1.copy()
                                    merge(val1, val2)
                            else:
                                val1 = val2
                            val1[key] = val1
            else:
                val1 = val2
            dict1[key] = val1
babel
positive
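The doctest in `merge` only exercises flat keys; the point of the nested copies is that dictionaries coming from `dict2` are never shared with `dict1`. Note that the single-level inlining in the record shadows `key`, `val1`, and `val2` (for example `val1 = val1.get(key)` overwrites the container it is about to write into), so the flattened copy no longer behaves like the recursive original. The usage sketch below therefore assumes the original recursive `merge`:

d1 = {'messages': {'greeting': 'hi'}}
d2 = {'messages': {'farewell': 'bye'}}
merge(d1, d2)
assert d1['messages'] == {'greeting': 'hi', 'farewell': 'bye'}
# The nested mapping was copied into d1, not aliased:
assert d1['messages'] is not d2['messages']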
def test_merge_container_yaml_limit_arch(mocker, tmpdir):
    mocker.patch.object(glob, 'glob', return_value=True)
    mocker.patch.object(subprocess, 'run')
    <DeepExtract>
    if 'docker' == 'osbs':
        from cekit.builders.docker_builder import DockerBuilder as BuilderImpl
    elif 'osbs' == 'osbs':
        from cekit.builders.osbs import OSBSBuilder as BuilderImpl
    elif 'podman' == 'osbs':
        from cekit.builders.podman import PodmanBuilder as BuilderImpl
    elif 'buildah' == 'osbs':
        from cekit.builders.buildah import BuildahBuilder as BuilderImpl
    else:
        raise Exception('Builder engine %s is not supported' % 'osbs')
    mocker.patch('cekit.tools.decision')
    # the inliner substituted the literal 'osbs' as the assignment target here;
    # 'builder' restores a valid local name
    builder = BuilderImpl(Map(merge_dicts(common_params, {})))
    builder.dist_git_dir = '/tmp'
    builder.dist_git = DistGitMock()
    builder.artifacts = []
    </DeepExtract>
    builder.dist_git_dir = str(tmpdir.mkdir('target'))
    container_yaml_f = 'container.yaml'
    source = 'souce_cont.yaml'
    with open(source, 'w') as file_:
        yaml.dump({'tags': ['foo']}, file_)
    <DeepExtract>
    with open(source, 'r') as _file:
        generated = yaml.safe_load(_file)
    target = {}
    if os.path.exists(container_yaml_f):
        with open(container_yaml_f, 'r') as _file:
            target = yaml.safe_load(_file)
    target.update(generated)
    if glob.glob(os.path.join(builder.dist_git_dir, 'repos', '*.repo')):
        if 'platforms' in target:
            target['platforms']['only'] = ['x86_64']
        else:
            target['platforms'] = {'only': ['x86_64']}
    with open(container_yaml_f, 'w') as _file:
        yaml.dump(target, _file, default_flow_style=False)
    </DeepExtract>
    with open(container_yaml_f, 'r') as file_:
        container_yaml = yaml.safe_load(file_)
    os.remove(container_yaml_f)
    os.remove(source)
    assert 'x86_64' in container_yaml['platforms']['only']
    assert len(container_yaml['platforms']['only']) == 1
def test_merge_container_yaml_limit_arch(mocker, tmpdir):
    mocker.patch.object(glob, 'glob', return_value=True)
    mocker.patch.object(subprocess, 'run')
    if 'docker' == 'osbs':
        from cekit.builders.docker_builder import DockerBuilder as BuilderImpl
    elif 'osbs' == 'osbs':
        from cekit.builders.osbs import OSBSBuilder as BuilderImpl
    elif 'podman' == 'osbs':
        from cekit.builders.podman import PodmanBuilder as BuilderImpl
    elif 'buildah' == 'osbs':
        from cekit.builders.buildah import BuildahBuilder as BuilderImpl
    else:
        raise Exception('Builder engine %s is not supported' % 'osbs')
    mocker.patch('cekit.tools.decision')
    # the inliner substituted the literal 'osbs' as the assignment target here;
    # 'builder' restores a valid local name
    builder = BuilderImpl(Map(merge_dicts(common_params, {})))
    builder.dist_git_dir = '/tmp'
    builder.dist_git = DistGitMock()
    builder.artifacts = []
    builder.dist_git_dir = str(tmpdir.mkdir('target'))
    container_yaml_f = 'container.yaml'
    source = 'souce_cont.yaml'
    with open(source, 'w') as file_:
        yaml.dump({'tags': ['foo']}, file_)
    with open(source, 'r') as _file:
        generated = yaml.safe_load(_file)
    target = {}
    if os.path.exists(container_yaml_f):
        with open(container_yaml_f, 'r') as _file:
            target = yaml.safe_load(_file)
    target.update(generated)
    if glob.glob(os.path.join(builder.dist_git_dir, 'repos', '*.repo')):
        if 'platforms' in target:
            target['platforms']['only'] = ['x86_64']
        else:
            target['platforms'] = {'only': ['x86_64']}
    with open(container_yaml_f, 'w') as _file:
        yaml.dump(target, _file, default_flow_style=False)
    with open(container_yaml_f, 'r') as file_:
        container_yaml = yaml.safe_load(file_)
    os.remove(container_yaml_f)
    os.remove(source)
    assert 'x86_64' in container_yaml['platforms']['only']
    assert len(container_yaml['platforms']['only']) == 1
cekit
positive
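The constant-folded `if 'docker' == 'osbs': ...` chain in the record is what an engine-name dispatch looks like after the inliner substitutes a literal argument. A hypothetical standalone sketch of that dispatch (the module and class paths are taken from the record; `load_builder_class` is an illustrative name, not cekit's API):

def load_builder_class(engine):
    registry = {
        'docker': ('cekit.builders.docker_builder', 'DockerBuilder'),
        'osbs': ('cekit.builders.osbs', 'OSBSBuilder'),
        'podman': ('cekit.builders.podman', 'PodmanBuilder'),
        'buildah': ('cekit.builders.buildah', 'BuildahBuilder'),
    }
    if engine not in registry:
        raise Exception('Builder engine %s is not supported' % engine)
    module_name, class_name = registry[engine]
    # Import lazily so unsupported engines never pull in their dependencies.
    module = __import__(module_name, fromlist=[class_name])
    return getattr(module, class_name)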
def __get__(self, obj, tp):
    <DeepExtract>
    result = _import_module(self.mod)
    </DeepExtract>
    setattr(obj, self.name, result)
    try:
        delattr(obj.__class__, self.name)
    except AttributeError:
        pass
    return result
def __get__(self, obj, tp):
    result = _import_module(self.mod)
    setattr(obj, self.name, result)
    try:
        delattr(obj.__class__, self.name)
    except AttributeError:
        pass
    return result
AdvancedCloudFormation
positive
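This `__get__` is the classic self-destructing lazy-import descriptor (the same shape `six` uses for its moved modules): the first attribute access imports the target module, caches it on the instance, and deletes the descriptor from the class so later lookups bypass it entirely. A self-contained sketch with `_import_module` spelled out (class and attribute names are illustrative):

import importlib

def _import_module(name):
    return importlib.import_module(name)

class LazyModuleAttr(object):
    def __init__(self, name, mod):
        self.name = name
        self.mod = mod

    def __get__(self, obj, tp):
        result = _import_module(self.mod)
        setattr(obj, self.name, result)        # cache on the instance
        try:
            delattr(obj.__class__, self.name)  # retire the descriptor
        except AttributeError:
            pass
        return result

class Namespace(object):
    json = LazyModuleAttr('json', 'json')

ns = Namespace()
print(ns.json.dumps({'lazy': True}))  # first access triggers the import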
def test_relative_path(self): <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/bar/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./foo/test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./foo/test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/bar/foo/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', 
'.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) </DeepExtract> <DeepExtract> env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../baz/../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) </DeepExtract>
def test_relative_path(self): env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/bar/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./foo/test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('./foo/test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/bar/foo/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) 
self.assertEqual('http://127.0.0.1/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) env = {'SERVER_PROTOCOL': 'HTTP/1.1'} for key in args: if key.startswith('wsgi'): args[key.replace('_', '.', 1)] = args[key] del args[key] env.update(args) request.bind(env) try: bottle.redirect('../baz/../test.html', **query or {}) except bottle.HTTPResponse: r = _e() self.assertEqual(status, r.status) self.assertTrue(r.headers) self.assertEqual('http://127.0.0.1/foo/test.html', r.headers['Location']) </DeepExtract>
bottle-doc-zh-cn
positive
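The expected `Location` headers in this test follow ordinary RFC 3986 relative-reference resolution against the request URL, which can be sanity-checked with the standard library (assuming bottle resolves relative redirect targets the same way; the base URLs below are illustrative):

from urllib.parse import urljoin

assert urljoin('http://127.0.0.1/foo/', './test.html') == 'http://127.0.0.1/foo/test.html'
assert urljoin('http://127.0.0.1/foo/bar', './test.html') == 'http://127.0.0.1/foo/test.html'
assert urljoin('http://127.0.0.1/foo/', '../test.html') == 'http://127.0.0.1/test.html'
assert urljoin('http://127.0.0.1/foo/bar/', '../baz/../test.html') == 'http://127.0.0.1/foo/test.html'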
def forward(self, input, quantize=False):
    assert self.ratio[0] in self.width_mult_list, str(self.ratio[0]) + ' in? ' + str(self.width_mult_list)
    <DeepExtract>
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(self.in_channels_max * self.ratio[0] + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * self.in_channels_max * self.ratio[0]:
        new_v += divisor
    self.in_channels = new_v
    </DeepExtract>
    assert self.ratio[1] in self.width_mult_list, str(self.ratio[1]) + ' in? ' + str(self.width_mult_list)
    <DeepExtract>
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(self.out_channels_max * self.ratio[1] + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * self.out_channels_max * self.ratio[1]:
        new_v += divisor
    self.out_channels = new_v
    </DeepExtract>
    weight = self.weight[:self.in_channels, :self.out_channels, :, :]
    if self.groups != 1:
        self.groups = self.out_channels
    if self.bias is not None:
        bias = self.bias[:self.out_channels]
    else:
        bias = self.bias
    if quantize:
        if not hasattr(self, 'quantize_input'):
            self.quantize_input = QuantMeasure(self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1))
        qinput = self.quantize_input(input)
        weight_qparams = calculate_qparams(weight, num_bits=self.num_bits_weight, flatten_dims=(1, -1), reduce_dim=None)
        qweight = Quantize(weight, qparams=weight_qparams)
        if self.bias is not None:
            qbias = Quantize(bias, num_bits=self.num_bits_weight + self.num_bits, flatten_dims=(0, -1))
        else:
            qbias = None
        output = F.conv_transpose2d(qinput, qweight, qbias, self.stride, self.padding, self.output_padding, self.groups, self.dilation)
    else:
        output = F.conv_transpose2d(input, weight, bias, self.stride, self.padding, self.output_padding, self.groups, self.dilation)
    return output
def forward(self, input, quantize=False):
    assert self.ratio[0] in self.width_mult_list, str(self.ratio[0]) + ' in? ' + str(self.width_mult_list)
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(self.in_channels_max * self.ratio[0] + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * self.in_channels_max * self.ratio[0]:
        new_v += divisor
    self.in_channels = new_v
    assert self.ratio[1] in self.width_mult_list, str(self.ratio[1]) + ' in? ' + str(self.width_mult_list)
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(self.out_channels_max * self.ratio[1] + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * self.out_channels_max * self.ratio[1]:
        new_v += divisor
    self.out_channels = new_v
    weight = self.weight[:self.in_channels, :self.out_channels, :, :]
    if self.groups != 1:
        self.groups = self.out_channels
    if self.bias is not None:
        bias = self.bias[:self.out_channels]
    else:
        bias = self.bias
    if quantize:
        if not hasattr(self, 'quantize_input'):
            self.quantize_input = QuantMeasure(self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1))
        qinput = self.quantize_input(input)
        weight_qparams = calculate_qparams(weight, num_bits=self.num_bits_weight, flatten_dims=(1, -1), reduce_dim=None)
        qweight = Quantize(weight, qparams=weight_qparams)
        if self.bias is not None:
            qbias = Quantize(bias, num_bits=self.num_bits_weight + self.num_bits, flatten_dims=(0, -1))
        else:
            qbias = None
        output = F.conv_transpose2d(qinput, qweight, qbias, self.stride, self.padding, self.output_padding, self.groups, self.dilation)
    else:
        output = F.conv_transpose2d(input, weight, bias, self.stride, self.padding, self.output_padding, self.groups, self.dilation)
    return output
AGD
positive
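Both inlined blocks above are instances of the channel-rounding helper popularized by the MobileNet code bases; the unbound `divisor` and `min_value` names are the helper's parameters, which the inliner left unsubstituted. Its usual standalone form, as a sketch (not necessarily AGD's exact signature):

def make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, never dropping
    # more than 10% below the original value.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

assert make_divisible(37, 8) == 40
assert make_divisible(30, 8) == 32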
def beginResolvingTCP(self, client_id, command, decision, resolve_count):
    if self.worker_count < self.max_workers:
        <DeepExtract>
        hostname = self.extractHostname(command, decision)
        if hostname:
            worker = self.resolver_factory.createTCPClient()
            self.workers[worker.socket] = worker
            (identifier, all_sent) = worker.resolveHost(hostname)
            active_time = time.time()
            self.resolving[worker.w_id, identifier] = (client_id, hostname, hostname, command, decision)
            self.clients[client_id] = (worker.w_id, identifier, active_time, resolve_count)
            # 'worker' is the local resolver client created above
            self.active.append((active_time, client_id, worker.socket))
            if all_sent:
                self.poller.addReadSocket('read_resolver', worker.socket)
                self.resolving[worker.w_id, identifier] = (client_id, hostname, hostname, command, decision)
            else:
                self.poller.addWriteSocket('write_resolver', worker.socket)
                self.sending[worker.socket] = (client_id, hostname, hostname, command, decision)
        else:
            identifier = None
        identifier = identifier
        </DeepExtract>
        self.worker_count += 1
    else:
        self.waiting.append((client_id, command, decision, resolve_count))
        identifier = None
    return identifier
def beginResolvingTCP(self, client_id, command, decision, resolve_count):
    if self.worker_count < self.max_workers:
        hostname = self.extractHostname(command, decision)
        if hostname:
            worker = self.resolver_factory.createTCPClient()
            self.workers[worker.socket] = worker
            (identifier, all_sent) = worker.resolveHost(hostname)
            active_time = time.time()
            self.resolving[worker.w_id, identifier] = (client_id, hostname, hostname, command, decision)
            self.clients[client_id] = (worker.w_id, identifier, active_time, resolve_count)
            # 'worker' is the local resolver client created above
            self.active.append((active_time, client_id, worker.socket))
            if all_sent:
                self.poller.addReadSocket('read_resolver', worker.socket)
                self.resolving[worker.w_id, identifier] = (client_id, hostname, hostname, command, decision)
            else:
                self.poller.addWriteSocket('write_resolver', worker.socket)
                self.sending[worker.socket] = (client_id, hostname, hostname, command, decision)
        else:
            identifier = None
        identifier = identifier
        self.worker_count += 1
    else:
        self.waiting.append((client_id, command, decision, resolve_count))
        identifier = None
    return identifier
exaproxy
positive
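Beyond the four bookkeeping tables (`workers`, `resolving`, `clients`, `sending`), the interesting move in this record is switching the resolver socket between read and write interest depending on whether the DNS query was fully sent. The same pattern with the standard library's `selectors` module, as an illustration only (exaproxy's poller API differs):

import selectors

sel = selectors.DefaultSelector()

def watch_resolver_socket(sock, all_sent):
    # Query fully written: wait for the response.
    # Partial write: wait until the socket is writable again.
    event = selectors.EVENT_READ if all_sent else selectors.EVENT_WRITE
    sel.register(sock, event)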
def __init__(self, net_size):
    super(ShuffleNetV2, self).__init__()
    out_channels = configs[net_size]['out_channels']
    num_blocks = configs[net_size]['num_blocks']
    self.conv1 = nn.Conv2d(3, 24, kernel_size=3, stride=1, padding=1, bias=False)
    self.bn1 = nn.BatchNorm2d(24)
    self.in_channels = 24
    <DeepExtract>
    layers = [DownBlock(self.in_channels, out_channels[0])]
    for i in range(num_blocks[0]):
        layers.append(BasicBlock(out_channels[0]))
    self.in_channels = out_channels[0]
    self.layer1 = nn.Sequential(*layers)
    </DeepExtract>
    <DeepExtract>
    layers = [DownBlock(self.in_channels, out_channels[1])]
    for i in range(num_blocks[1]):
        layers.append(BasicBlock(out_channels[1]))
    self.in_channels = out_channels[1]
    self.layer2 = nn.Sequential(*layers)
    </DeepExtract>
    <DeepExtract>
    layers = [DownBlock(self.in_channels, out_channels[2])]
    for i in range(num_blocks[2]):
        layers.append(BasicBlock(out_channels[2]))
    self.in_channels = out_channels[2]
    self.layer3 = nn.Sequential(*layers)
    </DeepExtract>
    self.conv2 = nn.Conv2d(out_channels[2], out_channels[3], kernel_size=1, stride=1, padding=0, bias=False)
    self.bn2 = nn.BatchNorm2d(out_channels[3])
    self.linear = nn.Linear(out_channels[3], 10)
def __init__(self, net_size):
    super(ShuffleNetV2, self).__init__()
    out_channels = configs[net_size]['out_channels']
    num_blocks = configs[net_size]['num_blocks']
    self.conv1 = nn.Conv2d(3, 24, kernel_size=3, stride=1, padding=1, bias=False)
    self.bn1 = nn.BatchNorm2d(24)
    self.in_channels = 24
    layers = [DownBlock(self.in_channels, out_channels[0])]
    for i in range(num_blocks[0]):
        layers.append(BasicBlock(out_channels[0]))
    self.in_channels = out_channels[0]
    self.layer1 = nn.Sequential(*layers)
    layers = [DownBlock(self.in_channels, out_channels[1])]
    for i in range(num_blocks[1]):
        layers.append(BasicBlock(out_channels[1]))
    self.in_channels = out_channels[1]
    self.layer2 = nn.Sequential(*layers)
    layers = [DownBlock(self.in_channels, out_channels[2])]
    for i in range(num_blocks[2]):
        layers.append(BasicBlock(out_channels[2]))
    self.in_channels = out_channels[2]
    self.layer3 = nn.Sequential(*layers)
    self.conv2 = nn.Conv2d(out_channels[2], out_channels[3], kernel_size=1, stride=1, padding=0, bias=False)
    self.bn2 = nn.BatchNorm2d(out_channels[3])
    self.linear = nn.Linear(out_channels[3], 10)
dhp
positive
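The three inlined blocks in `__init__` differ only in their index, so the extracted helper was presumably a `_make_layer`-style method. A sketch reconstructing it from the repeated block (names such as `DownBlock`, `BasicBlock`, and `nn` come from the record):

def _make_layer(self, out_channels, num_blocks):
    # One downsampling block followed by num_blocks basic blocks.
    layers = [DownBlock(self.in_channels, out_channels)]
    for i in range(num_blocks):
        layers.append(BasicBlock(out_channels))
    self.in_channels = out_channels
    return nn.Sequential(*layers)

# e.g. self.layer1 = self._make_layer(out_channels[0], num_blocks[0])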
def getSignatureKey(key, dateStamp, regionName, serviceName):
    """
    Copied from https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
    """
    <DeepExtract>
    kDate = hmac.new(('AWS4' + key).encode('utf-8'), dateStamp.encode('utf-8'), hashlib.sha256).digest()
    </DeepExtract>
    <DeepExtract>
    kRegion = hmac.new(kDate, regionName.encode('utf-8'), hashlib.sha256).digest()
    </DeepExtract>
    <DeepExtract>
    kService = hmac.new(kRegion, serviceName.encode('utf-8'), hashlib.sha256).digest()
    </DeepExtract>
    <DeepExtract>
    kSigning = hmac.new(kService, 'aws4_request'.encode('utf-8'), hashlib.sha256).digest()
    </DeepExtract>
    return kSigning
def getSignatureKey(key, dateStamp, regionName, serviceName):
    """
    Copied from https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
    """
    kDate = hmac.new(('AWS4' + key).encode('utf-8'), dateStamp.encode('utf-8'), hashlib.sha256).digest()
    kRegion = hmac.new(kDate, regionName.encode('utf-8'), hashlib.sha256).digest()
    kService = hmac.new(kRegion, serviceName.encode('utf-8'), hashlib.sha256).digest()
    kSigning = hmac.new(kService, 'aws4_request'.encode('utf-8'), hashlib.sha256).digest()
    return kSigning
a4kScrapers
positive
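A usage sketch for the derived key: in AWS Signature Version 4, the final request signature is an HMAC-SHA256 of the string-to-sign under `kSigning`. The credential, date, and string-to-sign below are illustrative placeholders, not real values:

import hashlib
import hmac

signing_key = getSignatureKey('EXAMPLE_SECRET_KEY', '20150830', 'us-east-1', 'iam')
string_to_sign = 'AWS4-HMAC-SHA256\n20150830T123600Z\n...'
signature = hmac.new(signing_key, string_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()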
def forward(self, sr, hr):
    def _forward(x):
        x = self.sub_mean(x)
        x = self.vgg(x)
        return x
    <DeepExtract>
    sr = self.sub_mean(sr)
    sr = self.vgg(sr)
    vgg_sr = sr
    </DeepExtract>
    with torch.no_grad():
        <DeepExtract>
        # hr.detach() cannot be an assignment target; bind it to a local first
        hr_d = hr.detach()
        hr_d = self.sub_mean(hr_d)
        hr_d = self.vgg(hr_d)
        vgg_hr = hr_d
        </DeepExtract>
    loss = F.mse_loss(vgg_sr, vgg_hr)
    return loss
def forward(self, sr, hr):
    def _forward(x):
        x = self.sub_mean(x)
        x = self.vgg(x)
        return x
    sr = self.sub_mean(sr)
    sr = self.vgg(sr)
    vgg_sr = sr
    with torch.no_grad():
        # hr.detach() cannot be an assignment target; bind it to a local first
        hr_d = hr.detach()
        hr_d = self.sub_mean(hr_d)
        hr_d = self.vgg(hr_d)
        vgg_hr = hr_d
    loss = F.mse_loss(vgg_sr, vgg_hr)
    return loss
dhp
positive
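The local `_forward` helper in this record is defined but, after inlining, never called; the pre-inlining body presumably read:

vgg_sr = _forward(sr)
with torch.no_grad():
    vgg_hr = _forward(hr.detach())
loss = F.mse_loss(vgg_sr, vgg_hr)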
def add_ir_block(tcns, block_op_types, **kwargs):
    (t, c, n, s) = tcns
    assert n == 1
    <DeepExtract>
    ret = _get_divisible_by(int(int(c * self.width_ratio)), self.width_divisor, self.width_divisor)
    out_depth = ret
    </DeepExtract>
    dim_in = self.last_depth
    <DeepExtract>
    ret = PRIMITIVES[block_op_types[0]](dim_in, out_depth, expansion=t, stride=s, bn_type=self.bn_type, width_divisor=self.width_divisor, dw_skip_bn=self.dw_skip_bn, dw_skip_relu=self.dw_skip_relu, **kwargs)
    (op, ret_depth) = (ret, ret.output_depth)
    </DeepExtract>
    self.last_depth = ret_depth
    return op
def add_ir_block(tcns, block_op_types, **kwargs):
    (t, c, n, s) = tcns
    assert n == 1
    ret = _get_divisible_by(int(int(c * self.width_ratio)), self.width_divisor, self.width_divisor)
    out_depth = ret
    dim_in = self.last_depth
    ret = PRIMITIVES[block_op_types[0]](dim_in, out_depth, expansion=t, stride=s, bn_type=self.bn_type, width_divisor=self.width_divisor, dw_skip_bn=self.dw_skip_bn, dw_skip_relu=self.dw_skip_relu, **kwargs)
    (op, ret_depth) = (ret, ret.output_depth)
    self.last_depth = ret_depth
    return op
ATSS
positive
def breakStatement(self):
    localctx = BraketPragmasParser.BreakStatementContext(self, self._ctx, self.state)
    <DeepExtract>
    if hasattr(localctx, 'enterBraketPragma'):
        localctx.enterBraketPragma(self)
    </DeepExtract>
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 425
        self.match(BraketPragmasParser.BREAK)
        self.state = 426
        self.match(BraketPragmasParser.SEMICOLON)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        <DeepExtract>
        if hasattr(listener, 'exitBraketPragma'):
            listener.exitBraketPragma(self)
        </DeepExtract>
    return localctx
def breakStatement(self):
    localctx = BraketPragmasParser.BreakStatementContext(self, self._ctx, self.state)
    if hasattr(localctx, 'enterBraketPragma'):
        localctx.enterBraketPragma(self)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 425
        self.match(BraketPragmasParser.BREAK)
        self.state = 426
        self.match(BraketPragmasParser.SEMICOLON)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        if hasattr(listener, 'exitBraketPragma'):
            listener.exitBraketPragma(self)
    return localctx
amazon-braket-default-simulator-python
positive
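`breakStatement` has the shape of every ANTLR4-generated Python rule method; the two inlined `hasattr` blocks stand in for the runtime's `enterRule`/`exitRule` calls (note that the second block references an unbound `listener` name, an artifact of the inlining). The usual generated form looks like this sketch, where the rule index 58 is illustrative:

def breakStatement(self):
    localctx = BraketPragmasParser.BreakStatementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 58, self.RULE_breakStatement)  # fires enter listeners
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 425
        self.match(BraketPragmasParser.BREAK)
        self.state = 426
        self.match(BraketPragmasParser.SEMICOLON)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()  # fires exit listeners
    return localctx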
def __get__(self, obj, tp):
    <DeepExtract>
    result = _import_module(self.mod)
    </DeepExtract>
    setattr(obj, self.name, result)
    try:
        delattr(obj.__class__, self.name)
    except AttributeError:
        pass
    return result
def __get__(self, obj, tp):
    result = _import_module(self.mod)
    setattr(obj, self.name, result)
    try:
        delattr(obj.__class__, self.name)
    except AttributeError:
        pass
    return result
CobaltSplunk
positive
def __resource_descriptor(self, resource_path, methods):
    """Describes a resource.

    Args:
        resource_path: string, the path of the resource (e.g., 'entries.items')
        methods: list of tuples of type
            (endpoints.Service, protorpc.remote._RemoteMethodInfo),
            the methods that serve this resource.

    Returns:
        Dictionary describing the resource.
    """
    descriptor = {}
    method_map = {}
    sub_resource_index = collections.defaultdict(list)
    sub_resource_map = {}
    resource_path_tokens = resource_path.split('.')
    for (service, protorpc_meth_info) in methods:
        method_info = getattr(protorpc_meth_info, 'method_info', None)
        path = method_info.get_path(service.api_info)
        method_id = method_info.method_id(service.api_info)
        <DeepExtract>
        canonical_method_id = method_id.split('.')[-1]
        </DeepExtract>
        <DeepExtract>
        current_resource_path = method_id.split('.')[1:-1]
        </DeepExtract>
        if current_resource_path[:len(resource_path_tokens)] != resource_path_tokens:
            raise api_exceptions.ToolError('Internal consistency error in resource path {0}'.format(current_resource_path))
        effective_resource_path = current_resource_path[len(resource_path_tokens):]
        if effective_resource_path:
            sub_resource_name = effective_resource_path[0]
            new_resource_path = '.'.join([resource_path, sub_resource_name])
            sub_resource_index[new_resource_path].append((service, protorpc_meth_info))
        else:
            <DeepExtract>
            descriptor = {}
            request_message_type = resource_container.ResourceContainer.get_request_message(protorpc_meth_info.remote)
            request_kind = self.__get_request_kind(method_info)
            remote_method = protorpc_meth_info.remote
            method_id = method_info.method_id(service.api_info)
            path = method_info.get_path(service.api_info)
            description = protorpc_meth_info.remote.method.__doc__
            descriptor['id'] = method_id
            descriptor['path'] = path
            descriptor['httpMethod'] = method_info.http_method
            if description:
                descriptor['description'] = description
            descriptor['scopes'] = ['https://www.googleapis.com/auth/userinfo.email']
            parameters = self.__params_descriptor(request_message_type, request_kind, path, method_id, method_info.request_params_class)
            if parameters:
                descriptor['parameters'] = parameters
            if method_info.request_params_class:
                parameter_order = self.__params_order_descriptor(method_info.request_params_class, path, is_params_class=True)
            else:
                parameter_order = self.__params_order_descriptor(request_message_type, path, is_params_class=False)
            if parameter_order:
                descriptor['parameterOrder'] = parameter_order
            request_descriptor = self.__request_message_descriptor(request_kind, request_message_type, method_id, method_info.request_body_class)
            if request_descriptor is not None:
                descriptor['request'] = request_descriptor
            response_descriptor = self.__response_message_descriptor(remote_method.response_type(), method_info.method_id(service.api_info))
            if response_descriptor is not None:
                descriptor['response'] = response_descriptor
            method_map[canonical_method_id] = descriptor
            </DeepExtract>
    for (sub_resource, sub_resource_methods) in sub_resource_index.items():
        sub_resource_name = sub_resource.split('.')[-1]
        <DeepExtract>
        descriptor = {}
        method_map = {}
        sub_resource_index = collections.defaultdict(list)
        sub_resource_map = {}
        resource_path_tokens = sub_resource.split('.')
        for (service, protorpc_meth_info) in sub_resource_methods:
            method_info = getattr(protorpc_meth_info, 'method_info', None)
            path = method_info.get_path(service.api_info)
            method_id = method_info.method_id(service.api_info)
            canonical_method_id = self._get_canonical_method_id(method_id)
            current_resource_path = self._get_resource_path(method_id)
            if current_resource_path[:len(resource_path_tokens)] != resource_path_tokens:
                raise api_exceptions.ToolError('Internal consistency error in resource path {0}'.format(current_resource_path))
            effective_resource_path = current_resource_path[len(resource_path_tokens):]
            if effective_resource_path:
                sub_resource_name = effective_resource_path[0]
                new_resource_path = '.'.join([sub_resource, sub_resource_name])
                sub_resource_index[new_resource_path].append((service, protorpc_meth_info))
            else:
                method_map[canonical_method_id] = self.__method_descriptor(service, method_info, protorpc_meth_info)
        for (sub_resource, sub_resource_methods) in sub_resource_index.items():
            sub_resource_name = sub_resource.split('.')[-1]
            sub_resource_map[sub_resource_name] = self.__resource_descriptor(sub_resource, sub_resource_methods)
        if method_map:
            descriptor['methods'] = method_map
        if sub_resource_map:
            descriptor['resources'] = sub_resource_map
        sub_resource_map[sub_resource_name] = descriptor
        </DeepExtract>
    if method_map:
        descriptor['methods'] = method_map
    if sub_resource_map:
        descriptor['resources'] = sub_resource_map
    return descriptor
def __resource_descriptor(self, resource_path, methods):
    """Describes a resource.

    Args:
        resource_path: string, the path of the resource (e.g., 'entries.items')
        methods: list of tuples of type
            (endpoints.Service, protorpc.remote._RemoteMethodInfo),
            the methods that serve this resource.

    Returns:
        Dictionary describing the resource.
    """
    descriptor = {}
    method_map = {}
    sub_resource_index = collections.defaultdict(list)
    sub_resource_map = {}
    resource_path_tokens = resource_path.split('.')
    for (service, protorpc_meth_info) in methods:
        method_info = getattr(protorpc_meth_info, 'method_info', None)
        path = method_info.get_path(service.api_info)
        method_id = method_info.method_id(service.api_info)
        canonical_method_id = method_id.split('.')[-1]
        current_resource_path = method_id.split('.')[1:-1]
        if current_resource_path[:len(resource_path_tokens)] != resource_path_tokens:
            raise api_exceptions.ToolError('Internal consistency error in resource path {0}'.format(current_resource_path))
        effective_resource_path = current_resource_path[len(resource_path_tokens):]
        if effective_resource_path:
            sub_resource_name = effective_resource_path[0]
            new_resource_path = '.'.join([resource_path, sub_resource_name])
            sub_resource_index[new_resource_path].append((service, protorpc_meth_info))
        else:
            descriptor = {}
            request_message_type = resource_container.ResourceContainer.get_request_message(protorpc_meth_info.remote)
            request_kind = self.__get_request_kind(method_info)
            remote_method = protorpc_meth_info.remote
            method_id = method_info.method_id(service.api_info)
            path = method_info.get_path(service.api_info)
            description = protorpc_meth_info.remote.method.__doc__
            descriptor['id'] = method_id
            descriptor['path'] = path
            descriptor['httpMethod'] = method_info.http_method
            if description:
                descriptor['description'] = description
            descriptor['scopes'] = ['https://www.googleapis.com/auth/userinfo.email']
            parameters = self.__params_descriptor(request_message_type, request_kind, path, method_id, method_info.request_params_class)
            if parameters:
                descriptor['parameters'] = parameters
            if method_info.request_params_class:
                parameter_order = self.__params_order_descriptor(method_info.request_params_class, path, is_params_class=True)
            else:
                parameter_order = self.__params_order_descriptor(request_message_type, path, is_params_class=False)
            if parameter_order:
                descriptor['parameterOrder'] = parameter_order
            request_descriptor = self.__request_message_descriptor(request_kind, request_message_type, method_id, method_info.request_body_class)
            if request_descriptor is not None:
                descriptor['request'] = request_descriptor
            response_descriptor = self.__response_message_descriptor(remote_method.response_type(), method_info.method_id(service.api_info))
            if response_descriptor is not None:
                descriptor['response'] = response_descriptor
            method_map[canonical_method_id] = descriptor
    for (sub_resource, sub_resource_methods) in sub_resource_index.items():
        sub_resource_name = sub_resource.split('.')[-1]
        descriptor = {}
        method_map = {}
        sub_resource_index = collections.defaultdict(list)
        sub_resource_map = {}
        resource_path_tokens = sub_resource.split('.')
        for (service, protorpc_meth_info) in sub_resource_methods:
            method_info = getattr(protorpc_meth_info, 'method_info', None)
            path = method_info.get_path(service.api_info)
            method_id = method_info.method_id(service.api_info)
            canonical_method_id = self._get_canonical_method_id(method_id)
            current_resource_path = self._get_resource_path(method_id)
            if current_resource_path[:len(resource_path_tokens)] != resource_path_tokens:
                raise api_exceptions.ToolError('Internal consistency error in resource path {0}'.format(current_resource_path))
            effective_resource_path = current_resource_path[len(resource_path_tokens):]
            if effective_resource_path:
                sub_resource_name = effective_resource_path[0]
                new_resource_path = '.'.join([sub_resource, sub_resource_name])
                sub_resource_index[new_resource_path].append((service, protorpc_meth_info))
            else:
                method_map[canonical_method_id] = self.__method_descriptor(service, method_info, protorpc_meth_info)
        for (sub_resource, sub_resource_methods) in sub_resource_index.items():
            sub_resource_name = sub_resource.split('.')[-1]
            sub_resource_map[sub_resource_name] = self.__resource_descriptor(sub_resource, sub_resource_methods)
        if method_map:
            descriptor['methods'] = method_map
        if sub_resource_map:
            descriptor['resources'] = sub_resource_map
        sub_resource_map[sub_resource_name] = descriptor
    if method_map:
        descriptor['methods'] = method_map
    if sub_resource_map:
        descriptor['resources'] = sub_resource_map
    return descriptor
endpoints-python
positive
def get_charset_name(self):
    if not self._mBestGuessProber:
        <DeepExtract>
        st = self.get_state()
        if st == constants.eFoundIt:
            return 0.99
        elif st == constants.eNotMe:
            return 0.01
        bestConf = 0.0
        self._mBestGuessProber = None
        for prober in self._mProbers:
            if not prober:
                continue
            if not prober.active:
                if constants._debug:
                    sys.stderr.write(prober.get_charset_name() + ' not active\n')
                continue
            cf = prober.get_confidence()
            if constants._debug:
                sys.stderr.write('%s confidence = %s\n' % (prober.get_charset_name(), cf))
            if bestConf < cf:
                bestConf = cf
                self._mBestGuessProber = prober
        if not self._mBestGuessProber:
            return 0.0
        return bestConf
        </DeepExtract>
        if not self._mBestGuessProber:
            return None
    return self._mBestGuessProber.get_charset_name()
def get_charset_name(self):
    if not self._mBestGuessProber:
        st = self.get_state()
        if st == constants.eFoundIt:
            return 0.99
        elif st == constants.eNotMe:
            return 0.01
        bestConf = 0.0
        self._mBestGuessProber = None
        for prober in self._mProbers:
            if not prober:
                continue
            if not prober.active:
                if constants._debug:
                    sys.stderr.write(prober.get_charset_name() + ' not active\n')
                continue
            cf = prober.get_confidence()
            if constants._debug:
                sys.stderr.write('%s confidence = %s\n' % (prober.get_charset_name(), cf))
            if bestConf < cf:
                bestConf = cf
                self._mBestGuessProber = prober
        if not self._mBestGuessProber:
            return 0.0
        return bestConf
        if not self._mBestGuessProber:
            return None
    return self._mBestGuessProber.get_charset_name()
acousticbrainz-client
positive
def judge(baseline_dir, tested_dirs, output_dir):
    judgement_succeeded = True
    for directory in tested_dirs:
        <DeepExtract>
        action_tolerances = get_tolerances(directory)
        fields = ('label', 'elapsed')
        df_baseline = group_dataframe_by_action([os.path.join(baseline_dir, 'kpi*.jtl'), os.path.join(baseline_dir, 'selenium*.jtl')], fields)
        df_tested = group_dataframe_by_action([os.path.join(directory, 'kpi*.jtl'), os.path.join(directory, 'selenium*.jtl')], fields)
        results = judgement_test_measuring(df_baseline, df_tested, measurement_by_column='elapsed', tolerances=action_tolerances)
        success_status = all((result.passed for result in results))
        save_judgement_results(results, output_dir, baseline_dirname=os.path.basename(baseline_dir), tested_dirname=os.path.basename(directory))
        success = success_status
        </DeepExtract>
        if not success:
            judgement_succeeded = False
    if not judgement_succeeded:
        raise SystemExit('Judgement has failed. Check judgement table above.')
def judge(baseline_dir, tested_dirs, output_dir):
    judgement_succeeded = True
    for directory in tested_dirs:
        action_tolerances = get_tolerances(directory)
        fields = ('label', 'elapsed')
        df_baseline = group_dataframe_by_action([os.path.join(baseline_dir, 'kpi*.jtl'), os.path.join(baseline_dir, 'selenium*.jtl')], fields)
        df_tested = group_dataframe_by_action([os.path.join(directory, 'kpi*.jtl'), os.path.join(directory, 'selenium*.jtl')], fields)
        results = judgement_test_measuring(df_baseline, df_tested, measurement_by_column='elapsed', tolerances=action_tolerances)
        success_status = all((result.passed for result in results))
        save_judgement_results(results, output_dir, baseline_dirname=os.path.basename(baseline_dir), tested_dirname=os.path.basename(directory))
        success = success_status
        if not success:
            judgement_succeeded = False
    if not judgement_succeeded:
        raise SystemExit('Judgement has failed. Check judgement table above.')
dc-app-performance-toolkit
positive
def add_node(self, node: GcpResourceType, source: Optional[Json]=None) -> Optional[GcpResourceType]:
    log.debug(f'{self.name}: add node {node}')
    node._cloud = self.cloud
    node._account = self.project
    <DeepExtract>
    if isinstance(node, GcpRegion):
        self.add_edge(node, node=self.project, reverse=True)
        return
    if node._zone:
        self.add_edge(node, node=node._zone, reverse=True)
        return
    if node._region:
        self.add_edge(node, node=node._region, reverse=True)
        return
    if source is not None:
        if InternalZoneProp in source:
            if (zone := self.zone_by_name.get(source[InternalZoneProp])):
                node._zone = zone
                node._region = self.region_by_zone_name[source[InternalZoneProp]]
                self.add_edge(node, node=zone, reverse=True)
                return
            else:
                log.debug(f'Zone {source[InternalZoneProp]} not found for node: {node}.')
        if RegionProp in source:
            region_name = source[RegionProp].rsplit('/', 1)[-1]
            if (region := self.region_by_name.get(region_name)):
                node._region = region
                self.add_edge(node, node=region, reverse=True)
                return
            else:
                log.debug(f'Region {region_name} not found for node: {node}.')
    if self.region is not None:
        node._region = self.region
        self.add_edge(node, node=self.region, reverse=True)
        return
    self.add_edge(node, node=self.project, reverse=True)
    return
    </DeepExtract>
    with self.graph_nodes_access:
        self.graph.add_node(node, source=source or {})
    return node
def add_node(self, node: GcpResourceType, source: Optional[Json]=None) -> Optional[GcpResourceType]:
    log.debug(f'{self.name}: add node {node}')
    node._cloud = self.cloud
    node._account = self.project
    if isinstance(node, GcpRegion):
        self.add_edge(node, node=self.project, reverse=True)
        return
    if node._zone:
        self.add_edge(node, node=node._zone, reverse=True)
        return
    if node._region:
        self.add_edge(node, node=node._region, reverse=True)
        return
    if source is not None:
        if InternalZoneProp in source:
            if (zone := self.zone_by_name.get(source[InternalZoneProp])):
                node._zone = zone
                node._region = self.region_by_zone_name[source[InternalZoneProp]]
                self.add_edge(node, node=zone, reverse=True)
                return
            else:
                log.debug(f'Zone {source[InternalZoneProp]} not found for node: {node}.')
        if RegionProp in source:
            region_name = source[RegionProp].rsplit('/', 1)[-1]
            if (region := self.region_by_name.get(region_name)):
                node._region = region
                self.add_edge(node, node=region, reverse=True)
                return
            else:
                log.debug(f'Region {region_name} not found for node: {node}.')
    if self.region is not None:
        node._region = self.region
        self.add_edge(node, node=self.region, reverse=True)
        return
    self.add_edge(node, node=self.project, reverse=True)
    return
    with self.graph_nodes_access:
        self.graph.add_node(node, source=source or {})
    return node
cloudkeeper
positive
def authorized_read_list(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to GET this resource.
    """
    try:
        auth_result = self._meta.authorization.read_list(object_list, bundle)
    except Unauthorized as e:
        <DeepExtract>
        raise ImmediateHttpResponse(response=http.HttpUnauthorized())
        </DeepExtract>
    return auth_result
def authorized_read_list(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to GET this resource.
    """
    try:
        auth_result = self._meta.authorization.read_list(object_list, bundle)
    except Unauthorized as e:
        raise ImmediateHttpResponse(response=http.HttpUnauthorized())
    return auth_result
django-tastypie
positive
def __init__(self, pipeline):
    self.name = pipeline.get('name')
    <DeepExtract>
    providers = pipeline.get('default_providers') or {}
    self.default_providers = {'source': {'provider': 'codecommit', **providers.get('source', {})}, 'build': {'provider': 'codebuild', **providers.get('build', {})}, 'deploy': {'provider': 'cloudformation', **providers.get('deploy', {})}}
    </DeepExtract>
    self.parameters = pipeline.get('params', {})
    self.template_dictionary = {'targets': []}
    self.notification_endpoint = self.parameters.get('notification_endpoint')
    self.stage_regions = []
    self.top_level_regions = pipeline.get('regions', [])
    self.completion_trigger = pipeline.get('completion_trigger', {})
    self.tags = pipeline.get('tags', {})
    self.schedule = self.parameters.get('schedule', {})
    if not isinstance(self.completion_trigger.get('pipelines', []), list):
        self.completion_trigger['pipelines'] = [self.completion_trigger['pipelines']]
    if not isinstance(self.top_level_regions, list):
        self.top_level_regions = [self.top_level_regions]
def __init__(self, pipeline):
    self.name = pipeline.get('name')
    providers = pipeline.get('default_providers') or {}
    self.default_providers = {'source': {'provider': 'codecommit', **providers.get('source', {})}, 'build': {'provider': 'codebuild', **providers.get('build', {})}, 'deploy': {'provider': 'cloudformation', **providers.get('deploy', {})}}
    self.parameters = pipeline.get('params', {})
    self.template_dictionary = {'targets': []}
    self.notification_endpoint = self.parameters.get('notification_endpoint')
    self.stage_regions = []
    self.top_level_regions = pipeline.get('regions', [])
    self.completion_trigger = pipeline.get('completion_trigger', {})
    self.tags = pipeline.get('tags', {})
    self.schedule = self.parameters.get('schedule', {})
    if not isinstance(self.completion_trigger.get('pipelines', []), list):
        self.completion_trigger['pipelines'] = [self.completion_trigger['pipelines']]
    if not isinstance(self.top_level_regions, list):
        self.top_level_regions = [self.top_level_regions]
aws-deployment-framework
positive
def __init__(self, block, layers, output_layers, num_classes=1000):
    self.inplanes = 64
    super(ResNetVGGm1, self).__init__()
    self.output_layers = output_layers
    self.vggmconv1 = nn.Conv2d(3, 96, (7, 7), (2, 2), padding=3)
    self.vgglrn = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    <DeepExtract>
    downsample = None
    if stride != 1 or self.inplanes != 64 * block.expansion:
        downsample = nn.Sequential(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(64 * block.expansion))
    layers = []
    layers.append(block(self.inplanes, 64, stride, downsample))
    self.inplanes = 64 * block.expansion
    for i in range(1, layers[0]):
        layers.append(block(self.inplanes, 64))
    self.layer1 = nn.Sequential(*layers)
    </DeepExtract>
    <DeepExtract>
    downsample = None
    if 2 != 1 or self.inplanes != 128 * block.expansion:
        downsample = nn.Sequential(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128 * block.expansion))
    layers = []
    layers.append(block(self.inplanes, 128, 2, downsample))
    self.inplanes = 128 * block.expansion
    for i in range(1, layers[1]):
        layers.append(block(self.inplanes, 128))
    self.layer2 = nn.Sequential(*layers)
    </DeepExtract>
    <DeepExtract>
    downsample = None
    if 2 != 1 or self.inplanes != 256 * block.expansion:
        downsample = nn.Sequential(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(256 * block.expansion))
    layers = []
    layers.append(block(self.inplanes, 256, 2, downsample))
    self.inplanes = 256 * block.expansion
    for i in range(1, layers[2]):
        layers.append(block(self.inplanes, 256))
    self.layer3 = nn.Sequential(*layers)
    </DeepExtract>
    <DeepExtract>
    downsample = None
    if 2 != 1 or self.inplanes != 512 * block.expansion:
        downsample = nn.Sequential(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(512 * block.expansion))
    layers = []
    layers.append(block(self.inplanes, 512, 2, downsample))
    self.inplanes = 512 * block.expansion
    for i in range(1, layers[3]):
        layers.append(block(self.inplanes, 512))
    self.layer4 = nn.Sequential(*layers)
    </DeepExtract>
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2.0 / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
def __init__(self, block, layers, output_layers, num_classes=1000):
    self.inplanes = 64
    super(ResNetVGGm1, self).__init__()
    self.output_layers = output_layers
    self.vggmconv1 = nn.Conv2d(3, 96, (7, 7), (2, 2), padding=3)
    self.vgglrn = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    downsample = None
    if stride != 1 or self.inplanes != 64 * block.expansion:
        downsample = nn.Sequential(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(64 * block.expansion))
    layers = []
    layers.append(block(self.inplanes, 64, stride, downsample))
    self.inplanes = 64 * block.expansion
    for i in range(1, layers[0]):
        layers.append(block(self.inplanes, 64))
    self.layer1 = nn.Sequential(*layers)
    downsample = None
    if 2 != 1 or self.inplanes != 128 * block.expansion:
        downsample = nn.Sequential(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128 * block.expansion))
    layers = []
    layers.append(block(self.inplanes, 128, 2, downsample))
    self.inplanes = 128 * block.expansion
    for i in range(1, layers[1]):
        layers.append(block(self.inplanes, 128))
    self.layer2 = nn.Sequential(*layers)
    downsample = None
    if 2 != 1 or self.inplanes != 256 * block.expansion:
        downsample = nn.Sequential(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(256 * block.expansion))
    layers = []
    layers.append(block(self.inplanes, 256, 2, downsample))
    self.inplanes = 256 * block.expansion
    for i in range(1, layers[2]):
        layers.append(block(self.inplanes, 256))
    self.layer3 = nn.Sequential(*layers)
    downsample = None
    if 2 != 1 or self.inplanes != 512 * block.expansion:
        downsample = nn.Sequential(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(512 * block.expansion))
    layers = []
    layers.append(block(self.inplanes, 512, 2, downsample))
    self.inplanes = 512 * block.expansion
    for i in range(1, layers[3]):
        layers.append(block(self.inplanes, 512))
    self.layer4 = nn.Sequential(*layers)
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2.0 / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
d3s
positive
def to_airflow_dag(self):
    """Convert the DAG to its Airflow representation and return the python code."""
    <DeepExtract>
    env = Environment(loader=PackageLoader('bigquery_etl', 'query_scheduling/templates'), extensions=['jinja2.ext.do'])
    for name in dir(formatters):
        func = getattr(formatters, name)
        if not callable(func):
            continue
        env.filters[name] = func
    env = env
    </DeepExtract>
    dag_template = env.get_template(PUBLIC_DATA_JSON_DAG_TEMPLATE)
    args = self.__dict__
    return dag_template.render(args)
def to_airflow_dag(self):
    """Convert the DAG to its Airflow representation and return the python code."""
    env = Environment(loader=PackageLoader('bigquery_etl', 'query_scheduling/templates'), extensions=['jinja2.ext.do'])
    for name in dir(formatters):
        func = getattr(formatters, name)
        if not callable(func):
            continue
        env.filters[name] = func
    env = env
    dag_template = env.get_template(PUBLIC_DATA_JSON_DAG_TEMPLATE)
    args = self.__dict__
    return dag_template.render(args)
bigquery-etl
positive
def __init__(self, initial=None):
    """Initialize virtual trackball control.

    initial : quaternion or rotation matrix
    """
    self._axis = None
    self._axes = None
    self._radius = 1.0
    self._center = [0.0, 0.0]
    self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
    self._constrain = False
    if initial is None:
        self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64)
    else:
        initial = numpy.array(initial, dtype=numpy.float64)
        if initial.shape == (4, 4):
            <DeepExtract>
            q = numpy.empty((4,), dtype=numpy.float64)
            M = numpy.array(initial, dtype=numpy.float64, copy=False)[:4, :4]
            t = numpy.trace(M)
            if t > M[3, 3]:
                q[3] = t
                q[2] = M[1, 0] - M[0, 1]
                q[1] = M[0, 2] - M[2, 0]
                q[0] = M[2, 1] - M[1, 2]
            else:
                (i, j, k) = (0, 1, 2)
                if M[1, 1] > M[0, 0]:
                    (i, j, k) = (1, 2, 0)
                if M[2, 2] > M[i, i]:
                    (i, j, k) = (2, 0, 1)
                t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
                q[i] = t
                q[j] = M[i, j] + M[j, i]
                q[k] = M[k, i] + M[i, k]
                q[3] = M[k, j] - M[j, k]
            q *= 0.5 / math.sqrt(t * M[3, 3])
            self._qdown = q
            </DeepExtract>
        elif initial.shape == (4,):
            initial /= vector_norm(initial)
            self._qdown = initial
        else:
            raise ValueError('initial not a quaternion or matrix.')
    self._qnow = self._qpre = self._qdown
def __init__(self, initial=None):
    """Initialize virtual trackball control.

    initial : quaternion or rotation matrix
    """
    self._axis = None
    self._axes = None
    self._radius = 1.0
    self._center = [0.0, 0.0]
    self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
    self._constrain = False
    if initial is None:
        self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64)
    else:
        initial = numpy.array(initial, dtype=numpy.float64)
        if initial.shape == (4, 4):
            q = numpy.empty((4,), dtype=numpy.float64)
            M = numpy.array(initial, dtype=numpy.float64, copy=False)[:4, :4]
            t = numpy.trace(M)
            if t > M[3, 3]:
                q[3] = t
                q[2] = M[1, 0] - M[0, 1]
                q[1] = M[0, 2] - M[2, 0]
                q[0] = M[2, 1] - M[1, 2]
            else:
                (i, j, k) = (0, 1, 2)
                if M[1, 1] > M[0, 0]:
                    (i, j, k) = (1, 2, 0)
                if M[2, 2] > M[i, i]:
                    (i, j, k) = (2, 0, 1)
                t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
                q[i] = t
                q[j] = M[i, j] + M[j, i]
                q[k] = M[k, i] + M[i, k]
                q[3] = M[k, j] - M[j, k]
            q *= 0.5 / math.sqrt(t * M[3, 3])
            self._qdown = q
        elif initial.shape == (4,):
            initial /= vector_norm(initial)
            self._qdown = initial
        else:
            raise ValueError('initial not a quaternion or matrix.')
    self._qnow = self._qpre = self._qdown
decentralized-multiarm
positive
@cli.command()
@edm_option
@runtime_option
@toolkit_option
@environment_option
@editable_option
def update(edm, runtime, toolkit, environment, editable):
    """Update/Reinstall package into environment."""
    <DeepExtract>
    if edm is None:
        edm = locate_edm()
    if environment is None:
        environment = 'envisage-test-{runtime}-{toolkit}'.format(runtime=runtime, toolkit=toolkit)
    parameters = {'edm': edm, 'runtime': runtime, 'toolkit': toolkit, 'environment': environment}
    if toolkit not in supported_combinations[runtime]:
        msg = 'Runtime {runtime} and toolkit {toolkit} not supported by ' + 'test environments'
        raise click.ClickException(msg.format(**parameters))
    parameters = parameters
    </DeepExtract>
    if editable:
        install_cmd = '{edm} run -e {environment} -- python -m pip install --editable . --no-dependencies'
    else:
        install_cmd = '{edm} run -e {environment} -- python -m pip install . --no-dependencies'
    commands = [install_cmd]
    click.echo("Re-installing in '{environment}'".format(**parameters))
    <DeepExtract>
    for command in commands:
        click.echo('[EXECUTING] {}'.format(command.format(**parameters)))
        try:
            subprocess.check_call([arg.format(**parameters) for arg in command.split()])
        except subprocess.CalledProcessError as exc:
            click.echo(str(exc))
            sys.exit(1)
    </DeepExtract>
    click.echo('Done update')
@cli.command()
@edm_option
@runtime_option
@toolkit_option
@environment_option
@editable_option
def update(edm, runtime, toolkit, environment, editable):
    """Update/Reinstall package into environment."""
    if edm is None:
        edm = locate_edm()
    if environment is None:
        environment = 'envisage-test-{runtime}-{toolkit}'.format(runtime=runtime, toolkit=toolkit)
    parameters = {'edm': edm, 'runtime': runtime, 'toolkit': toolkit, 'environment': environment}
    if toolkit not in supported_combinations[runtime]:
        msg = 'Runtime {runtime} and toolkit {toolkit} not supported by ' + 'test environments'
        raise click.ClickException(msg.format(**parameters))
    parameters = parameters
    if editable:
        install_cmd = '{edm} run -e {environment} -- python -m pip install --editable . --no-dependencies'
    else:
        install_cmd = '{edm} run -e {environment} -- python -m pip install . --no-dependencies'
    commands = [install_cmd]
    click.echo("Re-installing in '{environment}'".format(**parameters))
    for command in commands:
        click.echo('[EXECUTING] {}'.format(command.format(**parameters)))
        try:
            subprocess.check_call([arg.format(**parameters) for arg in command.split()])
        except subprocess.CalledProcessError as exc:
            click.echo(str(exc))
            sys.exit(1)
    click.echo('Done update')
envisage
positive
def fixup_scripts(home_dir):
    shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
    new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
    if is_win:
        bin_suffix = 'Scripts'
    else:
        bin_suffix = 'bin'
    bin_dir = os.path.join(home_dir, bin_suffix)
    <DeepExtract>
    if is_win:
        mkdir(home_dir)
        if ' ' in home_dir:
            import ctypes
            GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
            size = max(len(home_dir) + 1, 256)
            buf = ctypes.create_unicode_buffer(size)
            try:
                u = unicode
            except NameError:
                u = str
            ret = GetShortPathName(u(home_dir), buf, size)
            if not ret:
                print('Error: the path "%s" has a space in it' % home_dir)
                print('We could not determine the short pathname for it.')
                print('Exiting.')
                sys.exit(3)
            home_dir = str(buf.value)
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'Scripts')
    if is_jython:
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'bin')
    elif is_pypy:
        lib_dir = home_dir
        inc_dir = join(home_dir, 'include')
        bin_dir = join(home_dir, 'bin')
    elif not is_win:
        lib_dir = join(home_dir, 'lib', py_version)
        multiarch_exec = '/usr/bin/multiarch-platform'
        if is_executable_file(multiarch_exec):
            p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (stdout, stderr) = p.communicate()
            inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags)
        else:
            inc_dir = join(home_dir, 'include', py_version + abiflags)
        bin_dir = join(home_dir, 'bin')
    (home_dir, lib_dir, inc_dir, bin_dir) = (home_dir, lib_dir, inc_dir, bin_dir)
    </DeepExtract>
    for filename in os.listdir(bin_dir):
        filename = os.path.join(bin_dir, filename)
        if not os.path.isfile(filename):
            continue
        f = open(filename, 'rb')
        try:
            try:
                lines = f.read().decode('utf-8').splitlines()
            except UnicodeDecodeError:
                continue
        finally:
            f.close()
        if not lines:
            logger.warn('Script %s is an empty file' % filename)
            continue
        if not lines[0].strip().startswith(shebang):
            if os.path.basename(filename) in OK_ABS_SCRIPTS:
                logger.debug('Cannot make script %s relative' % filename)
            elif lines[0].strip() == new_shebang:
                logger.info('Script %s has already been made relative' % filename)
            else:
                logger.warn("Script %s cannot be made relative (it's not a normal script that starts with %s)" % (filename, shebang))
            continue
        logger.notify('Making script %s relative' % filename)
        <DeepExtract>
        activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
        activate_at = None
        for (idx, line) in reversed(list(enumerate([new_shebang] + lines[1:]))):
            if line.split()[:3] == ['from', '__future__', 'import']:
                activate_at = idx + 1
                break
        if activate_at is None:
            activate_at = 1
        script = [new_shebang] + lines[1:][:activate_at] + ['', activate, ''] + [new_shebang] + lines[1:][activate_at:]
        </DeepExtract>
        f = open(filename, 'wb')
        f.write('\n'.join(script).encode('utf-8'))
        f.close()
def fixup_scripts(home_dir):
    shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
    new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
    if is_win:
        bin_suffix = 'Scripts'
    else:
        bin_suffix = 'bin'
    bin_dir = os.path.join(home_dir, bin_suffix)
    if is_win:
        mkdir(home_dir)
        if ' ' in home_dir:
            import ctypes
            GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
            size = max(len(home_dir) + 1, 256)
            buf = ctypes.create_unicode_buffer(size)
            try:
                u = unicode
            except NameError:
                u = str
            ret = GetShortPathName(u(home_dir), buf, size)
            if not ret:
                print('Error: the path "%s" has a space in it' % home_dir)
                print('We could not determine the short pathname for it.')
                print('Exiting.')
                sys.exit(3)
            home_dir = str(buf.value)
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'Scripts')
    if is_jython:
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'bin')
    elif is_pypy:
        lib_dir = home_dir
        inc_dir = join(home_dir, 'include')
        bin_dir = join(home_dir, 'bin')
    elif not is_win:
        lib_dir = join(home_dir, 'lib', py_version)
        multiarch_exec = '/usr/bin/multiarch-platform'
        if is_executable_file(multiarch_exec):
            p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (stdout, stderr) = p.communicate()
            inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags)
        else:
            inc_dir = join(home_dir, 'include', py_version + abiflags)
        bin_dir = join(home_dir, 'bin')
    (home_dir, lib_dir, inc_dir, bin_dir) = (home_dir, lib_dir, inc_dir, bin_dir)
    for filename in os.listdir(bin_dir):
        filename = os.path.join(bin_dir, filename)
        if not os.path.isfile(filename):
            continue
        f = open(filename, 'rb')
        try:
            try:
                lines = f.read().decode('utf-8').splitlines()
            except UnicodeDecodeError:
                continue
        finally:
            f.close()
        if not lines:
            logger.warn('Script %s is an empty file' % filename)
            continue
        if not lines[0].strip().startswith(shebang):
            if os.path.basename(filename) in OK_ABS_SCRIPTS:
                logger.debug('Cannot make script %s relative' % filename)
            elif lines[0].strip() == new_shebang:
                logger.info('Script %s has already been made relative' % filename)
            else:
                logger.warn("Script %s cannot be made relative (it's not a normal script that starts with %s)" % (filename, shebang))
            continue
        logger.notify('Making script %s relative' % filename)
        activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
        activate_at = None
        for (idx, line) in reversed(list(enumerate([new_shebang] + lines[1:]))):
            if line.split()[:3] == ['from', '__future__', 'import']:
                activate_at = idx + 1
                break
        if activate_at is None:
            activate_at = 1
        script = [new_shebang] + lines[1:][:activate_at] + ['', activate, ''] + [new_shebang] + lines[1:][activate_at:]
        f = open(filename, 'wb')
        f.write('\n'.join(script).encode('utf-8'))
        f.close()
cxxtest
positive
def find_unreachable(self):

    def mark_reachable_from(s):
        if s in reachable:
            return
        reachable.add(s)
        for p in self.Prodnames.get(s, []):
            for r in p.prod:
                <DeepExtract>
                if r in reachable:
                    return
                reachable.add(r)
                for p in self.Prodnames.get(r, []):
                    for r in p.prod:
                        mark_reachable_from(r)
                </DeepExtract>
    reachable = set()
    <DeepExtract>
    if self.Productions[0].prod[0] in reachable:
        return
    reachable.add(self.Productions[0].prod[0])
    for p in self.Prodnames.get(self.Productions[0].prod[0], []):
        for r in p.prod:
            mark_reachable_from(r)
    </DeepExtract>
    return [s for s in self.Nonterminals if s not in reachable]
def find_unreachable(self):

    def mark_reachable_from(s):
        if s in reachable:
            return
        reachable.add(s)
        for p in self.Prodnames.get(s, []):
            for r in p.prod:
                if r in reachable:
                    return
                reachable.add(r)
                for p in self.Prodnames.get(r, []):
                    for r in p.prod:
                        mark_reachable_from(r)
    reachable = set()
    if self.Productions[0].prod[0] in reachable:
        return
    reachable.add(self.Productions[0].prod[0])
    for p in self.Prodnames.get(self.Productions[0].prod[0], []):
        for r in p.prod:
            mark_reachable_from(r)
    return [s for s in self.Nonterminals if s not in reachable]
demo2program
positive
def LineKernel(dim, angle, linetype):
    kernelwidth = dim
    kernelCenter = int(math.floor(dim / 2))
    <DeepExtract>
    numDistinctLines = kernelCenter * 4
    angle = math.fmod(angle, 180.0)
    validLineAngles = np.linspace(0, 180, numDistinctLines, endpoint=False)
    angle = nearestValue(angle, validLineAngles)
    angle = angle
    </DeepExtract>
    kernel = np.zeros((kernelwidth, kernelwidth), dtype=np.float32)
    lineAnchors = lineDict.lines[dim][angle]
    if linetype == 'right':
        lineAnchors[0] = kernelCenter
        lineAnchors[1] = kernelCenter
    if linetype == 'left':
        lineAnchors[2] = kernelCenter
        lineAnchors[3] = kernelCenter
    (rr, cc) = line(lineAnchors[0], lineAnchors[1], lineAnchors[2], lineAnchors[3])
    kernel[rr, cc] = 1
    normalizationFactor = np.count_nonzero(kernel)
    kernel = kernel / normalizationFactor
    return kernel
def LineKernel(dim, angle, linetype):
    kernelwidth = dim
    kernelCenter = int(math.floor(dim / 2))
    numDistinctLines = kernelCenter * 4
    angle = math.fmod(angle, 180.0)
    validLineAngles = np.linspace(0, 180, numDistinctLines, endpoint=False)
    angle = nearestValue(angle, validLineAngles)
    angle = angle
    kernel = np.zeros((kernelwidth, kernelwidth), dtype=np.float32)
    lineAnchors = lineDict.lines[dim][angle]
    if linetype == 'right':
        lineAnchors[0] = kernelCenter
        lineAnchors[1] = kernelCenter
    if linetype == 'left':
        lineAnchors[2] = kernelCenter
        lineAnchors[3] = kernelCenter
    (rr, cc) = line(lineAnchors[0], lineAnchors[1], lineAnchors[2], lineAnchors[3])
    kernel[rr, cc] = 1
    normalizationFactor = np.count_nonzero(kernel)
    kernel = kernel / normalizationFactor
    return kernel
DeepFaceLab_Linux
positive
def closePlot():
    """ closePlot(): Close the active plot window. """
    <DeepExtract>
    mw = getMainWindow()
    if not mw:
        mdi = None
    childs = mw.children()
    for c in childs:
        if isinstance(c, PySide.QtGui.QMdiArea):
            mdi = c
    mdi = None
    </DeepExtract>
    if not mdi:
        return None
    sub = mdi.activeSubWindow()
    if not sub:
        return None
    for i in sub.children():
        if i.metaObject().className() == 'Plot':
            sub.close()
def closePlot():
    """ closePlot(): Close the active plot window. """
    mw = getMainWindow()
    if not mw:
        mdi = None
    childs = mw.children()
    for c in childs:
        if isinstance(c, PySide.QtGui.QMdiArea):
            mdi = c
    mdi = None
    if not mdi:
        return None
    sub = mdi.activeSubWindow()
    if not sub:
        return None
    for i in sub.children():
        if i.metaObject().className() == 'Plot':
            sub.close()
CfdOF
positive
def pad_iamge(self, image):
    canvas = np.zeros((*self.image_size, 3), np.float32)
    <DeepExtract>
    (height, width) = self.image_size
    if self.mode == 'keep_ratio':
        width = max(width, int(height / image.shape[0] * image.shape[1] / 32 + 0.5) * 32)
    if self.mode == 'pad':
        width = min(width, max(int(height / image.shape[0] * image.shape[1] / 32 + 0.5) * 32, 32))
    (height, width) = (height, width)
    </DeepExtract>
    image = cv2.resize(image, (width, height))
    canvas[:, :width, :] = image
    return canvas
def pad_iamge(self, image):
    canvas = np.zeros((*self.image_size, 3), np.float32)
    (height, width) = self.image_size
    if self.mode == 'keep_ratio':
        width = max(width, int(height / image.shape[0] * image.shape[1] / 32 + 0.5) * 32)
    if self.mode == 'pad':
        width = min(width, max(int(height / image.shape[0] * image.shape[1] / 32 + 0.5) * 32, 32))
    (height, width) = (height, width)
    image = cv2.resize(image, (width, height))
    canvas[:, :width, :] = image
    return canvas
DB
positive
def testConv2DSameOdd(self):
    (n, n2) = (5, 3)
    <DeepExtract>
    if None in [1, n, n, 1]:
        x = tf.placeholder(tf.float32, (1, n, n, 1))
    else:
        x = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(n), [n, 1]) + np.reshape(np.arange(n), [1, n]), [1, n, n, 1]), [1, 1, 1, 1]))
    </DeepExtract>
    <DeepExtract>
    if None in [1, 3, 3, 1]:
        w = tf.placeholder(tf.float32, (1, 3, 3, 1))
    else:
        w = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(3), [3, 1]) + np.reshape(np.arange(3), [1, 3]), [1, 3, 3, 1]), [1, 1, 1, 1]))
    </DeepExtract>
    w = tf.reshape(w, [3, 3, 1, 1])
    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()
    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55], [58, 84, 102, 120, 64], [34, 46, 55, 64, 30]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected
    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        self.assertAllClose(y1.eval(), y1_expected.eval())
        self.assertAllClose(y2.eval(), y2_expected.eval())
        self.assertAllClose(y3.eval(), y3_expected.eval())
        self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
    (n, n2) = (5, 3)
    if None in [1, n, n, 1]:
        x = tf.placeholder(tf.float32, (1, n, n, 1))
    else:
        x = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(n), [n, 1]) + np.reshape(np.arange(n), [1, n]), [1, n, n, 1]), [1, 1, 1, 1]))
    if None in [1, 3, 3, 1]:
        w = tf.placeholder(tf.float32, (1, 3, 3, 1))
    else:
        w = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(3), [3, 1]) + np.reshape(np.arange(3), [1, 3]), [1, 3, 3, 1]), [1, 1, 1, 1]))
    w = tf.reshape(w, [3, 3, 1, 1])
    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()
    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55], [58, 84, 102, 120, 64], [34, 46, 55, 64, 30]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected
    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        self.assertAllClose(y1.eval(), y1_expected.eval())
        self.assertAllClose(y2.eval(), y2_expected.eval())
        self.assertAllClose(y3.eval(), y3_expected.eval())
        self.assertAllClose(y4.eval(), y4_expected.eval())
caad_18
positive
def read_one(result_file_path):
    with open(result_file_path) as f:
        lines = f.read().splitlines()
    one = []
    for line in lines:
        (file_path, cate_id, prob, x, y, w, h) = line.split()
        (image_id, level_id, crop_name) = os.path.splitext(os.path.basename(file_path))[0].split('_', 2)
        level_id = int(level_id)
        (cx, cy, cw, ch) = levelmap[level_id, crop_name]
        cate_id = settings.NUM_CHAR_CATES if proposal_output else int(cate_id) - 1
        (x, y, w, h, prob) = (float(x), float(y), float(w) - float(x), float(h) - float(y), float(prob))
        longsize = max(w, h)
        size_range = size_ranges[level_id]
        if longsize < size_range[0] or size_range[1] <= longsize:
            continue
        rm = removal[level_id]
        if cx != 0 and x < rm or (cy != 0 and y < rm) or (cx + cw != imshape[1] and x + w + rm >= cw) or (cy + ch != imshape[0] and y + h + rm >= ch):
            continue
        <DeepExtract>
        (x, y, w, h) = (x + cx, y + cy, w, h)
        (x1, y1) = (x + w, y + h)
        (x0, y0) = (max(0, x), max(0, y))
        (x1, y1) = (min(imshape[1], x1), min(imshape[0], y1))
        real_bbox = (x0, y0, x1 - x0, y1 - y0)
        </DeepExtract>
        if real_bbox[2] > 0 and real_bbox[3] > 0:
            all[image_id].append({'image_id': image_id, 'cate_id': cate_id, 'prob': prob, 'bbox': real_bbox})
def read_one(result_file_path):
    with open(result_file_path) as f:
        lines = f.read().splitlines()
    one = []
    for line in lines:
        (file_path, cate_id, prob, x, y, w, h) = line.split()
        (image_id, level_id, crop_name) = os.path.splitext(os.path.basename(file_path))[0].split('_', 2)
        level_id = int(level_id)
        (cx, cy, cw, ch) = levelmap[level_id, crop_name]
        cate_id = settings.NUM_CHAR_CATES if proposal_output else int(cate_id) - 1
        (x, y, w, h, prob) = (float(x), float(y), float(w) - float(x), float(h) - float(y), float(prob))
        longsize = max(w, h)
        size_range = size_ranges[level_id]
        if longsize < size_range[0] or size_range[1] <= longsize:
            continue
        rm = removal[level_id]
        if cx != 0 and x < rm or (cy != 0 and y < rm) or (cx + cw != imshape[1] and x + w + rm >= cw) or (cy + ch != imshape[0] and y + h + rm >= ch):
            continue
        (x, y, w, h) = (x + cx, y + cy, w, h)
        (x1, y1) = (x + w, y + h)
        (x0, y0) = (max(0, x), max(0, y))
        (x1, y1) = (min(imshape[1], x1), min(imshape[0], y1))
        real_bbox = (x0, y0, x1 - x0, y1 - y0)
        if real_bbox[2] > 0 and real_bbox[3] > 0:
            all[image_id].append({'image_id': image_id, 'cate_id': cate_id, 'prob': prob, 'bbox': real_bbox})
ctw-baseline
positive
def generate_query(self, db, max_cond=4):
    max_cond = min(len(self.header), max_cond)
    sel_index = random.choice(list(range(len(self.header))))
    query = Query(-1, agg_ops.index(''))
    <DeepExtract>
    sel_str = 'col{}'.format(query.sel_index) if query.sel_index >= 0 else '*'
    agg_str = sel_str
    agg_op = agg_ops[query.agg_index]
    if agg_op:
        agg_str = '{}({})'.format(agg_op, sel_str)
    where_str = ' AND '.join(['col{} {} :col{}'.format(i, cond_ops[o], i) for (i, o, v) in query.conditions])
    where_map = {'col{}'.format(i): v for (i, o, v) in query.conditions}
    if lower:
        where_map = {k: v.lower() if isinstance(v, str) else v for (k, v) in where_map.items()}
    if where_map:
        where_str = 'WHERE ' + where_str
    if query.sel_index >= 0:
        query_str = 'SELECT {agg_str} AS result FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
        results = [r.result for r in db.query(query_str, **where_map)]
    else:
        query_str = 'SELECT {agg_str} FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
        results = [[getattr(r, 'col{}'.format(i)) for i in range(len(self.header))] for r in db.query(query_str, **where_map)]
    </DeepExtract>
    condition_options = list(range(len(self.header)))
    condition_options.remove(sel_index)
    for i in range(max_cond):
        if not results:
            break
        cond_index = random.choice(condition_options)
        if self.types[cond_index] == 'text':
            cond_op = cond_ops.index('=')
        else:
            cond_op = random.choice(list(range(len(cond_ops))))
        cond_val = random.choice([r[cond_index] for r in results])
        query.conditions.append((cond_index, cond_op, cond_val))
        <DeepExtract>
        sel_str = 'col{}'.format(query.sel_index) if query.sel_index >= 0 else '*'
        agg_str = sel_str
        agg_op = agg_ops[query.agg_index]
        if agg_op:
            agg_str = '{}({})'.format(agg_op, sel_str)
        where_str = ' AND '.join(['col{} {} :col{}'.format(i, cond_ops[o], i) for (i, o, v) in query.conditions])
        where_map = {'col{}'.format(i): v for (i, o, v) in query.conditions}
        if lower:
            where_map = {k: v.lower() if isinstance(v, str) else v for (k, v) in where_map.items()}
        if where_map:
            where_str = 'WHERE ' + where_str
        if query.sel_index >= 0:
            query_str = 'SELECT {agg_str} AS result FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
            new_results = [r.result for r in db.query(query_str, **where_map)]
        else:
            query_str = 'SELECT {agg_str} FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
            new_results = [[getattr(r, 'col{}'.format(i)) for i in range(len(self.header))] for r in db.query(query_str, **where_map)]
        </DeepExtract>
        if [r[sel_index] for r in new_results] != [r[sel_index] for r in results]:
            condition_options.remove(cond_index)
            results = new_results
        else:
            query.conditions.pop()
    if self.types[sel_index] == 'text':
        query.agg_index = agg_ops.index('')
    else:
        query.agg_index = random.choice(list(range(len(agg_ops))))
    query.sel_index = sel_index
    <DeepExtract>
    sel_str = 'col{}'.format(query.sel_index) if query.sel_index >= 0 else '*'
    agg_str = sel_str
    agg_op = agg_ops[query.agg_index]
    if agg_op:
        agg_str = '{}({})'.format(agg_op, sel_str)
    where_str = ' AND '.join(['col{} {} :col{}'.format(i, cond_ops[o], i) for (i, o, v) in query.conditions])
    where_map = {'col{}'.format(i): v for (i, o, v) in query.conditions}
    if lower:
        where_map = {k: v.lower() if isinstance(v, str) else v for (k, v) in where_map.items()}
    if where_map:
        where_str = 'WHERE ' + where_str
    if query.sel_index >= 0:
        query_str = 'SELECT {agg_str} AS result FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
        results = [r.result for r in db.query(query_str, **where_map)]
    else:
        query_str = 'SELECT {agg_str} FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
        results = [[getattr(r, 'col{}'.format(i)) for i in range(len(self.header))] for r in db.query(query_str, **where_map)]
    </DeepExtract>
    return (query, results)
def generate_query(self, db, max_cond=4):
    max_cond = min(len(self.header), max_cond)
    sel_index = random.choice(list(range(len(self.header))))
    query = Query(-1, agg_ops.index(''))
    sel_str = 'col{}'.format(query.sel_index) if query.sel_index >= 0 else '*'
    agg_str = sel_str
    agg_op = agg_ops[query.agg_index]
    if agg_op:
        agg_str = '{}({})'.format(agg_op, sel_str)
    where_str = ' AND '.join(['col{} {} :col{}'.format(i, cond_ops[o], i) for (i, o, v) in query.conditions])
    where_map = {'col{}'.format(i): v for (i, o, v) in query.conditions}
    if lower:
        where_map = {k: v.lower() if isinstance(v, str) else v for (k, v) in where_map.items()}
    if where_map:
        where_str = 'WHERE ' + where_str
    if query.sel_index >= 0:
        query_str = 'SELECT {agg_str} AS result FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
        results = [r.result for r in db.query(query_str, **where_map)]
    else:
        query_str = 'SELECT {agg_str} FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
        results = [[getattr(r, 'col{}'.format(i)) for i in range(len(self.header))] for r in db.query(query_str, **where_map)]
    condition_options = list(range(len(self.header)))
    condition_options.remove(sel_index)
    for i in range(max_cond):
        if not results:
            break
        cond_index = random.choice(condition_options)
        if self.types[cond_index] == 'text':
            cond_op = cond_ops.index('=')
        else:
            cond_op = random.choice(list(range(len(cond_ops))))
        cond_val = random.choice([r[cond_index] for r in results])
        query.conditions.append((cond_index, cond_op, cond_val))
        sel_str = 'col{}'.format(query.sel_index) if query.sel_index >= 0 else '*'
        agg_str = sel_str
        agg_op = agg_ops[query.agg_index]
        if agg_op:
            agg_str = '{}({})'.format(agg_op, sel_str)
        where_str = ' AND '.join(['col{} {} :col{}'.format(i, cond_ops[o], i) for (i, o, v) in query.conditions])
        where_map = {'col{}'.format(i): v for (i, o, v) in query.conditions}
        if lower:
            where_map = {k: v.lower() if isinstance(v, str) else v for (k, v) in where_map.items()}
        if where_map:
            where_str = 'WHERE ' + where_str
        if query.sel_index >= 0:
            query_str = 'SELECT {agg_str} AS result FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
            new_results = [r.result for r in db.query(query_str, **where_map)]
        else:
            query_str = 'SELECT {agg_str} FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
            new_results = [[getattr(r, 'col{}'.format(i)) for i in range(len(self.header))] for r in db.query(query_str, **where_map)]
        if [r[sel_index] for r in new_results] != [r[sel_index] for r in results]:
            condition_options.remove(cond_index)
            results = new_results
        else:
            query.conditions.pop()
    if self.types[sel_index] == 'text':
        query.agg_index = agg_ops.index('')
    else:
        query.agg_index = random.choice(list(range(len(agg_ops))))
    query.sel_index = sel_index
    sel_str = 'col{}'.format(query.sel_index) if query.sel_index >= 0 else '*'
    agg_str = sel_str
    agg_op = agg_ops[query.agg_index]
    if agg_op:
        agg_str = '{}({})'.format(agg_op, sel_str)
    where_str = ' AND '.join(['col{} {} :col{}'.format(i, cond_ops[o], i) for (i, o, v) in query.conditions])
    where_map = {'col{}'.format(i): v for (i, o, v) in query.conditions}
    if lower:
        where_map = {k: v.lower() if isinstance(v, str) else v for (k, v) in where_map.items()}
    if where_map:
        where_str = 'WHERE ' + where_str
    if query.sel_index >= 0:
        query_str = 'SELECT {agg_str} AS result FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
        results = [r.result for r in db.query(query_str, **where_map)]
    else:
        query_str = 'SELECT {agg_str} FROM {name} {where_str}'.format(agg_str=agg_str, name=self.name, where_str=where_str)
        results = [[getattr(r, 'col{}'.format(i)) for i in range(len(self.header))] for r in db.query(query_str, **where_map)]
    return (query, results)
coarse2fine
positive
def evaluate_detections(self, all_boxes, output_dir):
    <DeepExtract>
    for (cls_ind, cls) in enumerate(self.classes):
        if cls == '__background__':
            continue
        print('Writing "{}" vg results file'.format(cls))
        filename = self._get_vg_results_file_template(output_dir).format(cls)
        with open(filename, 'wt') as f:
            for (im_ind, index) in enumerate(self.image_index):
                dets = all_boxes[cls_ind][im_ind]
                if dets == []:
                    continue
                for k in xrange(dets.shape[0]):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(str(index), dets[k, -1], dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1))
    </DeepExtract>
    <DeepExtract>
    aps = []
    nposs = []
    thresh = []
    use_07_metric = False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    gt_roidb = self.gt_roidb()
    if eval_attributes:
        classes = self._attributes
    else:
        classes = self._classes
    for (i, cls) in enumerate(classes):
        if cls == '__background__' or cls == '__no_attribute__':
            continue
        filename = self._get_vg_results_file_template(output_dir).format(cls)
        (rec, prec, ap, scores, npos) = vg_eval(filename, gt_roidb, self.image_index, i, ovthresh=0.5, use_07_metric=use_07_metric, eval_attributes=eval_attributes)
        if npos > 1:
            f = np.nan_to_num(prec * rec / (prec + rec))
            thresh += [scores[np.argmax(f)]]
        else:
            thresh += [0]
        aps += [ap]
        nposs += [float(npos)]
        print('AP for {} = {:.4f} (npos={:,})'.format(cls, ap, npos))
        if pickle:
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap, 'scores': scores, 'npos': npos}, f)
    thresh = np.array(thresh)
    avg_thresh = np.mean(thresh[thresh != 0])
    thresh[thresh == 0] = avg_thresh
    if eval_attributes:
        filename = 'attribute_thresholds_' + self._image_set + '.txt'
    else:
        filename = 'object_thresholds_' + self._image_set + '.txt'
    path = os.path.join(output_dir, filename)
    with open(path, 'wt') as f:
        for (i, cls) in enumerate(classes[1:]):
            f.write('{:s} {:.3f}\n'.format(cls, thresh[i]))
    weights = np.array(nposs)
    weights /= weights.sum()
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('Weighted Mean AP = {:.4f}'.format(np.average(aps, weights=weights)))
    print('Mean Detection Threshold = {:.3f}'.format(avg_thresh))
    print('~~~~~~~~')
    print('Results:')
    for (ap, npos) in zip(aps, nposs):
        print('{:.3f}\t{:.3f}'.format(ap, npos))
    print('{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** PASCAL VOC Python eval code.')
    print('--------------------------------------------------------------')
    </DeepExtract>
    if self.config['cleanup']:
        for cls in self._classes:
            if cls == '__background__':
                continue
            filename = self._get_vg_results_file_template(output_dir).format(cls)
            os.remove(filename)
def evaluate_detections(self, all_boxes, output_dir):
    for (cls_ind, cls) in enumerate(self.classes):
        if cls == '__background__':
            continue
        print('Writing "{}" vg results file'.format(cls))
        filename = self._get_vg_results_file_template(output_dir).format(cls)
        with open(filename, 'wt') as f:
            for (im_ind, index) in enumerate(self.image_index):
                dets = all_boxes[cls_ind][im_ind]
                if dets == []:
                    continue
                for k in xrange(dets.shape[0]):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(str(index), dets[k, -1], dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1))
    aps = []
    nposs = []
    thresh = []
    use_07_metric = False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    gt_roidb = self.gt_roidb()
    if eval_attributes:
        classes = self._attributes
    else:
        classes = self._classes
    for (i, cls) in enumerate(classes):
        if cls == '__background__' or cls == '__no_attribute__':
            continue
        filename = self._get_vg_results_file_template(output_dir).format(cls)
        (rec, prec, ap, scores, npos) = vg_eval(filename, gt_roidb, self.image_index, i, ovthresh=0.5, use_07_metric=use_07_metric, eval_attributes=eval_attributes)
        if npos > 1:
            f = np.nan_to_num(prec * rec / (prec + rec))
            thresh += [scores[np.argmax(f)]]
        else:
            thresh += [0]
        aps += [ap]
        nposs += [float(npos)]
        print('AP for {} = {:.4f} (npos={:,})'.format(cls, ap, npos))
        if pickle:
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap, 'scores': scores, 'npos': npos}, f)
    thresh = np.array(thresh)
    avg_thresh = np.mean(thresh[thresh != 0])
    thresh[thresh == 0] = avg_thresh
    if eval_attributes:
        filename = 'attribute_thresholds_' + self._image_set + '.txt'
    else:
        filename = 'object_thresholds_' + self._image_set + '.txt'
    path = os.path.join(output_dir, filename)
    with open(path, 'wt') as f:
        for (i, cls) in enumerate(classes[1:]):
            f.write('{:s} {:.3f}\n'.format(cls, thresh[i]))
    weights = np.array(nposs)
    weights /= weights.sum()
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('Weighted Mean AP = {:.4f}'.format(np.average(aps, weights=weights)))
    print('Mean Detection Threshold = {:.3f}'.format(avg_thresh))
    print('~~~~~~~~')
    print('Results:')
    for (ap, npos) in zip(aps, nposs):
        print('{:.3f}\t{:.3f}'.format(ap, npos))
    print('{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** PASCAL VOC Python eval code.')
    print('--------------------------------------------------------------')
    if self.config['cleanup']:
        for cls in self._classes:
            if cls == '__background__':
                continue
            filename = self._get_vg_results_file_template(output_dir).format(cls)
            os.remove(filename)
cascade-rcnn-fpn-faster_rcnn-pytorch1.0
positive
@_make_expr_internal.register(instrs.CALL_FUNCTION)
def _make_expr_call_function(toplevel, stack_builders):
    <DeepExtract>
    out = []
    for _ in range(toplevel.keyword):
        value = make_expr(stack_builders)
        load_kwname = stack_builders.pop()
        if not isinstance(load_kwname, instrs.LOAD_CONST):
            raise DecompilationError('Expected a LOAD_CONST, but got %r' % load_kwname)
        if not isinstance(load_kwname.arg, str):
            raise DecompilationError('Expected LOAD_CONST of a str, but got %r.' % load_kwname)
        out.append(ast.keyword(arg=load_kwname.arg, value=value))
    out.reverse()
    keywords = out
    </DeepExtract>
    <DeepExtract>
    out = [make_expr(stack_builders) for _ in range(toplevel.positional)]
    out.reverse()
    positionals = out
    </DeepExtract>
    return ast.Call(func=make_expr(stack_builders), args=positionals, keywords=keywords, starargs=None, kwargs=None)
@_make_expr_internal.register(instrs.CALL_FUNCTION)
def _make_expr_call_function(toplevel, stack_builders):
    out = []
    for _ in range(toplevel.keyword):
        value = make_expr(stack_builders)
        load_kwname = stack_builders.pop()
        if not isinstance(load_kwname, instrs.LOAD_CONST):
            raise DecompilationError('Expected a LOAD_CONST, but got %r' % load_kwname)
        if not isinstance(load_kwname.arg, str):
            raise DecompilationError('Expected LOAD_CONST of a str, but got %r.' % load_kwname)
        out.append(ast.keyword(arg=load_kwname.arg, value=value))
    out.reverse()
    keywords = out
    out = [make_expr(stack_builders) for _ in range(toplevel.positional)]
    out.reverse()
    positionals = out
    return ast.Call(func=make_expr(stack_builders), args=positionals, keywords=keywords, starargs=None, kwargs=None)
codetransformer
positive
def handle(self, handler_input):
    <DeepExtract>
    if is_user_on_session(handler_input) and has_active_adventure(handler_input):
        table = boto3.resource('dynamodb').Table('AdvgGameStats')
        table.update_item(Key={'PlayerNumber': handler_input.attributes_manager.session_attributes['user']['Items'][0]['PlayerNumber'], 'CountryId': get_country_id(handler_input.attributes_manager.session_attributes['country'])}, UpdateExpression='set EnergyLevel = :e, MoneyLevel=:m, QuestionNumber=:q, CurrentTurns=:c', ConditionExpression='ActiveFlag=:a', ExpressionAttributeValues={':e': handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'], ':m': handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'], ':q': handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'], ':c': handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['CurrentTurns'], ':a': 'Y'}, ReturnValues='UPDATED_NEW')
        maxTurns = handler_input.attributes_manager.session_attributes['user']['Items'][0]['MaxTurns']
        currentTurns = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['CurrentTurns']
        if currentTurns > maxTurns:
            table = boto3.resource('dynamodb').Table('AdvgUsers')
            table.update_item(Key={'UserId': handler_input.attributes_manager.session_attributes['user']['Items'][0]['UserId'], 'PlayerNumber': handler_input.attributes_manager.session_attributes['user']['Items'][0]['PlayerNumber']}, UpdateExpression='set MaxTurns = :m', ExpressionAttributeValues={':m': currentTurns})
    </DeepExtract>
    speak_output = 'Goodbye!' + getRandomFact() + '. New adventures to Egypt, England, and Greece coming soon!'
    return handler_input.response_builder.speak(speak_output).response
def handle(self, handler_input): if is_user_on_session(handler_input) and has_active_adventure(handler_input): table = boto3.resource('dynamodb').Table('AdvgGameStats') table.update_item(Key={'PlayerNumber': handler_input.attributes_manager.session_attributes['user']['Items'][0]['PlayerNumber'], 'CountryId': get_country_id(handler_input.attributes_manager.session_attributes['country'])}, UpdateExpression='set EnergyLevel = :e, MoneyLevel=:m, QuestionNumber=:q, CurrentTurns=:c', ConditionExpression='ActiveFlag=:a', ExpressionAttributeValues={':e': handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'], ':m': handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'], ':q': handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'], ':c': handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['CurrentTurns'], ':a': 'Y'}, ReturnValues='UPDATED_NEW') maxTurns = handler_input.attributes_manager.session_attributes['user']['Items'][0]['MaxTurns'] currentTurns = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['CurrentTurns'] if currentTurns > maxTurns: table = boto3.resource('dynamodb').Table('AdvgUsers') table.update_item(Key={'UserId': handler_input.attributes_manager.session_attributes['user']['Items'][0]['UserId'], 'PlayerNumber': handler_input.attributes_manager.session_attributes['user']['Items'][0]['PlayerNumber']}, UpdateExpression='set MaxTurns = :m', ExpressionAttributeValues={':m': currentTurns}) speak_output = 'Goodbye!' + getRandomFact() + '. New adventures to Egypt, England, and Greece coming soon!' return handler_input.response_builder.speak(speak_output).response
Course_Alexa_Skill_Builder
positive
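As a reference point, a minimal sketch of the conditional update this record performs, using the standard boto3 Table.update_item call; the key values and the new energy level below are hypothetical.

import boto3

table = boto3.resource('dynamodb').Table('AdvgGameStats')
# The update succeeds only while ActiveFlag is 'Y'; otherwise DynamoDB
# raises a ConditionalCheckFailedException.
table.update_item(
    Key={'PlayerNumber': 1, 'CountryId': 42},  # hypothetical key
    UpdateExpression='set EnergyLevel = :e',
    ConditionExpression='ActiveFlag = :a',
    ExpressionAttributeValues={':e': 80, ':a': 'Y'},
    ReturnValues='UPDATED_NEW',
)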
def listen_unit(self, unit): <DeepExtract> try: conf = self.load_sysd_unit_conf(unit) if conf is not None: conf = conf conf = self.load_sysd_template_conf(unit) if conf is not None: conf = conf conf = self.load_sysv_unit_conf(unit) if conf is not None: conf = conf except Exception as e: logg.warning('%s not loaded: %s', unit, e) conf = None </DeepExtract> if conf is None: logg.debug('unit could not be loaded (%s)', unit) logg.error('Unit %s not found.', unit) return False if self.not_user_conf(conf): logg.error('Unit %s not for --user mode', unit) return False return self.listen_unit_from(conf)
def listen_unit(self, unit): try: conf = self.load_sysd_unit_conf(unit) if conf is not None: conf = conf conf = self.load_sysd_template_conf(unit) if conf is not None: conf = conf conf = self.load_sysv_unit_conf(unit) if conf is not None: conf = conf except Exception as e: logg.warning('%s not loaded: %s', unit, e) conf = None if conf is None: logg.debug('unit could not be loaded (%s)', unit) logg.error('Unit %s not found.', unit) return False if self.not_user_conf(conf): logg.error('Unit %s not for --user mode', unit) return False return self.listen_unit_from(conf)
deployment
positive
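For comparison, a sketch of the first-match fallback chain this record inlines (systemd unit, then template, then SysV), written with an early return instead of repeated reassignment; the loader callables are hypothetical stand-ins.

def load_unit_conf(unit, loaders):
    # Return the first non-None result; None means no loader matched.
    for load in loaders:
        conf = load(unit)
        if conf is not None:
            return conf
    return None

# Usage sketch with hypothetical loaders:
# conf = load_unit_conf(unit, [self.load_sysd_unit_conf,
#                              self.load_sysd_template_conf,
#                              self.load_sysv_unit_conf])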
def link_file(source, target): <DeepExtract> print('[%s] %s' % (log_type, 'Linking %s -> %s' % (source, target))) </DeepExtract> <DeepExtract> command_string = ['ln', '-s', source, target] close = None if isinstance(['ln', '-s', source, target], (list, tuple)): command_string = subprocess.list2cmdline(['ln', '-s', source, target]) if shell: ['ln', '-s', source, target] = command_string else: sys.stderr.write('Deprecation warning! Switch arguments to a list for common.execute()\n\n') if not hide_log: print('%s @ %s > %s' % (USER_NAME, HOSTNAME, command_string)) if isinstance(stdin, (bytes, str)): (stdin, close) = temporary_file_helper(stdin) stdout = subprocess.PIPE stderr = subprocess.STDOUT if drop or kill: devnull = open(os.devnull, 'w') stdout = devnull stderr = devnull start = time.time() p = subprocess.Popen(['ln', '-s', source, target], stdin=stdin or subprocess.PIPE, stdout=stdout, stderr=stderr, shell=shell, **kwargs) if kill: delta = 0.5 for _ in range(int(timeout / delta) + 1): time.sleep(delta) if p.poll() is not None: return log('Killing process', str(p.pid)) try: p.kill() time.sleep(0.5) except OSError: pass elif wait: output = '' if not stdin: try: p.stdin.write(os.linesep.encode('ascii')) except IOError: pass while p.poll() is None: line = p.stdout.readline().decode('ascii', 'ignore') if line: output += line if not (hide_log or mute): print(line.rstrip()) output += p.stdout.read().decode('ascii', 'ignore') output = output.strip() end = time.time() run_time = end - start if run_time < MIN_EXECUTION_TIME: time.sleep(MIN_EXECUTION_TIME - run_time) if not (hide_log or mute): if p.returncode != 0: print('exit code = %d' % p.returncode) print('') if close: close() return (p.returncode, output) else: if close: close() return p </DeepExtract>
def link_file(source, target): print('[%s] %s' % (log_type, 'Linking %s -> %s' % (source, target))) command_string = ['ln', '-s', source, target] close = None if isinstance(['ln', '-s', source, target], (list, tuple)): command_string = subprocess.list2cmdline(['ln', '-s', source, target]) if shell: ['ln', '-s', source, target] = command_string else: sys.stderr.write('Deprecation warning! Switch arguments to a list for common.execute()\n\n') if not hide_log: print('%s @ %s > %s' % (USER_NAME, HOSTNAME, command_string)) if isinstance(stdin, (bytes, str)): (stdin, close) = temporary_file_helper(stdin) stdout = subprocess.PIPE stderr = subprocess.STDOUT if drop or kill: devnull = open(os.devnull, 'w') stdout = devnull stderr = devnull start = time.time() p = subprocess.Popen(['ln', '-s', source, target], stdin=stdin or subprocess.PIPE, stdout=stdout, stderr=stderr, shell=shell, **kwargs) if kill: delta = 0.5 for _ in range(int(timeout / delta) + 1): time.sleep(delta) if p.poll() is not None: return log('Killing process', str(p.pid)) try: p.kill() time.sleep(0.5) except OSError: pass elif wait: output = '' if not stdin: try: p.stdin.write(os.linesep.encode('ascii')) except IOError: pass while p.poll() is None: line = p.stdout.readline().decode('ascii', 'ignore') if line: output += line if not (hide_log or mute): print(line.rstrip()) output += p.stdout.read().decode('ascii', 'ignore') output = output.strip() end = time.time() run_time = end - start if run_time < MIN_EXECUTION_TIME: time.sleep(MIN_EXECUTION_TIME - run_time) if not (hide_log or mute): if p.returncode != 0: print('exit code = %d' % p.returncode) print('') if close: close() return (p.returncode, output) else: if close: close() return p
detection-rules
positive
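Independent of the inlined execute() wrapper, the symlink step itself reduces to a one-liner; both variants below use only standard-library calls.

import os
import subprocess

def link_file(source, target):
    # Equivalent of `ln -s source target`; check=True raises on failure.
    subprocess.run(['ln', '-s', source, target], check=True)

def link_file_native(source, target):
    # Same effect without spawning a child process.
    os.symlink(source, target)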
def resolveName(self, paramtag, datatag, wantSubItems=True, wantImportance=True, raiseError=True, base='base', returnJobItem=False): if paramtag: if isinstance(paramtag, str): paramtag = paramtag.split('_') paramtags = [base] + sorted(paramtag) else: paramtag = [base] paramtags = [base] name = '_'.join(paramtags) + '_' + self.normalizeDataTag(datatag) <DeepExtract> for jobItem in self.items(wantSubItems, wantImportance): if jobItem.normed_name == name and jobItem is not exclude: jobItem = jobItem jobItem = None </DeepExtract> if jobItem is not None: return (jobItem.name, jobItem)[returnJobItem] if raiseError: raise Exception('No match for paramtag, datatag... ' + '_'.join(paramtag) + ', ' + datatag) else: return None
def resolveName(self, paramtag, datatag, wantSubItems=True, wantImportance=True, raiseError=True, base='base', returnJobItem=False): if paramtag: if isinstance(paramtag, str): paramtag = paramtag.split('_') paramtags = [base] + sorted(paramtag) else: paramtag = [base] paramtags = [base] name = '_'.join(paramtags) + '_' + self.normalizeDataTag(datatag) for jobItem in self.items(wantSubItems, wantImportance): if jobItem.normed_name == name and jobItem is not exclude: jobItem = jobItem jobItem = None if jobItem is not None: return (jobItem.name, jobItem)[returnJobItem] if raiseError: raise Exception('No match for paramtag, datatag... ' + '_'.join(paramtag) + ', ' + datatag) else: return None
cobaya
positive
def plot_regressions(ksi, eta, x, y, sigma_x, sigma_y, add_regression_lines=False, alpha_in=1, beta_in=0.5, basis='linear'): figure = plt.figure(figsize=(8, 6)) ax = figure.add_subplot(111) ax.scatter(x, y, alpha=0.5) ax.errorbar(x, y, xerr=sigma_x, yerr=sigma_y, alpha=0.3, ls='') ax.set_xlabel('x') ax.set_ylabel('y') x0 = np.linspace(np.min(x) - 0.5, np.max(x) + 0.5, 20) if alpha_in is not None and beta_in is not None: if basis == 'linear': y0 = alpha_in + x0 * beta_in elif basis == 'poly': y0 = alpha_in + beta_in[0] * x0 + beta_in[1] * x0 * x0 + beta_in[2] * x0 * x0 * x0 ax.plot(x0, y0, color='black', label='True regression') else: y0 = None if add_regression_lines: for (label, data, *target) in [['fit no errors', x, y, 1], ['fit y errors only', x, y, sigma_y], ['fit x errors only', y, x, sigma_x]]: linreg = LinearRegression() linreg.fit(data[:, None], *target) if label == 'fit x errors only' and y0 is not None: x_fit = linreg.predict(y0[:, None]) ax.plot(x_fit, y0, label=label) else: y_fit = linreg.predict(x0[:, None]) ax.plot(x0, y_fit, label=label) X = np.vstack((x, y)).T dX = np.zeros((len(x), 2, 2)) dX[:, 0, 0] = sigma_x dX[:, 1, 1] = sigma_y def min_func(beta): return -TLS_logL(beta, X, dX) beta_fit = optimize.fmin(min_func, x0=[-1, 1]) <DeepExtract> b = np.dot(beta_fit, beta_fit) / beta_fit[1] m = -beta_fit[0] / beta_fit[1] (m_fit, b_fit) = (m, b) </DeepExtract> x_fit = np.linspace(-10, 10, 20) ax.plot(x_fit, m_fit * x_fit + b_fit, label='TLS') ax.set_xlim(np.min(x) - 0.5, np.max(x) + 0.5) ax.set_ylim(np.min(y) - 0.5, np.max(y) + 0.5) ax.legend()
def plot_regressions(ksi, eta, x, y, sigma_x, sigma_y, add_regression_lines=False, alpha_in=1, beta_in=0.5, basis='linear'): figure = plt.figure(figsize=(8, 6)) ax = figure.add_subplot(111) ax.scatter(x, y, alpha=0.5) ax.errorbar(x, y, xerr=sigma_x, yerr=sigma_y, alpha=0.3, ls='') ax.set_xlabel('x') ax.set_ylabel('y') x0 = np.linspace(np.min(x) - 0.5, np.max(x) + 0.5, 20) if alpha_in is not None and beta_in is not None: if basis == 'linear': y0 = alpha_in + x0 * beta_in elif basis == 'poly': y0 = alpha_in + beta_in[0] * x0 + beta_in[1] * x0 * x0 + beta_in[2] * x0 * x0 * x0 ax.plot(x0, y0, color='black', label='True regression') else: y0 = None if add_regression_lines: for (label, data, *target) in [['fit no errors', x, y, 1], ['fit y errors only', x, y, sigma_y], ['fit x errors only', y, x, sigma_x]]: linreg = LinearRegression() linreg.fit(data[:, None], *target) if label == 'fit x errors only' and y0 is not None: x_fit = linreg.predict(y0[:, None]) ax.plot(x_fit, y0, label=label) else: y_fit = linreg.predict(x0[:, None]) ax.plot(x0, y_fit, label=label) X = np.vstack((x, y)).T dX = np.zeros((len(x), 2, 2)) dX[:, 0, 0] = sigma_x dX[:, 1, 1] = sigma_y def min_func(beta): return -TLS_logL(beta, X, dX) beta_fit = optimize.fmin(min_func, x0=[-1, 1]) b = np.dot(beta_fit, beta_fit) / beta_fit[1] m = -beta_fit[0] / beta_fit[1] (m_fit, b_fit) = (m, b) x_fit = np.linspace(-10, 10, 20) ax.plot(x_fit, m_fit * x_fit + b_fit, label='TLS') ax.set_xlim(np.min(x) - 0.5, np.max(x) + 0.5) ax.set_ylim(np.min(y) - 0.5, np.max(y) + 0.5) ax.legend()
astroML
positive
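For reference, the slope/intercept conversion inlined above in isolation: total least squares parametrizes the line by a vector beta, and the record recovers m = -beta[0]/beta[1] and b = (beta . beta)/beta[1].

import numpy as np

def tls_to_slope_intercept(beta):
    # Convert the two-component TLS parameter vector to (m, b) for y = m*x + b.
    beta = np.asarray(beta, dtype=float)
    m = -beta[0] / beta[1]
    b = np.dot(beta, beta) / beta[1]
    return (m, b)

(m, b) = tls_to_slope_intercept([-1.0, 2.0])  # hypothetical fit result
assert np.isclose(m, 0.5) and np.isclose(b, 2.5)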
@write_lock(config_lock) def switch_tags(self, tag1, tag2): <DeepExtract> c = eval(repr(self.config), {}, {}) </DeepExtract> t1_idx = c['tagorder'].index(tag1) t2_idx = c['tagorder'].index(tag2) c['tagorder'][t1_idx] = tag2 c['tagorder'][t2_idx] = tag1 <DeepExtract> self.prot_configs({'CantoCurses': c}, True) </DeepExtract> <DeepExtract> prevtags = self.vars['curtags'] sorted_tags = [] r = re.compile(self.config['tags']) for tag in self.vars['strtags']: if tag not in self.config['tagorder']: continue elif r.match(tag): sorted_tags.append((self.config['tagorder'].index(tag), tag)) sorted_tags.sort() self.set_var('curtags', [x for (i, x) in sorted_tags]) if not self.vars['curtags']: log.warn("NOTE: Current 'tags' setting eliminated all tags!") if prevtags != self.vars['curtags']: log.debug('Evaluated Tags Changed:\n%s\n', json.dumps(self.vars['curtags'], indent=4)) call_hook('curses_eval_tags_changed', []) </DeepExtract>
@write_lock(config_lock) def switch_tags(self, tag1, tag2): c = eval(repr(self.config), {}, {}) t1_idx = c['tagorder'].index(tag1) t2_idx = c['tagorder'].index(tag2) c['tagorder'][t1_idx] = tag2 c['tagorder'][t2_idx] = tag1 self.prot_configs({'CantoCurses': c}, True) prevtags = self.vars['curtags'] sorted_tags = [] r = re.compile(self.config['tags']) for tag in self.vars['strtags']: if tag not in self.config['tagorder']: continue elif r.match(tag): sorted_tags.append((self.config['tagorder'].index(tag), tag)) sorted_tags.sort() self.set_var('curtags', [x for (i, x) in sorted_tags]) if not self.vars['curtags']: log.warn("NOTE: Current 'tags' setting eliminated all tags!") if prevtags != self.vars['curtags']: log.debug('Evaluated Tags Changed:\n%s\n', json.dumps(self.vars['curtags'], indent=4)) call_hook('curses_eval_tags_changed', [])
canto-curses
positive
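The record copies its config by round-tripping through eval(repr(...)); for plain dict/list/str data, copy.deepcopy is the idiomatic equivalent, sketched below as an alternative rather than the project's own approach.

import copy

config = {'tagorder': ['news', 'mail'], 'tags': '.*'}  # hypothetical config
c = copy.deepcopy(config)  # independent copy; mutating c leaves config intact
(c['tagorder'][0], c['tagorder'][1]) = (c['tagorder'][1], c['tagorder'][0])
assert config['tagorder'] == ['news', 'mail']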
@pytest.mark.parametrize(['vasprun_parser'], [('relax',)], indirect=True) def test_create_node_energies_relax(fresh_aiida_env, vasprun_parser): """Check that the node composer works for the energies node and contain the correct energies for each ionic step.""" node_settings_key = 'energies' assert NODES[node_settings_key]['link_name'] == 'energies' assert NODES[node_settings_key]['type'] == 'core.array' <DeepExtract> requested_node = {NODES[node_settings_key]['link_name']: NODES[node_settings_key]} parsed_quantities = {} equivalent_keys = {} for parser in [vasprun_parser]: for item in NODES[node_settings_key]['quantities']: if item in parser.PARSABLE_QUANTITIES: parsed_quantities[item] = parser.get_quantity(item) equivalent_keys[item] = [item] composed_nodes = NodeComposer(requested_node, equivalent_keys, parsed_quantities) data_class = get_data_class(NODES[node_settings_key]['type']) assert NODES[node_settings_key]['link_name'] in composed_nodes.successful assert isinstance(composed_nodes.successful[NODES[node_settings_key]['link_name']], data_class) composed_nodes = composed_nodes </DeepExtract> energies = composed_nodes.successful['energies'] energies_ext = energies.get_array('energy_extrapolated') assert set(energies.get_arraynames()) == set(['energy_extrapolated', 'energy_extrapolated_electronic', 'electronic_steps']) test_array = np.array([-42.91113348, -43.27757545, -43.36648855, -43.37734069, -43.38062479, -43.38334165, -43.38753003, -43.38708193, -43.38641449, -43.38701639, -43.38699488, -43.38773717, -43.38988315, -43.3898822, -43.39011239, -43.39020751, -43.39034244, -43.39044584, -43.39087657]) np.testing.assert_allclose(test_array, energies_ext, atol=0.0, rtol=1e-07) assert energies_ext.shape == test_array.shape np.testing.assert_allclose(np.ones(19, dtype=int), energies.get_array('electronic_steps'), atol=0.0, rtol=1e-07) test_array = np.array([-0.00236637, -0.00048614, -0.00047201, -0.00043261, -0.00041668, -0.00042584, -0.00043637, -0.00042806, -0.00042762, -0.00043875, -0.00042731, -0.00042705, -0.00043064, -0.00043051, -0.00043161, -0.00043078, -0.00043053, -0.00043149, -0.00043417]) with np.testing.assert_raises(AssertionError): np.testing.assert_allclose(test_array, energies.get_array('energy_extrapolated'), atol=0.0, rtol=1e-07) test_array = np.array([-42.911133, -43.277575, -43.366489, -43.377341, -43.380625, -43.383342, -43.38753, -43.387082, -43.386414, -43.387016, -43.386995, -43.387737, -43.389883, -43.389882, -43.390112, -43.390208, -43.390342, -43.390446, -43.390877]) np.testing.assert_allclose(test_array, energies.get_array('energy_extrapolated'), atol=0.0, rtol=1e-07)
@pytest.mark.parametrize(['vasprun_parser'], [('relax',)], indirect=True) def test_create_node_energies_relax(fresh_aiida_env, vasprun_parser): """Check that the node composer works for the energies node and contain the correct energies for each ionic step.""" node_settings_key = 'energies' assert NODES[node_settings_key]['link_name'] == 'energies' assert NODES[node_settings_key]['type'] == 'core.array' requested_node = {NODES[node_settings_key]['link_name']: NODES[node_settings_key]} parsed_quantities = {} equivalent_keys = {} for parser in [vasprun_parser]: for item in NODES[node_settings_key]['quantities']: if item in parser.PARSABLE_QUANTITIES: parsed_quantities[item] = parser.get_quantity(item) equivalent_keys[item] = [item] composed_nodes = NodeComposer(requested_node, equivalent_keys, parsed_quantities) data_class = get_data_class(NODES[node_settings_key]['type']) assert NODES[node_settings_key]['link_name'] in composed_nodes.successful assert isinstance(composed_nodes.successful[NODES[node_settings_key]['link_name']], data_class) composed_nodes = composed_nodes energies = composed_nodes.successful['energies'] energies_ext = energies.get_array('energy_extrapolated') assert set(energies.get_arraynames()) == set(['energy_extrapolated', 'energy_extrapolated_electronic', 'electronic_steps']) test_array = np.array([-42.91113348, -43.27757545, -43.36648855, -43.37734069, -43.38062479, -43.38334165, -43.38753003, -43.38708193, -43.38641449, -43.38701639, -43.38699488, -43.38773717, -43.38988315, -43.3898822, -43.39011239, -43.39020751, -43.39034244, -43.39044584, -43.39087657]) np.testing.assert_allclose(test_array, energies_ext, atol=0.0, rtol=1e-07) assert energies_ext.shape == test_array.shape np.testing.assert_allclose(np.ones(19, dtype=int), energies.get_array('electronic_steps'), atol=0.0, rtol=1e-07) test_array = np.array([-0.00236637, -0.00048614, -0.00047201, -0.00043261, -0.00041668, -0.00042584, -0.00043637, -0.00042806, -0.00042762, -0.00043875, -0.00042731, -0.00042705, -0.00043064, -0.00043051, -0.00043161, -0.00043078, -0.00043053, -0.00043149, -0.00043417]) with np.testing.assert_raises(AssertionError): np.testing.assert_allclose(test_array, energies.get_array('energy_extrapolated'), atol=0.0, rtol=1e-07) test_array = np.array([-42.911133, -43.277575, -43.366489, -43.377341, -43.380625, -43.383342, -43.38753, -43.387082, -43.386414, -43.387016, -43.386995, -43.387737, -43.389883, -43.389882, -43.390112, -43.390208, -43.390342, -43.390446, -43.390877]) np.testing.assert_allclose(test_array, energies.get_array('energy_extrapolated'), atol=0.0, rtol=1e-07)
aiida-vasp
positive
@action(detail=False, methods=['post']) def login(self, request): serializer = api_serializers.LoginSerializer(data=self.request.data, context={'request': self.request}) serializer.is_valid(raise_exception=True) user = serializer.validated_data['user'] <DeepExtract> serializer = api_serializers.LoginSerializer(data=self.request.data, context={'request': self.request}) serializer.is_valid(raise_exception=True) user = serializer.validated_data['user'] login(request, user) return Response(None, status=status.HTTP_202_ACCEPTED) </DeepExtract> return Response(None, status=status.HTTP_202_ACCEPTED)
@action(detail=False, methods=['post']) def login(self, request): serializer = api_serializers.LoginSerializer(data=self.request.data, context={'request': self.request}) serializer.is_valid(raise_exception=True) user = serializer.validated_data['user'] serializer = api_serializers.LoginSerializer(data=self.request.data, context={'request': self.request}) serializer.is_valid(raise_exception=True) user = serializer.validated_data['user'] login(request, user) return Response(None, status=status.HTTP_202_ACCEPTED) return Response(None, status=status.HTTP_202_ACCEPTED)
aiarena-web
positive
def set_active_profile(workspace_path, profile_name): """Set a profile in a given workspace to be active. :param workspace_path: The exact path to the root of a catkin_tools workspace :type workspace_path: str :param profile_name: The catkin_tools metadata profile name to activate :type profile_name: str """ <DeepExtract> migrate_metadata(workspace_path) if workspace_path is not None: profiles_path = get_profiles_path(workspace_path) profiles_yaml_file_path = os.path.join(profiles_path, PROFILES_YML_FILE_NAME) if os.path.exists(profiles_yaml_file_path): with open(profiles_yaml_file_path, 'r') as profiles_file: profiles_data = yaml.safe_load(profiles_file) profiles_data = {} </DeepExtract> profiles_data['active'] = profile_name <DeepExtract> if workspace_path is None: profiles_path = None profiles_path = os.path.join(workspace_path, METADATA_DIR_NAME, 'profiles') </DeepExtract> profiles_yaml_file_path = os.path.join(profiles_path, PROFILES_YML_FILE_NAME) with open(profiles_yaml_file_path, 'w') as profiles_file: yaml.dump(profiles_data, profiles_file, default_flow_style=False)
def set_active_profile(workspace_path, profile_name): """Set a profile in a given workspace to be active. :param workspace_path: The exact path to the root of a catkin_tools workspace :type workspace_path: str :param profile_name: The catkin_tools metadata profile name to activate :type profile_name: str """ migrate_metadata(workspace_path) if workspace_path is not None: profiles_path = get_profiles_path(workspace_path) profiles_yaml_file_path = os.path.join(profiles_path, PROFILES_YML_FILE_NAME) if os.path.exists(profiles_yaml_file_path): with open(profiles_yaml_file_path, 'r') as profiles_file: profiles_data = yaml.safe_load(profiles_file) profiles_data = {} profiles_data['active'] = profile_name if workspace_path is None: profiles_path = None profiles_path = os.path.join(workspace_path, METADATA_DIR_NAME, 'profiles') profiles_yaml_file_path = os.path.join(profiles_path, PROFILES_YML_FILE_NAME) with open(profiles_yaml_file_path, 'w') as profiles_file: yaml.dump(profiles_data, profiles_file, default_flow_style=False)
catkin_tools
positive
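A minimal sketch of the read-modify-write YAML round trip this record performs, using PyYAML's safe_load and dump; the file path and profile name are hypothetical.

import os
import yaml

path = '/tmp/profiles.yaml'  # hypothetical location

profiles_data = {}
if os.path.exists(path):
    with open(path) as f:
        profiles_data = yaml.safe_load(f) or {}

profiles_data['active'] = 'release'  # hypothetical profile name

with open(path, 'w') as f:
    yaml.dump(profiles_data, f, default_flow_style=False)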
def _qr_economic(r): a_shape = (r.shape[0], r.shape[1]) a_n_blocks = (r._n_blocks[0], r._n_blocks[1]) b_size = r._reg_shape <DeepExtract> a = eye(r.shape[0], a_shape[1], b_size, dtype=None) aux_a = eye(r._n_blocks[0], r._n_blocks[1], (1, 1), dtype=np.uint8) (q, q_type) = (a, aux_a) </DeepExtract> r_type = full((r._n_blocks[0], r._n_blocks[1]), (1, 1), OTHER) act_q_list = [] sub_q_list = {} for i in range(a_n_blocks[1]): <DeepExtract> if dislib.__gpu_available__: qr_func = _qr_task_gpu else: qr_func = _qr_task (q_aux, r_aux) = qr_func(r._blocks[i][i], r_type._blocks[i][i], b_size, mode=mode, t=True) (act_q_type, act_q, r_type_block, r_block) = (_type_block(OTHER), q_aux, _type_block(OTHER), r_aux) </DeepExtract> r_type.replace_block(i, i, r_type_block) r.replace_block(i, i, r_block) act_q_list.append((act_q_type, act_q)) for j in range(i + 1, a_n_blocks[1]): <DeepExtract> if dislib.__gpu_available__: dot_func = _dot_task_gpu else: dot_func = _dot_task result = dot_func(act_q, r._blocks[i][j], transpose_result=transpose_result, transpose_a=transpose_a, transpose_b=transpose_b) (r_type_block, r_block) = (_type_block(OTHER), result) </DeepExtract> r_type.replace_block(i, j, r_type_block) r.replace_block(i, j, r_block) for j in range(i + 1, r._n_blocks[0]): sub_q = [[np.array([0]), np.array([0])], [np.array([0]), np.array([0])]] sub_q_type = [[_type_block(OTHER), _type_block(OTHER)], [_type_block(OTHER), _type_block(OTHER)]] <DeepExtract> if dislib.__gpu_available__: little_qr_func = _little_qr_task_gpu else: little_qr_func = _little_qr_task (sub_q00, sub_q01, sub_q10, sub_q11, aa, bb) = little_qr_func(r._blocks[i][i], r_type._blocks[i][i], r._blocks[j][i], r_type._blocks[j][i], b_size) (sub_q[0][0], sub_q[0][1], sub_q[1][0], sub_q[1][1], r_type_block1, r_block1, r_type_block2, r_block2) = (sub_q00, sub_q01, sub_q10, sub_q11, _type_block(OTHER), aa, _type_block(OTHER), bb) </DeepExtract> r_type.replace_block(i, i, r_type_block1) r.replace_block(i, i, r_block1) r_type.replace_block(j, i, r_type_block2) r.replace_block(j, i, r_block2) sub_q_list[j, i] = (sub_q_type, sub_q) for k in range(i + 1, a_n_blocks[1]): <DeepExtract> n_blocks = (len(sub_q), len([[r._blocks[i][k]], [r._blocks[j][k]]][0])) c = Array._get_out_blocks(n_blocks) type_c = Array._get_out_blocks(n_blocks) if transpose_a: sub_q = list(map(list, zip(*sub_q))) if transpose_b: [[r._blocks[i][k]], [r._blocks[j][k]]] = list(map(list, zip(*[[r._blocks[i][k]], [r._blocks[j][k]]]))) for i in range(n_blocks[0]): for j in range(n_blocks[1]): hblock = sub_q[i] vblock = [[[r._blocks[i][k]], [r._blocks[j][k]]][k][j] for k in range(len([[r._blocks[i][k]], [r._blocks[j][k]]]))] c[i][j] = _multiply_block_groups(hblock, vblock, transpose_a, transpose_b) ([[r_type_block1], [r_type_block2]], [[r_block1], [r_block2]]) = (type_c, c) </DeepExtract> r_type.replace_block(i, k, r_type_block1) r.replace_block(i, k, r_block1) r_type.replace_block(j, k, r_type_block2) r.replace_block(j, k, r_block2) for i in reversed(range(len(act_q_list))): for j in reversed(range(i + 1, r._n_blocks[0])): for k in range(q._n_blocks[1]): <DeepExtract> n_blocks = (len(sub_q_list[j, i][1]), len([[q._blocks[i][k]], [q._blocks[j][k]]][0])) c = Array._get_out_blocks(n_blocks) type_c = Array._get_out_blocks(n_blocks) if True: sub_q_list[j, i][1] = list(map(list, zip(*sub_q_list[j, i][1]))) if transpose_b: [[q._blocks[i][k]], [q._blocks[j][k]]] = list(map(list, zip(*[[q._blocks[i][k]], [q._blocks[j][k]]]))) for i in range(n_blocks[0]): for j in range(n_blocks[1]): hblock = sub_q_list[j, i][1][i] vblock = [[[q._blocks[i][k]], [q._blocks[j][k]]][k][j] for k in range(len([[q._blocks[i][k]], [q._blocks[j][k]]]))] c[i][j] = _multiply_block_groups(hblock, vblock, True, transpose_b) ([[q_type_block1], [q_type_block2]], [[q_block1], [q_block2]]) = (type_c, c) </DeepExtract> q_type.replace_block(i, k, q_type_block1) q.replace_block(i, k, q_block1) q_type.replace_block(j, k, q_type_block2) q.replace_block(j, k, q_block2) compss_delete_object(sub_q_list[j, i][0][0]) compss_delete_object(sub_q_list[j, i][0][1]) compss_delete_object(sub_q_list[j, i][1][0]) compss_delete_object(sub_q_list[j, i][1][1]) del sub_q_list[j, i] for k in range(q._n_blocks[1]): <DeepExtract> if dislib.__gpu_available__: dot_func = _dot_task_gpu else: dot_func = _dot_task result = dot_func(act_q_list[i][1], q._blocks[i][k], transpose_result=transpose_result, transpose_a=True, transpose_b=transpose_b) (q_type_block, q_block) = (_type_block(OTHER), result) </DeepExtract> q_type.replace_block(i, k, q_type_block) q.replace_block(i, k, q_block) compss_delete_object(act_q_list[i][0]) compss_delete_object(act_q_list[i][1]) remove_last_rows(r, r.shape[0] - r.shape[1]) return (q, r)
def _qr_economic(r): a_shape = (r.shape[0], r.shape[1]) a_n_blocks = (r._n_blocks[0], r._n_blocks[1]) b_size = r._reg_shape a = eye(r.shape[0], a_shape[1], b_size, dtype=None) aux_a = eye(r._n_blocks[0], r._n_blocks[1], (1, 1), dtype=np.uint8) (q, q_type) = (a, aux_a) r_type = full((r._n_blocks[0], r._n_blocks[1]), (1, 1), OTHER) act_q_list = [] sub_q_list = {} for i in range(a_n_blocks[1]): if dislib.__gpu_available__: qr_func = _qr_task_gpu else: qr_func = _qr_task (q_aux, r_aux) = qr_func(r._blocks[i][i], r_type._blocks[i][i], b_size, mode=mode, t=True) (act_q_type, act_q, r_type_block, r_block) = (_type_block(OTHER), q_aux, _type_block(OTHER), r_aux) r_type.replace_block(i, i, r_type_block) r.replace_block(i, i, r_block) act_q_list.append((act_q_type, act_q)) for j in range(i + 1, a_n_blocks[1]): if dislib.__gpu_available__: dot_func = _dot_task_gpu else: dot_func = _dot_task result = dot_func(act_q, r._blocks[i][j], transpose_result=transpose_result, transpose_a=transpose_a, transpose_b=transpose_b) (r_type_block, r_block) = (_type_block(OTHER), result) r_type.replace_block(i, j, r_type_block) r.replace_block(i, j, r_block) for j in range(i + 1, r._n_blocks[0]): sub_q = [[np.array([0]), np.array([0])], [np.array([0]), np.array([0])]] sub_q_type = [[_type_block(OTHER), _type_block(OTHER)], [_type_block(OTHER), _type_block(OTHER)]] if dislib.__gpu_available__: little_qr_func = _little_qr_task_gpu else: little_qr_func = _little_qr_task (sub_q00, sub_q01, sub_q10, sub_q11, aa, bb) = little_qr_func(r._blocks[i][i], r_type._blocks[i][i], r._blocks[j][i], r_type._blocks[j][i], b_size) (sub_q[0][0], sub_q[0][1], sub_q[1][0], sub_q[1][1], r_type_block1, r_block1, r_type_block2, r_block2) = (sub_q00, sub_q01, sub_q10, sub_q11, _type_block(OTHER), aa, _type_block(OTHER), bb) r_type.replace_block(i, i, r_type_block1) r.replace_block(i, i, r_block1) r_type.replace_block(j, i, r_type_block2) r.replace_block(j, i, r_block2) sub_q_list[j, i] = (sub_q_type, sub_q) for k in range(i + 1, a_n_blocks[1]): n_blocks = (len(sub_q), len([[r._blocks[i][k]], [r._blocks[j][k]]][0])) c = Array._get_out_blocks(n_blocks) type_c = Array._get_out_blocks(n_blocks) if transpose_a: sub_q = list(map(list, zip(*sub_q))) if transpose_b: [[r._blocks[i][k]], [r._blocks[j][k]]] = list(map(list, zip(*[[r._blocks[i][k]], [r._blocks[j][k]]]))) for i in range(n_blocks[0]): for j in range(n_blocks[1]): hblock = sub_q[i] vblock = [[[r._blocks[i][k]], [r._blocks[j][k]]][k][j] for k in range(len([[r._blocks[i][k]], [r._blocks[j][k]]]))] c[i][j] = _multiply_block_groups(hblock, vblock, transpose_a, transpose_b) ([[r_type_block1], [r_type_block2]], [[r_block1], [r_block2]]) = (type_c, c) r_type.replace_block(i, k, r_type_block1) r.replace_block(i, k, r_block1) r_type.replace_block(j, k, r_type_block2) r.replace_block(j, k, r_block2) for i in reversed(range(len(act_q_list))): for j in reversed(range(i + 1, r._n_blocks[0])): for k in range(q._n_blocks[1]): n_blocks = (len(sub_q_list[j, i][1]), len([[q._blocks[i][k]], [q._blocks[j][k]]][0])) c = Array._get_out_blocks(n_blocks) type_c = Array._get_out_blocks(n_blocks) if True: sub_q_list[j, i][1] = list(map(list, zip(*sub_q_list[j, i][1]))) if transpose_b: [[q._blocks[i][k]], [q._blocks[j][k]]] = list(map(list, zip(*[[q._blocks[i][k]], [q._blocks[j][k]]]))) for i in range(n_blocks[0]): for j in range(n_blocks[1]): hblock = sub_q_list[j, i][1][i] vblock = [[[q._blocks[i][k]], [q._blocks[j][k]]][k][j] for k in range(len([[q._blocks[i][k]], [q._blocks[j][k]]]))] c[i][j] = _multiply_block_groups(hblock, vblock, True, transpose_b) ([[q_type_block1], [q_type_block2]], [[q_block1], [q_block2]]) = (type_c, c) q_type.replace_block(i, k, q_type_block1) q.replace_block(i, k, q_block1) q_type.replace_block(j, k, q_type_block2) q.replace_block(j, k, q_block2) compss_delete_object(sub_q_list[j, i][0][0]) compss_delete_object(sub_q_list[j, i][0][1]) compss_delete_object(sub_q_list[j, i][1][0]) compss_delete_object(sub_q_list[j, i][1][1]) del sub_q_list[j, i] for k in range(q._n_blocks[1]): if dislib.__gpu_available__: dot_func = _dot_task_gpu else: dot_func = _dot_task result = dot_func(act_q_list[i][1], q._blocks[i][k], transpose_result=transpose_result, transpose_a=True, transpose_b=transpose_b) (q_type_block, q_block) = (_type_block(OTHER), result) q_type.replace_block(i, k, q_type_block) q.replace_block(i, k, q_block) compss_delete_object(act_q_list[i][0]) compss_delete_object(act_q_list[i][1]) remove_last_rows(r, r.shape[0] - r.shape[1]) return (q, r)
dislib
positive
def write_version(self, version_uid: VersionUid, data: str, overwrite: Optional[bool]=False) -> None: key = version_uid.storage_object_to_path() metadata_key = key + self._META_SUFFIX if not overwrite: try: <DeepExtract> raise NotImplementedError </DeepExtract> except FileNotFoundError: pass else: raise FileExistsError('Version {} already exists in storage.'.format(version_uid)) data_bytes = data.encode('utf-8') size = len(data_bytes) <DeepExtract> if self._active_transforms is not None: transforms_metadata = [] for transform in self._active_transforms: (data_encapsulated, materials) = transform.encapsulate(data=data_bytes) if data_encapsulated: transforms_metadata.append({'name': transform.name, 'module': transform.module, 'materials': materials}) data_bytes = data_encapsulated (data_bytes, transforms_metadata) = (data_bytes, transforms_metadata) else: (data_bytes, transforms_metadata) = (data_bytes, []) </DeepExtract> <DeepExtract> timestamp = datetime.datetime.utcnow().isoformat(timespec='microseconds') + 'Z' metadata: Dict = {self._CREATED_KEY: timestamp, self._METADATA_VERSION_KEY: str(VERSIONS.object_metadata.current), self._MODIFIED_KEY: timestamp, self._OBJECT_SIZE_KEY: len(data_bytes), self._SIZE_KEY: size} if checksum: metadata[self._CHECKSUM_KEY] = checksum if transforms_metadata: metadata[self._TRANSFORMS_KEY] = transforms_metadata if self._dict_hmac: self._dict_hmac.add_digest(metadata) (metadata, metadata_json) = (metadata, json.dumps(metadata, separators=(',', ':')).encode('utf-8')) </DeepExtract> try: <DeepExtract> raise NotImplementedError </DeepExtract> <DeepExtract> raise NotImplementedError </DeepExtract> except: try: <DeepExtract> raise NotImplementedError </DeepExtract> <DeepExtract> raise NotImplementedError </DeepExtract> except FileNotFoundError: pass raise if self._consistency_check_writes: <DeepExtract> data_actual = self._read_object(key) metadata_actual_json = self._read_object(metadata_key) self._decode_metadata(metadata_json=metadata_actual_json, key=key, data_length=len(data_actual)) if data_bytes != data_actual: raise ValueError('Written and read data of {} differ.'.format(key)) </DeepExtract>
def write_version(self, version_uid: VersionUid, data: str, overwrite: Optional[bool]=False) -> None: key = version_uid.storage_object_to_path() metadata_key = key + self._META_SUFFIX if not overwrite: try: raise NotImplementedError except FileNotFoundError: pass else: raise FileExistsError('Version {} already exists in storage.'.format(version_uid)) data_bytes = data.encode('utf-8') size = len(data_bytes) if self._active_transforms is not None: transforms_metadata = [] for transform in self._active_transforms: (data_encapsulated, materials) = transform.encapsulate(data=data_bytes) if data_encapsulated: transforms_metadata.append({'name': transform.name, 'module': transform.module, 'materials': materials}) data_bytes = data_encapsulated (data_bytes, transforms_metadata) = (data_bytes, transforms_metadata) else: (data_bytes, transforms_metadata) = (data_bytes, []) timestamp = datetime.datetime.utcnow().isoformat(timespec='microseconds') + 'Z' metadata: Dict = {self._CREATED_KEY: timestamp, self._METADATA_VERSION_KEY: str(VERSIONS.object_metadata.current), self._MODIFIED_KEY: timestamp, self._OBJECT_SIZE_KEY: len(data_bytes), self._SIZE_KEY: size} if checksum: metadata[self._CHECKSUM_KEY] = checksum if transforms_metadata: metadata[self._TRANSFORMS_KEY] = transforms_metadata if self._dict_hmac: self._dict_hmac.add_digest(metadata) (metadata, metadata_json) = (metadata, json.dumps(metadata, separators=(',', ':')).encode('utf-8')) try: raise NotImplementedError raise NotImplementedError except: try: raise NotImplementedError raise NotImplementedError except FileNotFoundError: pass raise if self._consistency_check_writes: data_actual = self._read_object(key) metadata_actual_json = self._read_object(metadata_key) self._decode_metadata(metadata_json=metadata_actual_json, key=key, data_length=len(data_actual)) if data_bytes != data_actual: raise ValueError('Written and read data of {} differ.'.format(key))
benji
positive
def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputBatch`s.""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: <DeepExtract> while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= seq_length - 3: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() </DeepExtract> elif len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:seq_length - 2] tokens = [] input_type_ids = [] tokens.append('[CLS]') input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append('[SEP]') input_type_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) input_type_ids.append(1) tokens.append('[SEP]') input_type_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: tf.logging.info('*** Example ***') tf.logging.info('unique_id: %s' % example.unique_id) tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens])) tf.logging.info('input_ids: %s' % ' '.join([str(x) for x in input_ids])) tf.logging.info('input_mask: %s' % ' '.join([str(x) for x in input_mask])) tf.logging.info('input_type_ids: %s' % ' '.join([str(x) for x in input_type_ids])) features.append(InputFeatures(unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features
def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputBatch`s.""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= seq_length - 3: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() elif len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:seq_length - 2] tokens = [] input_type_ids = [] tokens.append('[CLS]') input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append('[SEP]') input_type_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) input_type_ids.append(1) tokens.append('[SEP]') input_type_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: tf.logging.info('*** Example ***') tf.logging.info('unique_id: %s' % example.unique_id) tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens])) tf.logging.info('input_ids: %s' % ' '.join([str(x) for x in input_ids])) tf.logging.info('input_mask: %s' % ' '.join([str(x) for x in input_mask])) tf.logging.info('input_type_ids: %s' % ' '.join([str(x) for x in input_type_ids])) features.append(InputFeatures(unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features
bert-for-task
positive
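For reference, the pair-truncation loop inlined above as the standalone helper it came from: trim one token at a time from the longer sequence until the pair fits alongside the [CLS] and two [SEP] tokens.

def truncate_seq_pair(tokens_a, tokens_b, max_length):
    # Truncate a pair of token lists in place to at most max_length total.
    while len(tokens_a) + len(tokens_b) > max_length:
        # Trim the longer sequence so both keep proportionate context.
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()

(a, b) = (list('abcdefg'), list('xyz'))
truncate_seq_pair(a, b, 6)  # seq_length 9 minus the 3 special tokens
assert (a, b) == (list('abc'), list('xyz'))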
def save(self): saved = False message = '' if self.changes and (self.annotation or self.corrections) and self.config.currentFile and self.image: if self.annotation: self.annotation.imgWidth = self.image.width() self.annotation.imgHeight = self.image.height() filename = self.config.currentLabelFile if not filename: <DeepExtract> if not self.config.cityName: filename = '' if not self.config.labelPath: filename = '' if not self.config.currentFile: filename = '' if not self.isLabelPathValid(self.config.labelPath): filename = '' if not os.path.isdir(self.config.labelPath): filename = '' labelDir = self.config.labelPath if self.config.gtType: ext = self.gtExt.format('_' + self.config.gtType) else: ext = self.gtExt.format('') filename = os.path.basename(self.config.currentFile) filename = filename.replace(self.imageExt, ext) filename = os.path.join(labelDir, filename) filename = os.path.normpath(filename) filename = filename </DeepExtract> if filename: proceed = True if os.path.isfile(filename) and self.config.showSaveWarning: msgBox = QtGui.QMessageBox(self) msgBox.setWindowTitle('Overwriting') msgBox.setText('Saving overwrites the original file and it cannot be reversed. Do you want to continue?') msgBox.addButton(QtGui.QMessageBox.Cancel) okAndNeverAgainButton = msgBox.addButton('OK and never ask again', QtGui.QMessageBox.AcceptRole) okButton = msgBox.addButton(QtGui.QMessageBox.Ok) msgBox.setDefaultButton(QtGui.QMessageBox.Ok) msgBox.setIcon(QtGui.QMessageBox.Warning) msgBox.exec_() if msgBox.clickedButton() == okButton: pass elif msgBox.clickedButton() == okAndNeverAgainButton: self.config.showSaveWarning = False else: message += 'Nothing saved, no harm has been done. ' proceed = False if proceed: try: self.annotation.toJsonFile(filename) saved = True message += 'Saved labels to {0} '.format(filename) except IOError as e: message += 'Error writing labels to {0}. Message: {1} '.format(filename, e.strerror) else: message += 'Error writing labels. Cannot generate a valid filename. ' if self.corrections or self.config.currentCorrectionFile: filename = self.config.currentCorrectionFile if not filename: <DeepExtract> if not self.config.correctionPath: filename = '' if not self.config.currentFile: filename = '' correctionDir = self.config.correctionPath if not os.path.isdir(correctionDir): if True: os.makedirs(correctionDir) if not os.path.isdir(correctionDir): filename = '' else: filename = '' filename = os.path.basename(self.config.currentFile) filename = filename.replace(self.imageExt, '.xml') filename = os.path.join(correctionDir, filename) filename = os.path.normpath(filename) filename = filename </DeepExtract> if filename: root = ET.Element('correction') root.text = '\n' root.tail = '\n' filenameNode = ET.SubElement(root, 'filename') filenameNode.text = os.path.basename(self.config.currentFile) filenameNode.tail = '\n' folderNode = ET.SubElement(root, 'folder') folderNode.text = 'StereoDataset/' + self.config.cityName folderNode.tail = '\n' sourceNode = ET.SubElement(root, 'source') sourceNode.text = '\n' sourceNode.tail = '\n' sourceImageNode = ET.SubElement(sourceNode, 'sourceImage') sourceImageNode.text = 'Label Cities' sourceImageNode.tail = '\n' sourceAnnotationNode = ET.SubElement(sourceNode, 'sourceAnnotation') sourceAnnotationNode.text = 'mcLabelTool' sourceAnnotationNode.tail = '\n' imagesizeNode = ET.SubElement(root, 'imagesize') imagesizeNode.text = '\n' imagesizeNode.tail = '\n' nrowsNode = ET.SubElement(imagesizeNode, 'nrows') nrowsNode.text = str(self.image.height()) nrowsNode.tail = '\n' ncolsNode = ET.SubElement(imagesizeNode, 'ncols') ncolsNode.text = str(self.image.height()) ncolsNode.tail = '\n' for correction in self.corrections: correction.appendToXMLNode(root) self.correctionXML = ET.ElementTree(root) try: self.correctionXML.write(filename) saved = True message += 'Saved corrections to {0} '.format(filename) except IOError as e: message += 'Error writing corrections to {0}. Message: {1} '.format(filename, e.strerror) else: message += 'Error writing corrections. Cannot generate a valid filename. ' if saved: <DeepExtract> self.changes = [] self.changedLayer = [] self.changedPolygon = [] for act in self.actChanges: act.setEnabled(False) </DeepExtract> else: message += 'Nothing to save ' saved = True self.statusBar().showMessage(message) return saved
def save(self): saved = False message = '' if self.changes and (self.annotation or self.corrections) and self.config.currentFile and self.image: if self.annotation: self.annotation.imgWidth = self.image.width() self.annotation.imgHeight = self.image.height() filename = self.config.currentLabelFile if not filename: if not self.config.cityName: filename = '' if not self.config.labelPath: filename = '' if not self.config.currentFile: filename = '' if not self.isLabelPathValid(self.config.labelPath): filename = '' if not os.path.isdir(self.config.labelPath): filename = '' labelDir = self.config.labelPath if self.config.gtType: ext = self.gtExt.format('_' + self.config.gtType) else: ext = self.gtExt.format('') filename = os.path.basename(self.config.currentFile) filename = filename.replace(self.imageExt, ext) filename = os.path.join(labelDir, filename) filename = os.path.normpath(filename) filename = filename if filename: proceed = True if os.path.isfile(filename) and self.config.showSaveWarning: msgBox = QtGui.QMessageBox(self) msgBox.setWindowTitle('Overwriting') msgBox.setText('Saving overwrites the original file and it cannot be reversed. Do you want to continue?') msgBox.addButton(QtGui.QMessageBox.Cancel) okAndNeverAgainButton = msgBox.addButton('OK and never ask again', QtGui.QMessageBox.AcceptRole) okButton = msgBox.addButton(QtGui.QMessageBox.Ok) msgBox.setDefaultButton(QtGui.QMessageBox.Ok) msgBox.setIcon(QtGui.QMessageBox.Warning) msgBox.exec_() if msgBox.clickedButton() == okButton: pass elif msgBox.clickedButton() == okAndNeverAgainButton: self.config.showSaveWarning = False else: message += 'Nothing saved, no harm has been done. ' proceed = False if proceed: try: self.annotation.toJsonFile(filename) saved = True message += 'Saved labels to {0} '.format(filename) except IOError as e: message += 'Error writing labels to {0}. Message: {1} '.format(filename, e.strerror) else: message += 'Error writing labels. Cannot generate a valid filename. ' if self.corrections or self.config.currentCorrectionFile: filename = self.config.currentCorrectionFile if not filename: if not self.config.correctionPath: filename = '' if not self.config.currentFile: filename = '' correctionDir = self.config.correctionPath if not os.path.isdir(correctionDir): if True: os.makedirs(correctionDir) if not os.path.isdir(correctionDir): filename = '' else: filename = '' filename = os.path.basename(self.config.currentFile) filename = filename.replace(self.imageExt, '.xml') filename = os.path.join(correctionDir, filename) filename = os.path.normpath(filename) filename = filename if filename: root = ET.Element('correction') root.text = '\n' root.tail = '\n' filenameNode = ET.SubElement(root, 'filename') filenameNode.text = os.path.basename(self.config.currentFile) filenameNode.tail = '\n' folderNode = ET.SubElement(root, 'folder') folderNode.text = 'StereoDataset/' + self.config.cityName folderNode.tail = '\n' sourceNode = ET.SubElement(root, 'source') sourceNode.text = '\n' sourceNode.tail = '\n' sourceImageNode = ET.SubElement(sourceNode, 'sourceImage') sourceImageNode.text = 'Label Cities' sourceImageNode.tail = '\n' sourceAnnotationNode = ET.SubElement(sourceNode, 'sourceAnnotation') sourceAnnotationNode.text = 'mcLabelTool' sourceAnnotationNode.tail = '\n' imagesizeNode = ET.SubElement(root, 'imagesize') imagesizeNode.text = '\n' imagesizeNode.tail = '\n' nrowsNode = ET.SubElement(imagesizeNode, 'nrows') nrowsNode.text = str(self.image.height()) nrowsNode.tail = '\n' ncolsNode = ET.SubElement(imagesizeNode, 'ncols') ncolsNode.text = str(self.image.height()) ncolsNode.tail = '\n' for correction in self.corrections: correction.appendToXMLNode(root) self.correctionXML = ET.ElementTree(root) try: self.correctionXML.write(filename) saved = True message += 'Saved corrections to {0} '.format(filename) except IOError as e: message += 'Error writing corrections to {0}. Message: {1} '.format(filename, e.strerror) else: message += 'Error writing corrections. Cannot generate a valid filename. ' if saved: self.changes = [] self.changedLayer = [] self.changedPolygon = [] for act in self.actChanges: act.setEnabled(False) else: message += 'Nothing to save ' saved = True self.statusBar().showMessage(message) return saved
bisenetv2-tensorflow
positive
@pyqtSlot(bool) def toggle_toolbox_visibility(self, checked): toolbox_action_item = self.sender() tab_toolbox = toolbox_action_item.tab_toolbox if checked: <DeepExtract> dock_widget = tab_toolbox.dock_widget if tab_toolbox.get_display_state() == TabToolbox.DS_TAB: self.tab_widget.removeTab(self.tab_widget.indexOf(tab_toolbox)) if tab_toolbox.get_display_state() != TabToolbox.DS_TOOLBOX: self.addDockWidget(tab_toolbox.preferred_dock_area(), dock_widget) dock_widget.setWidget(tab_toolbox) dock_widget.show() tab_toolbox.tab_action_item.setChecked(False) tab_toolbox.toolbox_action_item.setChecked(True) tab_toolbox.set_display_state(TabToolbox.DS_TOOLBOX) </DeepExtract> else: <DeepExtract> dock_widget = tab_toolbox.dock_widget if tab_toolbox.get_display_state() == TabToolbox.DS_TAB: self.tab_widget.removeTab(self.tab_widget.indexOf(tab_toolbox)) elif tab_toolbox.get_display_state() == TabToolbox.DS_TOOLBOX: self.removeDockWidget(dock_widget) dock_widget.hide() tab_toolbox.toolbox_action_item.setChecked(False) tab_toolbox.tab_action_item.setChecked(False) tab_toolbox.toolbox_action_item.setChecked(False) tab_toolbox.set_display_state(TabToolbox.DS_HIDDEN) </DeepExtract>
@pyqtSlot(bool) def toggle_toolbox_visibility(self, checked): toolbox_action_item = self.sender() tab_toolbox = toolbox_action_item.tab_toolbox if checked: dock_widget = tab_toolbox.dock_widget if tab_toolbox.get_display_state() == TabToolbox.DS_TAB: self.tab_widget.removeTab(self.tab_widget.indexOf(tab_toolbox)) if tab_toolbox.get_display_state() != TabToolbox.DS_TOOLBOX: self.addDockWidget(tab_toolbox.preferred_dock_area(), dock_widget) dock_widget.setWidget(tab_toolbox) dock_widget.show() tab_toolbox.tab_action_item.setChecked(False) tab_toolbox.toolbox_action_item.setChecked(True) tab_toolbox.set_display_state(TabToolbox.DS_TOOLBOX) else: dock_widget = tab_toolbox.dock_widget if tab_toolbox.get_display_state() == TabToolbox.DS_TAB: self.tab_widget.removeTab(self.tab_widget.indexOf(tab_toolbox)) elif tab_toolbox.get_display_state() == TabToolbox.DS_TOOLBOX: self.removeDockWidget(dock_widget) dock_widget.hide() tab_toolbox.toolbox_action_item.setChecked(False) tab_toolbox.tab_action_item.setChecked(False) tab_toolbox.toolbox_action_item.setChecked(False) tab_toolbox.set_display_state(TabToolbox.DS_HIDDEN)
crazyflie-clients-python
positive
def createOrganizationPolicyObjectsGroup(apiKey, organizationId, body=None): url = '/organizations/' + str(organizationId) + '/policyObjects/groups' <DeepExtract> if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: qArrayFix = {} for item in p_queryItems: if isinstance(p_queryItems[item], list): qArrayFix['%s[]' % item] = p_queryItems[item] else: qArrayFix[item] = p_queryItems[item] query = '?' + urlencode(qArrayFix, True) url = API_BASE_URL + url + query verb = 'post'.upper() session = NoRebuildAuthSession() verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}} try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb in verbs: if verbs[verb]['hasBody'] and (not body is None): r = verbs[verb]['function'](url, headers=headers, json=body, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = API_RETRY_DEFAULT_WAIT if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'post', url, p_additionalHeaders, p_queryItems, body, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'post', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=body, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) </DeepExtract> return (success, errors, response)
def createOrganizationPolicyObjectsGroup(apiKey, organizationId, body=None): url = '/organizations/' + str(organizationId) + '/policyObjects/groups' if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: qArrayFix = {} for item in p_queryItems: if isinstance(p_queryItems[item], list): qArrayFix['%s[]' % item] = p_queryItems[item] else: qArrayFix[item] = p_queryItems[item] query = '?' + urlencode(qArrayFix, True) url = API_BASE_URL + url + query verb = 'post'.upper() session = NoRebuildAuthSession() verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}} try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb in verbs: if verbs[verb]['hasBody'] and (not body is None): r = verbs[verb]['function'](url, headers=headers, json=body, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = API_RETRY_DEFAULT_WAIT if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'post', url, p_additionalHeaders, p_queryItems, body, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'post', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=body, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) return (success, errors, response)
automation-scripts
positive
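A minimal sketch of the HTTP 429 handling embedded in the record: honour the Retry-After header when present, sleep, and retry up to a cap. The requests calls are standard; the retry limits are hypothetical.

import time
import requests

MAX_RETRIES = 3
DEFAULT_WAIT = 2  # seconds, used when no Retry-After header is present

def get_with_backoff(url, headers=None):
    for attempt in range(MAX_RETRIES + 1):
        r = requests.get(url, headers=headers, timeout=30)
        if r.status_code != 429:
            return r
        wait = int(r.headers.get('Retry-After', DEFAULT_WAIT))
        time.sleep(wait)
    raise RuntimeError('rate limited after %d retries' % MAX_RETRIES)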
def redistribute(blocks): n = len(blocks) <DeepExtract> i = 0 n = len(blocks) largest = blocks[0] for x in range(1, n): if blocks[x] > largest: largest = blocks[x] i = x largest = i </DeepExtract> amount = blocks[largest] blocks[largest] = 0 for x in range(largest + 1, largest + 1 + amount): blocks[x % n] += 1
def redistribute(blocks): n = len(blocks) i = 0 n = len(blocks) largest = blocks[0] for x in range(1, n): if blocks[x] > largest: largest = blocks[x] i = x largest = i amount = blocks[largest] blocks[largest] = 0 for x in range(largest + 1, largest + 1 + amount): blocks[x % n] += 1
advent_of_code_2017
positive
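A usage sketch of redistribute on the puzzle's worked example: starting from [0, 2, 7, 0], the fullest bank (7 blocks) is emptied and its blocks are dealt out one at a time to the following banks, wrapping around.

banks = [0, 2, 7, 0]
redistribute(banks)
assert banks == [2, 4, 1, 2]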
def get_coding_scheme_warnings(self, silent=False): """Check if the coding scheme has detected any errors. Meaningless for default coding scheme (0) """ err = self.read_reg(self.REGS.EFUSE_REG_DEC_STATUS) & self.REGS.EFUSE_REG_DEC_STATUS_MASK for block in self.blocks: if block.id != 0: block.num_errors = 0 block.fail = err != 0 if not silent and block.fail: print('Error(s) in BLOCK%d [ERRORS:%d FAIL:%d]' % (block.id, block.num_errors, block.fail)) if (self.debug or err) and (not silent): <DeepExtract> print('') print('{:27} 0x{:08x}'.format('EFUSE_REG_DEC_STATUS', self.read_reg(self.REGS.EFUSE_REG_DEC_STATUS))) </DeepExtract> return err != 0
def get_coding_scheme_warnings(self, silent=False): """Check if the coding scheme has detected any errors. Meaningless for default coding scheme (0) """ err = self.read_reg(self.REGS.EFUSE_REG_DEC_STATUS) & self.REGS.EFUSE_REG_DEC_STATUS_MASK for block in self.blocks: if block.id != 0: block.num_errors = 0 block.fail = err != 0 if not silent and block.fail: print('Error(s) in BLOCK%d [ERRORS:%d FAIL:%d]' % (block.id, block.num_errors, block.fail)) if (self.debug or err) and (not silent): print('') print('{:27} 0x{:08x}'.format('EFUSE_REG_DEC_STATUS', self.read_reg(self.REGS.EFUSE_REG_DEC_STATUS))) return err != 0
esptool
positive
def AddSDOServerToCurrent(self): if self.CurrentNode.IsEntry(4608): indexlist = [self.GetLineFromIndex(4609)] if None not in indexlist: <DeepExtract> disable_buffer = node != None if node == None: node = self.CurrentNode for index in indexlist: infos = self.GetEntryInfos(index) if infos['struct'] & OD_MultipleSubindexes: if infos['struct'] & OD_IdenticalSubindexes: subentry_infos = self.GetSubentryInfos(index, 1) if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, value=[]) if 'nbmin' in subentry_infos: for i in xrange(subentry_infos['nbmin']): node.AddEntry(index, i + 1, default) else: node.AddEntry(index, 1, default) else: i = 1 subentry_infos = self.GetSubentryInfos(index, i) while subentry_infos: if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, i, default) i += 1 subentry_infos = self.GetSubentryInfos(index, i) else: subentry_infos = self.GetSubentryInfos(index, 0) if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, 0, default) for index in []: self.RemoveCurrentVariable(index) if not disable_buffer: self.BufferCurrentNode() return None </DeepExtract> else: <DeepExtract> disable_buffer = node != None if node == None: node = self.CurrentNode for index in [4608]: infos = self.GetEntryInfos(index) if infos['struct'] & OD_MultipleSubindexes: if infos['struct'] & OD_IdenticalSubindexes: subentry_infos = self.GetSubentryInfos(index, 1) if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, value=[]) if 'nbmin' in subentry_infos: for i in xrange(subentry_infos['nbmin']): node.AddEntry(index, i + 1, default) else: node.AddEntry(index, 1, default) else: i = 1 subentry_infos = self.GetSubentryInfos(index, i) while subentry_infos: if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, i, default) i += 1 subentry_infos = self.GetSubentryInfos(index, i) else: subentry_infos = self.GetSubentryInfos(index, 0) if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, 0, default) for index in []: self.RemoveCurrentVariable(index) if not disable_buffer: self.BufferCurrentNode() return None </DeepExtract>
def AddSDOServerToCurrent(self): if self.CurrentNode.IsEntry(4608): indexlist = [self.GetLineFromIndex(4609)] if None not in indexlist: disable_buffer = node != None if node == None: node = self.CurrentNode for index in indexlist: infos = self.GetEntryInfos(index) if infos['struct'] & OD_MultipleSubindexes: if infos['struct'] & OD_IdenticalSubindexes: subentry_infos = self.GetSubentryInfos(index, 1) if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, value=[]) if 'nbmin' in subentry_infos: for i in xrange(subentry_infos['nbmin']): node.AddEntry(index, i + 1, default) else: node.AddEntry(index, 1, default) else: i = 1 subentry_infos = self.GetSubentryInfos(index, i) while subentry_infos: if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, i, default) i += 1 subentry_infos = self.GetSubentryInfos(index, i) else: subentry_infos = self.GetSubentryInfos(index, 0) if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, 0, default) for index in []: self.RemoveCurrentVariable(index) if not disable_buffer: self.BufferCurrentNode() return None else: disable_buffer = node != None if node == None: node = self.CurrentNode for index in [4608]: infos = self.GetEntryInfos(index) if infos['struct'] & OD_MultipleSubindexes: if infos['struct'] & OD_IdenticalSubindexes: subentry_infos = self.GetSubentryInfos(index, 1) if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, value=[]) if 'nbmin' in subentry_infos: for i in xrange(subentry_infos['nbmin']): node.AddEntry(index, i + 1, default) else: node.AddEntry(index, 1, default) else: i = 1 subentry_infos = self.GetSubentryInfos(index, i) while subentry_infos: if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, i, default) i += 1 subentry_infos = self.GetSubentryInfos(index, i) else: subentry_infos = self.GetSubentryInfos(index, 0) if 'default' in subentry_infos: default = subentry_infos['default'] else: default = self.GetTypeDefaultValue(subentry_infos['type']) node.AddEntry(index, 0, default) for index in []: self.RemoveCurrentVariable(index) if not disable_buffer: self.BufferCurrentNode() return None
CANFestivino
positive
def process_feature(self, feature): """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" self.num_features += 1 def create_int_feature(values): feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return feature features = collections.OrderedDict() <DeepExtract> feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([feature.unique_id]))) features['unique_ids'] = feature </DeepExtract> <DeepExtract> feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(feature.input_ids))) features['input_ids'] = feature </DeepExtract> <DeepExtract> feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(feature.input_mask))) features['input_mask'] = feature </DeepExtract> <DeepExtract> feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(feature.segment_ids))) features['segment_ids'] = feature </DeepExtract> if self.is_training: <DeepExtract> feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([feature.start_position]))) features['start_positions'] = feature </DeepExtract> <DeepExtract> feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([feature.end_position]))) features['end_positions'] = feature </DeepExtract> impossible = 0 if feature.is_impossible: impossible = 1 <DeepExtract> feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([impossible]))) features['is_impossible'] = feature </DeepExtract> tf_example = tf.train.Example(features=tf.train.Features(feature=features)) self._writer.write(tf_example.SerializeToString())
def process_feature(self, feature): """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" self.num_features += 1 def create_int_feature(values): feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return feature features = collections.OrderedDict() feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([feature.unique_id]))) features['unique_ids'] = feature feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(feature.input_ids))) features['input_ids'] = feature feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(feature.input_mask))) features['input_mask'] = feature feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(feature.segment_ids))) features['segment_ids'] = feature if self.is_training: feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([feature.start_position]))) features['start_positions'] = feature feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([feature.end_position]))) features['end_positions'] = feature impossible = 0 if feature.is_impossible: impossible = 1 feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list([impossible]))) features['is_impossible'] = feature tf_example = tf.train.Example(features=tf.train.Features(feature=features)) self._writer.write(tf_example.SerializeToString())
DistillBERT
positive
def voc_eval_rel(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=False): """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname, [ovthresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. detpath: Path to detections detpath.format(classname) should produce the detection results file. annopath: Path to annotations annopath.format(imagename) should be the xml annotations file. imagesetfile: Text file containing the list of images, one image per line. classname: Category name (duh) cachedir: Directory for caching the annotations [ovthresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False) """ if not os.path.isdir(cachedir): os.mkdir(cachedir) cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile) with open(imagesetfile, 'r') as f: lines = f.readlines() imagenames = [x.strip() for x in lines] if not os.path.isfile(cachefile): recs = {} for (i, imagename) in enumerate(imagenames): <DeepExtract> tree = ET.parse(annopath.format(imagename)) objects = [] for obj in tree.findall('object'): obj_struct = {} obj_struct['name'] = obj.find('name').text obj_struct['pose'] = obj.find('pose').text obj_struct['truncated'] = int(obj.find('truncated').text) obj_struct['difficult'] = int(obj.find('difficult').text) bbox = obj.find('bndbox') obj_struct['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text), int(bbox.find('ymax').text)] objects.append(obj_struct) recs[imagename] = objects </DeepExtract> if i % 100 == 0: print('Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames))) print('Saving cached annotations to {:s}'.format(cachefile)) with open(cachefile, 'wb') as f: pickle.dump(recs, f) else: print(cachefile) with open(cachefile, 'rb') as f: try: recs = pickle.load(f) except: recs = pickle.load(f, encoding='bytes') class_recs = {} npos = 0 for imagename in imagenames: R = [obj for obj in recs[imagename] if obj['name'] == classname] bbox = np.array([x['bbox'] for x in R]) difficult = np.array([x['difficult'] for x in R]).astype(np.bool) det = [False] * len(R) npos = npos + sum(~difficult) class_recs[imagename] = {'bbox': bbox, 'difficult': difficult, 'det': det} detfile = detpath.format(classname) with open(detfile, 'r') as f: lines = f.readlines() splitlines = [x.strip().split(' ') for x in lines] image_ids = [x[0] for x in splitlines] confidence = np.array([float(x[1]) for x in splitlines]) BB = np.array([[float(z) for z in x[2:]] for x in splitlines]) print('BB', BB) print('confidence', confidence) nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) if BB.shape[0] > 0: sorted_ind = np.argsort(-confidence) sorted_scores = np.sort(-confidence) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] for d in range(nd): R = class_recs[image_ids[d]] bb = BB[d, :].astype(float) ovmax = -np.inf BBGT = R['bbox'].astype(float) if BBGT.size > 0: ixmin = np.maximum(BBGT[:, 0], bb[0]) iymin = np.maximum(BBGT[:, 1], bb[1]) ixmax = np.minimum(BBGT[:, 2], bb[2]) iymax = np.minimum(BBGT[:, 3], bb[3]) iw = np.maximum(ixmax - ixmin + 1.0, 0.0) ih = np.maximum(iymax - iymin + 1.0, 0.0) inters = iw * ih uni = (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters overlaps = inters / uni ovmax = np.max(overlaps) jmax = np.argmax(overlaps) if ovmax > ovthresh: if not R['difficult'][jmax]: if not R['det'][jmax]: tp[d] = 1.0 R['det'][jmax] = 1 else: fp[d] = 1.0 else: fp[d] = 1.0 fp = np.cumsum(fp) tp = np.cumsum(tp) rec = tp / float(npos) prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) <DeepExtract> if use_07_metric: ap = 0.0 for t in np.arange(0.0, 1.1, 0.1): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap = ap + p / 11.0 else: mrec = np.concatenate(([0.0], rec, [1.0])) mpre = np.concatenate(([0.0], prec, [0.0])) for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) i = np.where(mrec[1:] != mrec[:-1])[0] ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) ap = ap </DeepExtract> return (rec, prec, ap)
def voc_eval_rel(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=False): """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname, [ovthresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. detpath: Path to detections detpath.format(classname) should produce the detection results file. annopath: Path to annotations annopath.format(imagename) should be the xml annotations file. imagesetfile: Text file containing the list of images, one image per line. classname: Category name (duh) cachedir: Directory for caching the annotations [ovthresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False) """ if not os.path.isdir(cachedir): os.mkdir(cachedir) cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile) with open(imagesetfile, 'r') as f: lines = f.readlines() imagenames = [x.strip() for x in lines] if not os.path.isfile(cachefile): recs = {} for (i, imagename) in enumerate(imagenames): tree = ET.parse(annopath.format(imagename)) objects = [] for obj in tree.findall('object'): obj_struct = {} obj_struct['name'] = obj.find('name').text obj_struct['pose'] = obj.find('pose').text obj_struct['truncated'] = int(obj.find('truncated').text) obj_struct['difficult'] = int(obj.find('difficult').text) bbox = obj.find('bndbox') obj_struct['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text), int(bbox.find('ymax').text)] objects.append(obj_struct) recs[imagename] = objects if i % 100 == 0: print('Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames))) print('Saving cached annotations to {:s}'.format(cachefile)) with open(cachefile, 'wb') as f: pickle.dump(recs, f) else: print(cachefile) with open(cachefile, 'rb') as f: try: recs = pickle.load(f) except: recs = pickle.load(f, encoding='bytes') class_recs = {} npos = 0 for imagename in imagenames: R = [obj for obj in recs[imagename] if obj['name'] == classname] bbox = np.array([x['bbox'] for x in R]) difficult = np.array([x['difficult'] for x in R]).astype(np.bool) det = [False] * len(R) npos = npos + sum(~difficult) class_recs[imagename] = {'bbox': bbox, 'difficult': difficult, 'det': det} detfile = detpath.format(classname) with open(detfile, 'r') as f: lines = f.readlines() splitlines = [x.strip().split(' ') for x in lines] image_ids = [x[0] for x in splitlines] confidence = np.array([float(x[1]) for x in splitlines]) BB = np.array([[float(z) for z in x[2:]] for x in splitlines]) print('BB', BB) print('confidence', confidence) nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) if BB.shape[0] > 0: sorted_ind = np.argsort(-confidence) sorted_scores = np.sort(-confidence) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] for d in range(nd): R = class_recs[image_ids[d]] bb = BB[d, :].astype(float) ovmax = -np.inf BBGT = R['bbox'].astype(float) if BBGT.size > 0: ixmin = np.maximum(BBGT[:, 0], bb[0]) iymin = np.maximum(BBGT[:, 1], bb[1]) ixmax = np.minimum(BBGT[:, 2], bb[2]) iymax = np.minimum(BBGT[:, 3], bb[3]) iw = np.maximum(ixmax - ixmin + 1.0, 0.0) ih = np.maximum(iymax - iymin + 1.0, 0.0) inters = iw * ih uni = (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters overlaps = inters / uni ovmax = np.max(overlaps) jmax = np.argmax(overlaps) if ovmax > ovthresh: if not R['difficult'][jmax]: if not R['det'][jmax]: tp[d] = 1.0 R['det'][jmax] = 1 else: fp[d] = 1.0 else: fp[d] = 1.0 fp = np.cumsum(fp) tp = np.cumsum(tp) rec = tp / float(npos) prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) if use_07_metric: ap = 0.0 for t in np.arange(0.0, 1.1, 0.1): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap = ap + p / 11.0 else: mrec = np.concatenate(([0.0], rec, [1.0])) mpre = np.concatenate(([0.0], prec, [0.0])) for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) i = np.where(mrec[1:] != mrec[:-1])[0] ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) ap = ap return (rec, prec, ap)
DatasetCulling
positive
def on_new_topfolder(self, widget, data=None): dlg = Gtk.FileChooserDialog(_('Create New Folder'), self.ui) dlg.set_action(Gtk.FileChooserAction.CREATE_FOLDER) dlg.set_local_only(True) dlg.add_buttons(_('Use Default'), Gtk.ResponseType.NONE, Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.OK) response = dlg.run() if response == Gtk.ResponseType.OK: path = dlg.get_filename() <DeepExtract> self.app.monitor.suspend() theModel = self.treeView.get_model() newFolder = model.Folder(os.path.basename(path), path=path) newIter = theModel.append_item(newFolder, None) newFolder.persist() self.app.monitor.unsuspend() self.treeView.expand_to_path(theModel.get_path(newIter)) self.treeView.get_selection().unselect_all() self.treeView.get_selection().select_iter(newIter) self.on_tree_selection_changed(self.treeView) </DeepExtract> self.app.monitor.add_watch(path) dlg.destroy() self.app.config_altered(True) elif response == Gtk.ResponseType.NONE: dlg.destroy() <DeepExtract> dlg = RenameDialog(self.ui, 'New %s' % 'Folder', True, _('Create New %s') % 'Folder') dlg.set_image(Gtk.STOCK_NEW) if dlg.run() == 1: newText = dlg.get_name() if validate(not EMPTY_FIELD_REGEX.match(newText), _("The name can't be empty"), None, self.ui): dlg.destroy() name = newText else: dlg.destroy() name = None dlg.destroy() name = None </DeepExtract> <DeepExtract> self.app.monitor.suspend() theModel = self.treeView.get_model() newFolder = model.Folder(name, path=path) newIter = theModel.append_item(newFolder, None) newFolder.persist() self.app.monitor.unsuspend() self.treeView.expand_to_path(theModel.get_path(newIter)) self.treeView.get_selection().unselect_all() self.treeView.get_selection().select_iter(newIter) self.on_tree_selection_changed(self.treeView) </DeepExtract> self.app.config_altered(True) else: dlg.destroy()
def on_new_topfolder(self, widget, data=None): dlg = Gtk.FileChooserDialog(_('Create New Folder'), self.ui) dlg.set_action(Gtk.FileChooserAction.CREATE_FOLDER) dlg.set_local_only(True) dlg.add_buttons(_('Use Default'), Gtk.ResponseType.NONE, Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.OK) response = dlg.run() if response == Gtk.ResponseType.OK: path = dlg.get_filename() self.app.monitor.suspend() theModel = self.treeView.get_model() newFolder = model.Folder(os.path.basename(path), path=path) newIter = theModel.append_item(newFolder, None) newFolder.persist() self.app.monitor.unsuspend() self.treeView.expand_to_path(theModel.get_path(newIter)) self.treeView.get_selection().unselect_all() self.treeView.get_selection().select_iter(newIter) self.on_tree_selection_changed(self.treeView) self.app.monitor.add_watch(path) dlg.destroy() self.app.config_altered(True) elif response == Gtk.ResponseType.NONE: dlg.destroy() dlg = RenameDialog(self.ui, 'New %s' % 'Folder', True, _('Create New %s') % 'Folder') dlg.set_image(Gtk.STOCK_NEW) if dlg.run() == 1: newText = dlg.get_name() if validate(not EMPTY_FIELD_REGEX.match(newText), _("The name can't be empty"), None, self.ui): dlg.destroy() name = newText else: dlg.destroy() name = None dlg.destroy() name = None self.app.monitor.suspend() theModel = self.treeView.get_model() newFolder = model.Folder(name, path=path) newIter = theModel.append_item(newFolder, None) newFolder.persist() self.app.monitor.unsuspend() self.treeView.expand_to_path(theModel.get_path(newIter)) self.treeView.get_selection().unselect_all() self.treeView.get_selection().select_iter(newIter) self.on_tree_selection_changed(self.treeView) self.app.config_altered(True) else: dlg.destroy()
autokey-python2
positive
def test_required_scope_not_in_response_by_default(self): self.access_token.scope = 'scope2' self.access_token.save() <DeepExtract> auth = 'Bearer {0}'.format(self.access_token.token) </DeepExtract> response = self.client.get('/oauth2-scoped-test/', HTTP_AUTHORIZATION=auth) self.assertEqual(response.status_code, 403) self.assertNotIn('required_scopes', response.data)
def test_required_scope_not_in_response_by_default(self): self.access_token.scope = 'scope2' self.access_token.save() auth = 'Bearer {0}'.format(self.access_token.token) response = self.client.get('/oauth2-scoped-test/', HTTP_AUTHORIZATION=auth) self.assertEqual(response.status_code, 403) self.assertNotIn('required_scopes', response.data)
django-oauth-toolkit
positive
def __init__(self, dataset): <DeepExtract> user_id_type = 'int' if dataset == G1_DATASET else 'bytes' session_features_config = {'single_features': {'user_id': {'type': 'categorical', 'dtype': user_id_type}, 'session_id': {'type': 'categorical', 'dtype': 'int'}, 'session_start': {'type': 'categorical', 'dtype': 'int'}, 'session_size': {'type': 'categorical', 'dtype': 'int'}}, 'sequence_features': {'event_timestamp': {'type': 'categorical', 'dtype': 'int'}, 'item_clicked': {'type': 'categorical', 'dtype': 'int'}}} features_config = session_features_config </DeepExtract> <DeepExtract> with tf.device('/cpu:0'): self.files_placeholder = tf.placeholder(tf.string) ds = make_dataset(self.files_placeholder, features_config, batch_size=1, truncate_sequence_length=truncate_session_length) iterator = tf.data.Iterator.from_structure(ds.output_types, ds.output_shapes) self.next_element_op = iterator.get_next() self.iterator_init_op = iterator.make_initializer(ds) </DeepExtract>
def __init__(self, dataset): user_id_type = 'int' if dataset == G1_DATASET else 'bytes' session_features_config = {'single_features': {'user_id': {'type': 'categorical', 'dtype': user_id_type}, 'session_id': {'type': 'categorical', 'dtype': 'int'}, 'session_start': {'type': 'categorical', 'dtype': 'int'}, 'session_size': {'type': 'categorical', 'dtype': 'int'}}, 'sequence_features': {'event_timestamp': {'type': 'categorical', 'dtype': 'int'}, 'item_clicked': {'type': 'categorical', 'dtype': 'int'}}} features_config = session_features_config with tf.device('/cpu:0'): self.files_placeholder = tf.placeholder(tf.string) ds = make_dataset(self.files_placeholder, features_config, batch_size=1, truncate_sequence_length=truncate_session_length) iterator = tf.data.Iterator.from_structure(ds.output_types, ds.output_shapes) self.next_element_op = iterator.get_next() self.iterator_init_op = iterator.make_initializer(ds)
chameleon_recsys
positive
def _draw_face(self, draw, face, scale_x, scale_y): <DeepExtract> (x, y, w, h) = face.bounding_box (x, y, width, height) = (x * scale_x, y * scale_y, w * scale_x, h * scale_y) </DeepExtract> text = 'Joy: %.2f' % face.joy_score (_, text_height) = self._font.getsize(text) margin = 3 bottom = y + height text_bottom = bottom + margin + text_height + margin <DeepExtract> assert 3 % 2 == 1 for i in range(-3 // 2, 3 // 2 + 1): draw.rectangle((x + i, y + i, x + width - i, bottom - i), fill=fill, outline='white') </DeepExtract> <DeepExtract> assert 3 % 2 == 1 for i in range(-3 // 2, 3 // 2 + 1): draw.rectangle((x + i, bottom + i, x + width - i, text_bottom - i), fill='white', outline='white') </DeepExtract> draw.text((x + 1 + margin, y + height + 1 + margin), text, font=self._font, fill='black')
def _draw_face(self, draw, face, scale_x, scale_y): (x, y, w, h) = face.bounding_box (x, y, width, height) = (x * scale_x, y * scale_y, w * scale_x, h * scale_y) text = 'Joy: %.2f' % face.joy_score (_, text_height) = self._font.getsize(text) margin = 3 bottom = y + height text_bottom = bottom + margin + text_height + margin assert 3 % 2 == 1 for i in range(-3 // 2, 3 // 2 + 1): draw.rectangle((x + i, y + i, x + width - i, bottom - i), fill=fill, outline='white') assert 3 % 2 == 1 for i in range(-3 // 2, 3 // 2 + 1): draw.rectangle((x + i, bottom + i, x + width - i, text_bottom - i), fill='white', outline='white') draw.text((x + 1 + margin, y + height + 1 + margin), text, font=self._font, fill='black')
aiyprojects-raspbian
positive
def __create_jump_execution_path(cur_block, tracker, path): exit_instruction = cur_block.get_exit_bytecode() cur_id = cur_block.get_id() try: (suc_id, push_id, jump_address) = self.resolver.resolve_jump_target(exit_instruction) except JumpAddressError: return suc_block = self.basic_blocks[suc_id] self.__graph.add_block(suc_block) self.__graph.add_edge(cur_id, suc_id, cur_id != push_id) <DeepExtract> if push_id == cur_id: return if cur_id not in self.ambiguous_blocks: self.ambiguous_blocks[cur_id] = dict() self.ambiguous_blocks[cur_id][push_id] = suc_id </DeepExtract> <DeepExtract> if self.__image_trackers.mark_observed_image(suc_id, tracker): return if len(path + [cur_id]) > self.__max_path_len: return if suc_id in self._poison_ids: self.__end_paths.append(path + [cur_id]) return cur_block = self.basic_blocks[suc_id] tracker = Interpreter._resolve_data_dependency(cur_block, tracker) cpy_tracker = tracker.make_copy() exit_instruction = cur_block.get_exit_bytecode() opcode = exit_instruction.opcode if opcode in jump_ops: self.__create_jump_execution_path(cur_block, tracker, path + [cur_id]) if opcode not in exit_ops and opcode not in {'JUMP', 'ASSERT'}: self.__create_natural_execution_path(cur_block, cpy_tracker, path + [cur_id]) </DeepExtract>
def __create_jump_execution_path(cur_block, tracker, path): exit_instruction = cur_block.get_exit_bytecode() cur_id = cur_block.get_id() try: (suc_id, push_id, jump_address) = self.resolver.resolve_jump_target(exit_instruction) except JumpAddressError: return suc_block = self.basic_blocks[suc_id] self.__graph.add_block(suc_block) self.__graph.add_edge(cur_id, suc_id, cur_id != push_id) if push_id == cur_id: return if cur_id not in self.ambiguous_blocks: self.ambiguous_blocks[cur_id] = dict() self.ambiguous_blocks[cur_id][push_id] = suc_id if self.__image_trackers.mark_observed_image(suc_id, tracker): return if len(path + [cur_id]) > self.__max_path_len: return if suc_id in self._poison_ids: self.__end_paths.append(path + [cur_id]) return cur_block = self.basic_blocks[suc_id] tracker = Interpreter._resolve_data_dependency(cur_block, tracker) cpy_tracker = tracker.make_copy() exit_instruction = cur_block.get_exit_bytecode() opcode = exit_instruction.opcode if opcode in jump_ops: self.__create_jump_execution_path(cur_block, tracker, path + [cur_id]) if opcode not in exit_ops and opcode not in {'JUMP', 'ASSERT'}: self.__create_natural_execution_path(cur_block, cpy_tracker, path + [cur_id])
erays
positive
def test_fc_scene(self): """Test for a Fractional Cover scene.""" fc_ds = LandsatDataset(os.path.join(self.INPUT_DIR, self.FC_DIR, self.FC_SCENE)) mdd = fc_ds.metadata_dict <DeepExtract> out_file_path = os.path.join(self.OUTPUT_DIR, 'fc_metadata.txt') out_file = open(out_file_path, 'w') for k in self.SMALL_METADATA_KEYS: val = mdd[k] if k == 'pq_tests_run' and val is not None: val = '{:016b}'.format(val) (print >> out_file, '%s: %s' % (k, val)) out_file.close() </DeepExtract> <DeepExtract> out_file_path = os.path.join(self.OUTPUT_DIR, 'fc_xml.xml') out_file = open(out_file_path, 'w') out_file.write(mdd['xml_text']) out_file.close() </DeepExtract> self.assertIsNone(mdd['mtl_text']) <DeepExtract> output_path = os.path.join(self.OUTPUT_DIR, 'fc_metadata.txt') expected_path = os.path.join(self.EXPECTED_DIR, 'fc_metadata.txt') if not os.path.isfile(expected_path): self.skipTest("Expected file '%s' not found." % 'fc_metadata.txt') else: try: subprocess.check_output(['diff', output_path, expected_path]) except subprocess.CalledProcessError as err: self.fail("File '%s' not as expected:\n" % 'fc_metadata.txt' + err.output) </DeepExtract> <DeepExtract> output_path = os.path.join(self.OUTPUT_DIR, 'fc_xml.xml') expected_path = os.path.join(self.EXPECTED_DIR, 'fc_xml.xml') if not os.path.isfile(expected_path): self.skipTest("Expected file '%s' not found." % 'fc_xml.xml') else: try: subprocess.check_output(['diff', output_path, expected_path]) except subprocess.CalledProcessError as err: self.fail("File '%s' not as expected:\n" % 'fc_xml.xml' + err.output) </DeepExtract>
def test_fc_scene(self): """Test for a Fractional Cover scene.""" fc_ds = LandsatDataset(os.path.join(self.INPUT_DIR, self.FC_DIR, self.FC_SCENE)) mdd = fc_ds.metadata_dict out_file_path = os.path.join(self.OUTPUT_DIR, 'fc_metadata.txt') out_file = open(out_file_path, 'w') for k in self.SMALL_METADATA_KEYS: val = mdd[k] if k == 'pq_tests_run' and val is not None: val = '{:016b}'.format(val) (print >> out_file, '%s: %s' % (k, val)) out_file.close() out_file_path = os.path.join(self.OUTPUT_DIR, 'fc_xml.xml') out_file = open(out_file_path, 'w') out_file.write(mdd['xml_text']) out_file.close() self.assertIsNone(mdd['mtl_text']) output_path = os.path.join(self.OUTPUT_DIR, 'fc_metadata.txt') expected_path = os.path.join(self.EXPECTED_DIR, 'fc_metadata.txt') if not os.path.isfile(expected_path): self.skipTest("Expected file '%s' not found." % 'fc_metadata.txt') else: try: subprocess.check_output(['diff', output_path, expected_path]) except subprocess.CalledProcessError as err: self.fail("File '%s' not as expected:\n" % 'fc_metadata.txt' + err.output) output_path = os.path.join(self.OUTPUT_DIR, 'fc_xml.xml') expected_path = os.path.join(self.EXPECTED_DIR, 'fc_xml.xml') if not os.path.isfile(expected_path): self.skipTest("Expected file '%s' not found." % 'fc_xml.xml') else: try: subprocess.check_output(['diff', output_path, expected_path]) except subprocess.CalledProcessError as err: self.fail("File '%s' not as expected:\n" % 'fc_xml.xml' + err.output)
agdc
positive
@pytest.mark.parametrize('loss', loss_properties) def test_sisdr_and_mse_shape_checks(loss): (pairwise, singlesrc, multisrc, arbitrary_last_dim) = loss <DeepExtract> def _test(shape): pairwise(torch.randn((3, 1000)), torch.randn((3, 1000))) batch_size = 5 _test((batch_size, *(3, 1000))) if not no_batch_ok: with pytest.raises(TypeError): _test((3, 1000)) if arbitrary_last_dim: _test((batch_size, *(3, 1000), 4)) _test((batch_size, *(3, 1000), 4, 5)) else: for dim in range(len((3, 1000))): with pytest.raises(TypeError): _test((batch_size, *(3, 1000)[:dim], 1, *(3, 1000)[dim:])) </DeepExtract> <DeepExtract> def _test(shape): singlesrc(torch.randn((1000,)), torch.randn((1000,))) batch_size = 5 _test((batch_size, *(1000,))) if not no_batch_ok: with pytest.raises(TypeError): _test((1000,)) if arbitrary_last_dim: _test((batch_size, *(1000,), 4)) _test((batch_size, *(1000,), 4, 5)) else: for dim in range(len((1000,))): with pytest.raises(TypeError): _test((batch_size, *(1000,)[:dim], 1, *(1000,)[dim:])) </DeepExtract> no_batch_ok = multisrc == mse.multisrc_mse <DeepExtract> def _test(shape): multisrc(torch.randn((3, 1000)), torch.randn((3, 1000))) batch_size = 5 _test((batch_size, *(3, 1000))) if not no_batch_ok: with pytest.raises(TypeError): _test((3, 1000)) if arbitrary_last_dim: _test((batch_size, *(3, 1000), 4)) _test((batch_size, *(3, 1000), 4, 5)) else: for dim in range(len((3, 1000))): with pytest.raises(TypeError): _test((batch_size, *(3, 1000)[:dim], 1, *(3, 1000)[dim:])) </DeepExtract>
@pytest.mark.parametrize('loss', loss_properties) def test_sisdr_and_mse_shape_checks(loss): (pairwise, singlesrc, multisrc, arbitrary_last_dim) = loss def _test(shape): pairwise(torch.randn((3, 1000)), torch.randn((3, 1000))) batch_size = 5 _test((batch_size, *(3, 1000))) if not no_batch_ok: with pytest.raises(TypeError): _test((3, 1000)) if arbitrary_last_dim: _test((batch_size, *(3, 1000), 4)) _test((batch_size, *(3, 1000), 4, 5)) else: for dim in range(len((3, 1000))): with pytest.raises(TypeError): _test((batch_size, *(3, 1000)[:dim], 1, *(3, 1000)[dim:])) def _test(shape): singlesrc(torch.randn((1000,)), torch.randn((1000,))) batch_size = 5 _test((batch_size, *(1000,))) if not no_batch_ok: with pytest.raises(TypeError): _test((1000,)) if arbitrary_last_dim: _test((batch_size, *(1000,), 4)) _test((batch_size, *(1000,), 4, 5)) else: for dim in range(len((1000,))): with pytest.raises(TypeError): _test((batch_size, *(1000,)[:dim], 1, *(1000,)[dim:])) no_batch_ok = multisrc == mse.multisrc_mse def _test(shape): multisrc(torch.randn((3, 1000)), torch.randn((3, 1000))) batch_size = 5 _test((batch_size, *(3, 1000))) if not no_batch_ok: with pytest.raises(TypeError): _test((3, 1000)) if arbitrary_last_dim: _test((batch_size, *(3, 1000), 4)) _test((batch_size, *(3, 1000), 4, 5)) else: for dim in range(len((3, 1000))): with pytest.raises(TypeError): _test((batch_size, *(3, 1000)[:dim], 1, *(3, 1000)[dim:]))
asteroid
positive
def __init__(self, oracle_object=None, adata=None, obsm_key=None, pseudotime_key='Pseudotime', cell_idx_use=None, name=None, gt=None): """ Estimate the direction of differentiation by calculation gradient of pseudotime on the embedding space. Please look at web tutorial for example scripts. Args: adata (anndata): scRNA-seq data in anndata class obsm_key (str): Name of dimensional reduction. You can check the list of dimensional reduction data name with "adata.obsm.keys()" pseudotime_key (str): Pseudotime data should be stored in adata.obs[pseudotime_key]. Please set the name of pseudotime data in adata.obs cluster_column_name (str): If you set cluster_column_name and cluster, you can subset cells to calculate gradient. Please look at web tutorial for example codes. cluster (str): See above. """ self.cell_idx_use = None self.n_neighbors = None self.min_mass = None self.smooth = None self.n_grid = None if oracle_object is not None: <DeepExtract> self.load_adata(adata=oracle_object.adata, obsm_key=oracle_object.embedding_name, cell_idx_use=cell_idx_use, name=name, pseudotime_key=pseudotime_key) </DeepExtract> elif adata is not None: <DeepExtract> self.name = name self.embedding = adata.obsm[obsm_key].copy() self.pseudotime = adata.obs[pseudotime_key].values.copy() if cell_idx_use is not None: self.cell_idx_use = np.array(cell_idx_use) </DeepExtract> elif gt is not None: self.embedding = gt.embedding.copy() self.mass_filter = gt.mass_filter_whole.copy() self.mass_filter_whole = gt.mass_filter_whole.copy() self.gridpoints_coordinates = gt.gridpoints_coordinates.copy() self.n_neighbors = gt.n_neighbors self.min_mass = gt.min_mass self.smooth = gt.smooth self.n_grid = gt.n_grid
def __init__(self, oracle_object=None, adata=None, obsm_key=None, pseudotime_key='Pseudotime', cell_idx_use=None, name=None, gt=None): """ Estimate the direction of differentiation by calculation gradient of pseudotime on the embedding space. Please look at web tutorial for example scripts. Args: adata (anndata): scRNA-seq data in anndata class obsm_key (str): Name of dimensional reduction. You can check the list of dimensional reduction data name with "adata.obsm.keys()" pseudotime_key (str): Pseudotime data should be stored in adata.obs[pseudotime_key]. Please set the name of pseudotime data in adata.obs cluster_column_name (str): If you set cluster_column_name and cluster, you can subset cells to calculate gradient. Please look at web tutorial for example codes. cluster (str): See above. """ self.cell_idx_use = None self.n_neighbors = None self.min_mass = None self.smooth = None self.n_grid = None if oracle_object is not None: self.load_adata(adata=oracle_object.adata, obsm_key=oracle_object.embedding_name, cell_idx_use=cell_idx_use, name=name, pseudotime_key=pseudotime_key) elif adata is not None: self.name = name self.embedding = adata.obsm[obsm_key].copy() self.pseudotime = adata.obs[pseudotime_key].values.copy() if cell_idx_use is not None: self.cell_idx_use = np.array(cell_idx_use) elif gt is not None: self.embedding = gt.embedding.copy() self.mass_filter = gt.mass_filter_whole.copy() self.mass_filter_whole = gt.mass_filter_whole.copy() self.gridpoints_coordinates = gt.gridpoints_coordinates.copy() self.n_neighbors = gt.n_neighbors self.min_mass = gt.min_mass self.smooth = gt.smooth self.n_grid = gt.n_grid
CellOracle
positive
def test_provides_the_number_of_pages_of_a_topic(self): def get_rendered(topic): t = Template(self.loadstatement + '{% topic_pages_inline_list topic %}') c = Context({'topic': topic}) rendered = t.render(c) return rendered for i in range(0, 35): PostFactory.create(topic=self.forum_1_topic, poster=self.u1) expected_out_small = render_to_string('machina/forum_conversation/topic_pages_inline_list.html', {'topic': self.forum_1_topic, 'first_pages': [1, 2, 3]}) for i in range(0, 120): PostFactory.create(topic=self.forum_2_topic, poster=self.u1) expected_out_huge = render_to_string('machina/forum_conversation/topic_pages_inline_list.html', {'topic': self.forum_2_topic, 'first_pages': [1, 2, 3, 4], 'last_page': 9}) for i in range(0, 2 * machina_settings.TOPIC_POSTS_NUMBER_PER_PAGE - 1): PostFactory.create(topic=self.forum_3_topic, poster=self.u1) expected_out_multiple = render_to_string('machina/forum_conversation/topic_pages_inline_list.html', {'topic': self.forum_3_topic, 'first_pages': [1, 2]}) <DeepExtract> request = self.get_request() request.user = user t = Template(self.loadstatement + '{% if post|posted_by:request.user %}OWNER{% else %}NO_OWNER{% endif %}') c = Context({'post': self.forum_1_topic, 'request': request}) rendered = t.render(c) rendered_small = rendered </DeepExtract> <DeepExtract> request = self.get_request() request.user = user t = Template(self.loadstatement + '{% if post|posted_by:request.user %}OWNER{% else %}NO_OWNER{% endif %}') c = Context({'post': self.forum_2_topic, 'request': request}) rendered = t.render(c) rendered_huge = rendered </DeepExtract> <DeepExtract> request = self.get_request() request.user = user t = Template(self.loadstatement + '{% if post|posted_by:request.user %}OWNER{% else %}NO_OWNER{% endif %}') c = Context({'post': self.forum_3_topic, 'request': request}) rendered = t.render(c) rendered_multiple = rendered </DeepExtract> assert rendered_small == expected_out_small assert rendered_huge == expected_out_huge assert rendered_multiple == expected_out_multiple
def test_provides_the_number_of_pages_of_a_topic(self): def get_rendered(topic): t = Template(self.loadstatement + '{% topic_pages_inline_list topic %}') c = Context({'topic': topic}) rendered = t.render(c) return rendered for i in range(0, 35): PostFactory.create(topic=self.forum_1_topic, poster=self.u1) expected_out_small = render_to_string('machina/forum_conversation/topic_pages_inline_list.html', {'topic': self.forum_1_topic, 'first_pages': [1, 2, 3]}) for i in range(0, 120): PostFactory.create(topic=self.forum_2_topic, poster=self.u1) expected_out_huge = render_to_string('machina/forum_conversation/topic_pages_inline_list.html', {'topic': self.forum_2_topic, 'first_pages': [1, 2, 3, 4], 'last_page': 9}) for i in range(0, 2 * machina_settings.TOPIC_POSTS_NUMBER_PER_PAGE - 1): PostFactory.create(topic=self.forum_3_topic, poster=self.u1) expected_out_multiple = render_to_string('machina/forum_conversation/topic_pages_inline_list.html', {'topic': self.forum_3_topic, 'first_pages': [1, 2]}) request = self.get_request() request.user = user t = Template(self.loadstatement + '{% if post|posted_by:request.user %}OWNER{% else %}NO_OWNER{% endif %}') c = Context({'post': self.forum_1_topic, 'request': request}) rendered = t.render(c) rendered_small = rendered request = self.get_request() request.user = user t = Template(self.loadstatement + '{% if post|posted_by:request.user %}OWNER{% else %}NO_OWNER{% endif %}') c = Context({'post': self.forum_2_topic, 'request': request}) rendered = t.render(c) rendered_huge = rendered request = self.get_request() request.user = user t = Template(self.loadstatement + '{% if post|posted_by:request.user %}OWNER{% else %}NO_OWNER{% endif %}') c = Context({'post': self.forum_3_topic, 'request': request}) rendered = t.render(c) rendered_multiple = rendered assert rendered_small == expected_out_small assert rendered_huge == expected_out_huge assert rendered_multiple == expected_out_multiple
django-machina
positive
def get_split(split_name, dataset_dir, file_pattern=None, reader=None): """Gets a dataset tuple with instructions for reading ImageNet. Args: split_name: A train/test split name. dataset_dir: The base directory of the dataset sources. file_pattern: The file pattern to use when matching the dataset sources. It is assumed that the pattern contains a '%s' string so that the split name can be inserted. reader: The TensorFlow reader type. Returns: A `Dataset` namedtuple. Raises: ValueError: if `split_name` is not a valid train/test split. """ if split_name not in _SPLITS_TO_SIZES: raise ValueError('split name %s was not recognized.' % split_name) if not file_pattern: file_pattern = _FILE_PATTERN file_pattern = os.path.join(dataset_dir, file_pattern % split_name) if reader is None: reader = tf.TFRecordReader keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} items_to_handlers = {'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), 'label': slim.tfexample_decoder.Tensor('image/class/label'), 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), 'object/bbox': slim.tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), 'object/label': slim.tfexample_decoder.Tensor('image/object/class/label')} decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers) labels_to_names = None if LOAD_READABLE_NAMES: if dataset_utils.has_labels(dataset_dir): labels_to_names = dataset_utils.read_label_file(dataset_dir) else: <DeepExtract> base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/research/inception/inception/data/' synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url) synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url) (filename, _) = urllib.request.urlretrieve(synset_url) synset_list = [s.strip() for s in open(filename).readlines()] num_synsets_in_ilsvrc = len(synset_list) assert num_synsets_in_ilsvrc == 1000 (filename, _) = urllib.request.urlretrieve(synset_to_human_url) synset_to_human_list = open(filename).readlines() num_synsets_in_all_imagenet = len(synset_to_human_list) assert num_synsets_in_all_imagenet == 21842 synset_to_human = {} for s in synset_to_human_list: parts = s.strip().split('\t') assert len(parts) == 2 synset = parts[0] human = parts[1] synset_to_human[synset] = human label_index = 1 labels_to_names = {0: 'background'} for synset in synset_list: name = synset_to_human[synset] labels_to_names[label_index] = name label_index += 1 labels_to_names = labels_to_names </DeepExtract> dataset_utils.write_label_file(labels_to_names, dataset_dir) return slim.dataset.Dataset(data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=_SPLITS_TO_SIZES[split_name], items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, num_classes=_NUM_CLASSES, labels_to_names=labels_to_names)
def get_split(split_name, dataset_dir, file_pattern=None, reader=None): """Gets a dataset tuple with instructions for reading ImageNet. Args: split_name: A train/test split name. dataset_dir: The base directory of the dataset sources. file_pattern: The file pattern to use when matching the dataset sources. It is assumed that the pattern contains a '%s' string so that the split name can be inserted. reader: The TensorFlow reader type. Returns: A `Dataset` namedtuple. Raises: ValueError: if `split_name` is not a valid train/test split. """ if split_name not in _SPLITS_TO_SIZES: raise ValueError('split name %s was not recognized.' % split_name) if not file_pattern: file_pattern = _FILE_PATTERN file_pattern = os.path.join(dataset_dir, file_pattern % split_name) if reader is None: reader = tf.TFRecordReader keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} items_to_handlers = {'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), 'label': slim.tfexample_decoder.Tensor('image/class/label'), 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), 'object/bbox': slim.tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), 'object/label': slim.tfexample_decoder.Tensor('image/object/class/label')} decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers) labels_to_names = None if LOAD_READABLE_NAMES: if dataset_utils.has_labels(dataset_dir): labels_to_names = dataset_utils.read_label_file(dataset_dir) else: base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/research/inception/inception/data/' synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url) synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url) (filename, _) = urllib.request.urlretrieve(synset_url) synset_list = [s.strip() for s in open(filename).readlines()] num_synsets_in_ilsvrc = len(synset_list) assert num_synsets_in_ilsvrc == 1000 (filename, _) = urllib.request.urlretrieve(synset_to_human_url) synset_to_human_list = open(filename).readlines() num_synsets_in_all_imagenet = len(synset_to_human_list) assert num_synsets_in_all_imagenet == 21842 synset_to_human = {} for s in synset_to_human_list: parts = s.strip().split('\t') assert len(parts) == 2 synset = parts[0] human = parts[1] synset_to_human[synset] = human label_index = 1 labels_to_names = {0: 'background'} for synset in synset_list: name = synset_to_human[synset] labels_to_names[label_index] = name label_index += 1 labels_to_names = labels_to_names dataset_utils.write_label_file(labels_to_names, dataset_dir) return slim.dataset.Dataset(data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=_SPLITS_TO_SIZES[split_name], items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, num_classes=_NUM_CLASSES, labels_to_names=labels_to_names)
CBAM-tensorflow-slim
positive
def flatten_action_id_to_actions(action_id_to_actions, global_action_id_to_primitive_action, num_primitive_actions): """Converts the values in an action_id_to_actions dictionary back to the primitive actions they represent""" flattened_action_id_to_actions = {} for key in action_id_to_actions.keys(): actions = action_id_to_actions[key] <DeepExtract> print('Recursing to backtrack on ', actions) primitive_actions = range(num_primitive_actions) if all((action in primitive_actions for action in actions)): raw_actions = actions new_action_tuple = [] for action in actions: if action in primitive_actions: new_action_tuple.append(action) else: converted_action = global_action_id_to_primitive_action[action] print(new_action_tuple) new_action_tuple.extend(converted_action) print('Should have changed: ', new_action_tuple) new_action_tuple = tuple(new_action_tuple) raw_actions = backtrack_action_to_primitive_actions(new_action_tuple) </DeepExtract> flattened_action_id_to_actions[key] = raw_actions return flattened_action_id_to_actions
def flatten_action_id_to_actions(action_id_to_actions, global_action_id_to_primitive_action, num_primitive_actions): """Converts the values in an action_id_to_actions dictionary back to the primitive actions they represent""" flattened_action_id_to_actions = {} for key in action_id_to_actions.keys(): actions = action_id_to_actions[key] print('Recursing to backtrack on ', actions) primitive_actions = range(num_primitive_actions) if all((action in primitive_actions for action in actions)): raw_actions = actions new_action_tuple = [] for action in actions: if action in primitive_actions: new_action_tuple.append(action) else: converted_action = global_action_id_to_primitive_action[action] print(new_action_tuple) new_action_tuple.extend(converted_action) print('Should have changed: ', new_action_tuple) new_action_tuple = tuple(new_action_tuple) raw_actions = backtrack_action_to_primitive_actions(new_action_tuple) flattened_action_id_to_actions[key] = raw_actions return flattened_action_id_to_actions
Deep-Reinforcement-Learning-Algorithms-with-PyTorch
positive
def sub_part(sliceno, opts): a = Automata(g.server_url, verbose=True) pid = os.getpid() def verify(want): timeout = 0 got = None for _ in range(25): status_stacks = a._server_idle(timeout)[2] for line in status_stacks: if line[0] == pid and line[1] < 0: got = line[2].replace('\r\n', '\n') if got == want: return timeout += 0.01 raise Exception('Wanted to see tail output of %r, but saw %r' % (want, got)) print(opts.prefix, file=sys.stderr) <DeepExtract> timeout = 0 got = None for _ in range(25): status_stacks = a._server_idle(timeout)[2] for line in status_stacks: if line[0] == pid and line[1] < 0: got = line[2].replace('\r\n', '\n') if got == opts.prefix + '\n': return timeout += 0.01 raise Exception('Wanted to see tail output of %r, but saw %r' % (opts.prefix + '\n', got)) </DeepExtract> if isinstance(sliceno, int): msg = opts.a % (sliceno,) else: msg = opts[sliceno] print(msg) <DeepExtract> timeout = 0 got = None for _ in range(25): status_stacks = a._server_idle(timeout)[2] for line in status_stacks: if line[0] == pid and line[1] < 0: got = line[2].replace('\r\n', '\n') if got == opts.prefix + '\n' + msg + '\n': return timeout += 0.01 raise Exception('Wanted to see tail output of %r, but saw %r' % (opts.prefix + '\n' + msg + '\n', got)) </DeepExtract>
def sub_part(sliceno, opts): a = Automata(g.server_url, verbose=True) pid = os.getpid() def verify(want): timeout = 0 got = None for _ in range(25): status_stacks = a._server_idle(timeout)[2] for line in status_stacks: if line[0] == pid and line[1] < 0: got = line[2].replace('\r\n', '\n') if got == want: return timeout += 0.01 raise Exception('Wanted to see tail output of %r, but saw %r' % (want, got)) print(opts.prefix, file=sys.stderr) timeout = 0 got = None for _ in range(25): status_stacks = a._server_idle(timeout)[2] for line in status_stacks: if line[0] == pid and line[1] < 0: got = line[2].replace('\r\n', '\n') if got == opts.prefix + '\n': return timeout += 0.01 raise Exception('Wanted to see tail output of %r, but saw %r' % (opts.prefix + '\n', got)) if isinstance(sliceno, int): msg = opts.a % (sliceno,) else: msg = opts[sliceno] print(msg) timeout = 0 got = None for _ in range(25): status_stacks = a._server_idle(timeout)[2] for line in status_stacks: if line[0] == pid and line[1] < 0: got = line[2].replace('\r\n', '\n') if got == opts.prefix + '\n' + msg + '\n': return timeout += 0.01 raise Exception('Wanted to see tail output of %r, but saw %r' % (opts.prefix + '\n' + msg + '\n', got))
accelerator
positive