column  kind           range
before  stringlengths  0 - 955k
after   stringlengths  0 - 877k
repo    stringlengths  1 - 74
type    stringclasses  1 value
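Each row below pairs a before snippet containing <DeepExtract> regions with an after snippet in which those regions have been inlined, labelled by source repository and a type flag. A minimal sketch of iterating such records, assuming they are available as an iterable of dicts keyed by the four columns above (the record source itself is an assumption, not part of this dump):

def summarize_records(records):
    # Print one summary line per before/after pair, keyed by repository.
    for rec in records:
        print('{repo}: type={type}, before={nb} chars, after={na} chars'.format(
            repo=rec['repo'], type=rec['type'],
            nb=len(rec['before']), na=len(rec['after'])))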
@njit(locals={'lower_mask': _QSMask}, nogil=True, parallel=True, fastmath=FASTMATH) def _xgate(qubits: np.ndarray, n_qubits: _QSIdx, target: _QSIdx) -> None: lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): <DeepExtract> lower = i & lower_mask higher = (i & ~lower_mask) << 1 i0 = higher | lower </DeepExtract> t = qubits[i0] qubits[i0] = qubits[i0 + (1 << target)] qubits[i0 + (1 << target)] = t
@njit(locals={'lower_mask': _QSMask}, nogil=True, parallel=True, fastmath=FASTMATH) def _xgate(qubits: np.ndarray, n_qubits: _QSIdx, target: _QSIdx) -> None: lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): lower = i & lower_mask higher = (i & ~lower_mask) << 1 i0 = higher | lower t = qubits[i0] qubits[i0] = qubits[i0 + (1 << target)] qubits[i0 + (1 << target)] = t
Blueqat
positive
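The Blueqat pair above walks the 2**(n_qubits - 1) basis indices whose target bit is 0 by splitting each loop counter into the bits below and above the target position. A standalone sketch of that index mapping, with the Numba decoration left out:

def paired_indices(n_qubits, target):
    # Insert a 0 bit at position `target`; the partner index differs only in
    # that bit, so the two amplitudes can be swapped.
    lower_mask = (1 << target) - 1
    for i in range(1 << (n_qubits - 1)):
        lower = i & lower_mask
        higher = (i & ~lower_mask) << 1
        i0 = higher | lower
        yield i0, i0 + (1 << target)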
def partition_indices(indices): <DeepExtract> if self.conf.graph.rank == 0: if self.partition_type == 'random': random.shuffle(indices) elif self.partition_type == 'sorted': indices = [i[0] for i in sorted(enumerate(self.data.targets), key=lambda x: x[1])] indices = torch.IntTensor(indices) indices = indices.cuda() if self.conf.backend == 'nccl' else indices group = dist.new_group(self.conf.graph.ranks) dist.broadcast(indices, src=0, group=group) indices = indices.cpu() if self.conf.backend == 'nccl' else indices indices = list(indices) </DeepExtract> from_index = 0 for partition_size in self.partition_sizes: to_index = from_index + int(partition_size * self.data_size) self.partitions.append(indices[from_index:to_index]) from_index = to_index
def partition_indices(indices): if self.conf.graph.rank == 0: if self.partition_type == 'random': random.shuffle(indices) elif self.partition_type == 'sorted': indices = [i[0] for i in sorted(enumerate(self.data.targets), key=lambda x: x[1])] indices = torch.IntTensor(indices) indices = indices.cuda() if self.conf.backend == 'nccl' else indices group = dist.new_group(self.conf.graph.ranks) dist.broadcast(indices, src=0, group=group) indices = indices.cpu() if self.conf.backend == 'nccl' else indices indices = list(indices) from_index = 0 for partition_size in self.partition_sizes: to_index = from_index + int(partition_size * self.data_size) self.partitions.append(indices[from_index:to_index]) from_index = to_index
ChocoSGD
positive
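The ChocoSGD pair above shuffles or sorts the index list on rank 0, broadcasts it, and then cuts it into contiguous slices proportional to partition_sizes. A sketch of just the slicing step, with the distributed shuffle and broadcast omitted:

def slice_partitions(indices, partition_sizes):
    # Cut `indices` into contiguous chunks, each proportional to its size.
    partitions, start = [], 0
    for size in partition_sizes:
        end = start + int(size * len(indices))
        partitions.append(indices[start:end])
        start = end
    return partitions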
def _generate_single_file(suffix: str): <DeepExtract> print(f'{C.BOLD}Generating single file output...{C.ENDC}', file=sys.stderr) </DeepExtract> buf = io.StringIO() output = [] sources = [] for (name, source, query, dr) in sorted(self._describe_results): sources.append(source) output.append(self._generate(name, query, dr)) <DeepExtract> src_str = map(lambda p: repr(p.relative_to(self._project_dir).as_posix()), sources) if len(sources) > 1: print('# AUTOGENERATED FROM:', file=buf) for s in src_str: print(f'# {s}', file=buf) print('# WITH:', file=buf) else: print(f'# AUTOGENERATED FROM {next(src_str)} WITH:', file=buf) cmd = [] if sys.argv[0].endswith('__main__.py'): cmd.append(pathlib.Path(sys.executable).name) cmd.extend(['-m', 'edgedb.codegen']) else: cmd.append(pathlib.Path(sys.argv[0]).name) cmd.extend(sys.argv[1:]) cmd = ' '.join(cmd) print(f'# $ {cmd}', file=buf) print(file=buf) print(file=buf) </DeepExtract> <DeepExtract> print('from __future__ import annotations', file=buf) for m in sorted(self._imports): print(f'import {m}', file=buf) print(file=buf) print(file=buf) if self._aliases: for (_, a) in sorted(self._aliases.items()): print(a, file=buf) print(file=buf) print(file=buf) if self._use_pydantic: print(PYDANTIC_MIXIN, file=buf) print(file=buf) print(file=buf) for (_, d) in sorted(self._defs.items()): print(d, file=buf) print(file=buf) print(file=buf) </DeepExtract> for (i, o) in enumerate(output): buf.write(o) if i < len(output) - 1: print(file=buf) print(file=buf) for target in self._single_mode_files: if target: target = pathlib.Path(target).absolute() else: target = self._project_dir / f'{FILE_MODE_OUTPUT_FILE}{suffix}' <DeepExtract> print(f'{C.BOLD}Writing{C.ENDC} {C.BLUE}{target}{C.ENDC}', file=sys.stderr) </DeepExtract> with target.open('w') as f: f.write(buf.getvalue())
def _generate_single_file(suffix: str): print(f'{C.BOLD}Generating single file output...{C.ENDC}', file=sys.stderr) buf = io.StringIO() output = [] sources = [] for (name, source, query, dr) in sorted(self._describe_results): sources.append(source) output.append(self._generate(name, query, dr)) src_str = map(lambda p: repr(p.relative_to(self._project_dir).as_posix()), sources) if len(sources) > 1: print('# AUTOGENERATED FROM:', file=buf) for s in src_str: print(f'# {s}', file=buf) print('# WITH:', file=buf) else: print(f'# AUTOGENERATED FROM {next(src_str)} WITH:', file=buf) cmd = [] if sys.argv[0].endswith('__main__.py'): cmd.append(pathlib.Path(sys.executable).name) cmd.extend(['-m', 'edgedb.codegen']) else: cmd.append(pathlib.Path(sys.argv[0]).name) cmd.extend(sys.argv[1:]) cmd = ' '.join(cmd) print(f'# $ {cmd}', file=buf) print(file=buf) print(file=buf) print('from __future__ import annotations', file=buf) for m in sorted(self._imports): print(f'import {m}', file=buf) print(file=buf) print(file=buf) if self._aliases: for (_, a) in sorted(self._aliases.items()): print(a, file=buf) print(file=buf) print(file=buf) if self._use_pydantic: print(PYDANTIC_MIXIN, file=buf) print(file=buf) print(file=buf) for (_, d) in sorted(self._defs.items()): print(d, file=buf) print(file=buf) print(file=buf) for (i, o) in enumerate(output): buf.write(o) if i < len(output) - 1: print(file=buf) print(file=buf) for target in self._single_mode_files: if target: target = pathlib.Path(target).absolute() else: target = self._project_dir / f'{FILE_MODE_OUTPUT_FILE}{suffix}' print(f'{C.BOLD}Writing{C.ENDC} {C.BLUE}{target}{C.ENDC}', file=sys.stderr) with target.open('w') as f: f.write(buf.getvalue())
edgedb-python
positive
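The edgedb-python pair above writes an '# AUTOGENERATED FROM ... WITH:' banner that reproduces the command line the generator was invoked with. A sketch of that reconstruction step on its own:

import pathlib
import sys

def invocation_line():
    # Prefer the `python -m edgedb.codegen` form when run as a module,
    # otherwise fall back to the script name.
    if sys.argv[0].endswith('__main__.py'):
        cmd = [pathlib.Path(sys.executable).name, '-m', 'edgedb.codegen']
    else:
        cmd = [pathlib.Path(sys.argv[0]).name]
    cmd.extend(sys.argv[1:])
    return ' '.join(cmd)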
def convert(src, dst, depth): """Convert keys in detectron pretrained ResNet models to pytorch style.""" if depth not in arch_settings: raise ValueError('Only support ResNet-50 and ResNet-101 currently') block_nums = arch_settings[depth] caffe_model = mmcv.load(src, encoding='latin1') blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model state_dict = OrderedDict() converted_names = set() <DeepExtract> state_dict['conv1' + '.weight'] = torch.from_numpy(blobs['conv1' + '_w']) converted_names.add('conv1' + '_w') if 'conv1' + '_b' in blobs: state_dict['conv1' + '.bias'] = torch.from_numpy(blobs['conv1' + '_b']) converted_names.add('conv1' + '_b') </DeepExtract> <DeepExtract> state_dict['bn1' + '.bias'] = torch.from_numpy(blobs['res_conv1_bn' + '_b']) state_dict['bn1' + '.weight'] = torch.from_numpy(blobs['res_conv1_bn' + '_s']) bn_size = state_dict['bn1' + '.weight'].size() state_dict['bn1' + '.running_mean'] = torch.zeros(bn_size) state_dict['bn1' + '.running_var'] = torch.ones(bn_size) converted_names.add('res_conv1_bn' + '_b') converted_names.add('res_conv1_bn' + '_s') </DeepExtract> for i in range(1, len(block_nums) + 1): for j in range(block_nums[i - 1]): if j == 0: <DeepExtract> state_dict['layer{}.{}.downsample.0'.format(i, j) + '.weight'] = torch.from_numpy(blobs['res{}_{}_branch1'.format(i + 1, j) + '_w']) converted_names.add('res{}_{}_branch1'.format(i + 1, j) + '_w') if 'res{}_{}_branch1'.format(i + 1, j) + '_b' in blobs: state_dict['layer{}.{}.downsample.0'.format(i, j) + '.bias'] = torch.from_numpy(blobs['res{}_{}_branch1'.format(i + 1, j) + '_b']) converted_names.add('res{}_{}_branch1'.format(i + 1, j) + '_b') </DeepExtract> <DeepExtract> state_dict['layer{}.{}.downsample.1'.format(i, j) + '.bias'] = torch.from_numpy(blobs['res{}_{}_branch1_bn'.format(i + 1, j) + '_b']) state_dict['layer{}.{}.downsample.1'.format(i, j) + '.weight'] = torch.from_numpy(blobs['res{}_{}_branch1_bn'.format(i + 1, j) + '_s']) bn_size = state_dict['layer{}.{}.downsample.1'.format(i, j) + '.weight'].size() state_dict['layer{}.{}.downsample.1'.format(i, j) + '.running_mean'] = torch.zeros(bn_size) state_dict['layer{}.{}.downsample.1'.format(i, j) + '.running_var'] = torch.ones(bn_size) converted_names.add('res{}_{}_branch1_bn'.format(i + 1, j) + '_b') converted_names.add('res{}_{}_branch1_bn'.format(i + 1, j) + '_s') </DeepExtract> for (k, letter) in enumerate(['a', 'b', 'c']): <DeepExtract> state_dict['layer{}.{}.conv{}'.format(i, j, k + 1) + '.weight'] = torch.from_numpy(blobs['res{}_{}_branch2{}'.format(i + 1, j, letter) + '_w']) converted_names.add('res{}_{}_branch2{}'.format(i + 1, j, letter) + '_w') if 'res{}_{}_branch2{}'.format(i + 1, j, letter) + '_b' in blobs: state_dict['layer{}.{}.conv{}'.format(i, j, k + 1) + '.bias'] = torch.from_numpy(blobs['res{}_{}_branch2{}'.format(i + 1, j, letter) + '_b']) converted_names.add('res{}_{}_branch2{}'.format(i + 1, j, letter) + '_b') </DeepExtract> <DeepExtract> state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.bias'] = torch.from_numpy(blobs['res{}_{}_branch2{}_bn'.format(i + 1, j, letter) + '_b']) state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.weight'] = torch.from_numpy(blobs['res{}_{}_branch2{}_bn'.format(i + 1, j, letter) + '_s']) bn_size = state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.weight'].size() state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.running_mean'] = torch.zeros(bn_size) state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.running_var'] = torch.ones(bn_size) 
converted_names.add('res{}_{}_branch2{}_bn'.format(i + 1, j, letter) + '_b') converted_names.add('res{}_{}_branch2{}_bn'.format(i + 1, j, letter) + '_s') </DeepExtract> for key in blobs: if key not in converted_names: print('Not Convert: {}'.format(key)) checkpoint = dict() checkpoint['state_dict'] = state_dict torch.save(checkpoint, dst)
def convert(src, dst, depth): """Convert keys in detectron pretrained ResNet models to pytorch style.""" if depth not in arch_settings: raise ValueError('Only support ResNet-50 and ResNet-101 currently') block_nums = arch_settings[depth] caffe_model = mmcv.load(src, encoding='latin1') blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model state_dict = OrderedDict() converted_names = set() state_dict['conv1' + '.weight'] = torch.from_numpy(blobs['conv1' + '_w']) converted_names.add('conv1' + '_w') if 'conv1' + '_b' in blobs: state_dict['conv1' + '.bias'] = torch.from_numpy(blobs['conv1' + '_b']) converted_names.add('conv1' + '_b') state_dict['bn1' + '.bias'] = torch.from_numpy(blobs['res_conv1_bn' + '_b']) state_dict['bn1' + '.weight'] = torch.from_numpy(blobs['res_conv1_bn' + '_s']) bn_size = state_dict['bn1' + '.weight'].size() state_dict['bn1' + '.running_mean'] = torch.zeros(bn_size) state_dict['bn1' + '.running_var'] = torch.ones(bn_size) converted_names.add('res_conv1_bn' + '_b') converted_names.add('res_conv1_bn' + '_s') for i in range(1, len(block_nums) + 1): for j in range(block_nums[i - 1]): if j == 0: state_dict['layer{}.{}.downsample.0'.format(i, j) + '.weight'] = torch.from_numpy(blobs['res{}_{}_branch1'.format(i + 1, j) + '_w']) converted_names.add('res{}_{}_branch1'.format(i + 1, j) + '_w') if 'res{}_{}_branch1'.format(i + 1, j) + '_b' in blobs: state_dict['layer{}.{}.downsample.0'.format(i, j) + '.bias'] = torch.from_numpy(blobs['res{}_{}_branch1'.format(i + 1, j) + '_b']) converted_names.add('res{}_{}_branch1'.format(i + 1, j) + '_b') state_dict['layer{}.{}.downsample.1'.format(i, j) + '.bias'] = torch.from_numpy(blobs['res{}_{}_branch1_bn'.format(i + 1, j) + '_b']) state_dict['layer{}.{}.downsample.1'.format(i, j) + '.weight'] = torch.from_numpy(blobs['res{}_{}_branch1_bn'.format(i + 1, j) + '_s']) bn_size = state_dict['layer{}.{}.downsample.1'.format(i, j) + '.weight'].size() state_dict['layer{}.{}.downsample.1'.format(i, j) + '.running_mean'] = torch.zeros(bn_size) state_dict['layer{}.{}.downsample.1'.format(i, j) + '.running_var'] = torch.ones(bn_size) converted_names.add('res{}_{}_branch1_bn'.format(i + 1, j) + '_b') converted_names.add('res{}_{}_branch1_bn'.format(i + 1, j) + '_s') for (k, letter) in enumerate(['a', 'b', 'c']): state_dict['layer{}.{}.conv{}'.format(i, j, k + 1) + '.weight'] = torch.from_numpy(blobs['res{}_{}_branch2{}'.format(i + 1, j, letter) + '_w']) converted_names.add('res{}_{}_branch2{}'.format(i + 1, j, letter) + '_w') if 'res{}_{}_branch2{}'.format(i + 1, j, letter) + '_b' in blobs: state_dict['layer{}.{}.conv{}'.format(i, j, k + 1) + '.bias'] = torch.from_numpy(blobs['res{}_{}_branch2{}'.format(i + 1, j, letter) + '_b']) converted_names.add('res{}_{}_branch2{}'.format(i + 1, j, letter) + '_b') state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.bias'] = torch.from_numpy(blobs['res{}_{}_branch2{}_bn'.format(i + 1, j, letter) + '_b']) state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.weight'] = torch.from_numpy(blobs['res{}_{}_branch2{}_bn'.format(i + 1, j, letter) + '_s']) bn_size = state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.weight'].size() state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.running_mean'] = torch.zeros(bn_size) state_dict['layer{}.{}.bn{}'.format(i, j, k + 1) + '.running_var'] = torch.ones(bn_size) converted_names.add('res{}_{}_branch2{}_bn'.format(i + 1, j, letter) + '_b') converted_names.add('res{}_{}_branch2{}_bn'.format(i + 1, j, letter) + '_s') for key in blobs: if key not in 
converted_names: print('Not Convert: {}'.format(key)) checkpoint = dict() checkpoint['state_dict'] = state_dict torch.save(checkpoint, dst)
C-HOI
positive
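The C-HOI pair above maps Detectron blob names onto torchvision-style ResNet keys. A sketch of the naming scheme for the branch2 convolution weights only; biases and batch-norm statistics follow the same pattern with the _b, _s, and _bn suffixes:

def branch2_weight_keys(stage, block, k):
    # stage, block and k correspond to the loop variables i, j and k above.
    letter = 'abc'[k]
    caffe_name = 'res{}_{}_branch2{}_w'.format(stage + 1, block, letter)
    torch_name = 'layer{}.{}.conv{}.weight'.format(stage, block, k + 1)
    return caffe_name, torch_name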
def main(): setup_default_logging() args = parser.parse_args() args.prefetcher = not args.no_prefetcher args.distributed = False if 'WORLD_SIZE' in os.environ: args.distributed = int(os.environ['WORLD_SIZE']) > 1 if args.distributed and args.num_gpu > 1: logging.warning('Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.') args.num_gpu = 1 args.device = 'cuda:0' args.world_size = 1 args.rank = 0 if args.distributed: args.num_gpu = 1 args.device = 'cuda:%d' % args.local_rank torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend='nccl', init_method='env://') args.world_size = torch.distributed.get_world_size() args.rank = torch.distributed.get_rank() assert args.rank >= 0 if args.distributed: logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' % (args.rank, args.world_size)) else: logging.info('Training with a single process on %d GPUs.' % args.num_gpu) torch.manual_seed(args.seed + args.rank) model = eval(args.model)(pretrained=args.pretrained, num_classes=args.num_classes, drop_rate=args.drop, global_pool=args.gp, bn_tf=args.bn_tf, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps) if os.path.exists(args.initial_checkpoint): load_checkpoint(model, args.initial_checkpoint) if args.local_rank == 0: logging.info('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))) data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) optimizer_state = None resume_epoch = None if args.resume: (optimizer_state, resume_epoch) = resume_checkpoint(model, args.resume) if args.num_gpu > 1: if args.amp: logging.warning('AMP does not work well with nn.DataParallel, disabling. Use distributed mode for multi-GPU AMP.') args.amp = False model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() else: model.cuda() optimizer = create_optimizer(args, model) if optimizer_state is not None: optimizer.load_state_dict(optimizer_state) use_amp = False if has_apex and args.amp: (model, optimizer) = amp.initialize(model, optimizer, opt_level='O1') use_amp = True if args.local_rank == 0: logging.info('NVIDIA APEX {}. AMP {}.'.format('installed' if has_apex else 'not installed', 'on' if use_amp else 'off')) model_ema = None if args.model_ema: model_ema = ModelEma(model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume=args.resume) if args.distributed: if args.sync_bn: try: if has_apex: model = convert_syncbn_model(model) else: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) if args.local_rank == 0: logging.info('Converted model to use Synchronized BatchNorm.') except Exception as e: logging.error('Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1') if has_apex: model = DDP(model, delay_allreduce=True) else: if args.local_rank == 0: logging.info('Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.') model = DDP(model, device_ids=[args.local_rank]) (lr_scheduler, num_epochs) = create_scheduler(args, optimizer) start_epoch = 0 if args.start_epoch is not None: start_epoch = args.start_epoch elif resume_epoch is not None: start_epoch = resume_epoch if start_epoch > 0: lr_scheduler.step(start_epoch) if args.local_rank == 0: logging.info('Scheduled epochs: {}'.format(num_epochs)) train_dir = os.path.join(args.data, 'train') if not os.path.exists(train_dir): logging.error('Training folder does not exist at: {}'.format(train_dir)) exit(1) dataset_train = Dataset(train_dir) collate_fn = None if args.prefetcher and args.mixup > 0: collate_fn = FastCollateMixup(args.mixup, args.smoothing, args.num_classes) loader_train = create_loader(dataset_train, input_size=data_config['input_size'], batch_size=args.batch_size, is_training=True, use_prefetcher=args.prefetcher, rand_erase_prob=args.reprob, rand_erase_mode=args.remode, color_jitter=args.color_jitter, interpolation='random', mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed, collate_fn=collate_fn) eval_dir = os.path.join(args.data, 'val') if not os.path.isdir(eval_dir): logging.error('Validation folder does not exist at: {}'.format(eval_dir)) exit(1) dataset_eval = Dataset(eval_dir) loader_eval = create_loader(dataset_eval, input_size=data_config['input_size'], batch_size=4 * args.batch_size, is_training=False, use_prefetcher=args.prefetcher, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed) if args.mixup > 0.0: train_loss_fn = SoftTargetCrossEntropy().cuda() validate_loss_fn = nn.CrossEntropyLoss().cuda() elif args.smoothing: train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda() validate_loss_fn = nn.CrossEntropyLoss().cuda() else: train_loss_fn = nn.CrossEntropyLoss().cuda() validate_loss_fn = train_loss_fn eval_metric = args.eval_metric best_metric = None best_epoch = None saver = None output_dir = '' if args.local_rank == 0: output_base = args.output if args.output else './output' exp_name = '-'.join([datetime.now().strftime('%Y%m%d-%H%M%S'), args.model, str(data_config['input_size'][-1])]) output_dir = get_outdir(output_base, 'train', exp_name) decreasing = True if eval_metric == 'loss' else False saver = CheckpointSaver(checkpoint_dir=output_dir, decreasing=decreasing) try: for epoch in range(start_epoch, num_epochs): if args.distributed: loader_train.sampler.set_epoch(epoch) <DeepExtract> if args.prefetcher and args.mixup > 0 and loader_train.mixup_enabled: if args.mixup_off_epoch and epoch >= args.mixup_off_epoch: loader_train.mixup_enabled = False batch_time_m = AverageMeter() data_time_m = AverageMeter() losses_m = AverageMeter() model.train() end = time.time() last_idx = len(loader_train) - 1 num_updates = epoch * len(loader_train) for (batch_idx, (input, target)) in enumerate(loader_train): last_batch = batch_idx == last_idx data_time_m.update(time.time() - end) if not args.prefetcher: input = input.cuda() target = target.cuda() if args.mixup > 0.0: lam = 1.0 if not args.mixup_off_epoch or epoch < args.mixup_off_epoch: lam = np.random.beta(args.mixup, args.mixup) input.mul_(lam).add_(1 - lam, input.flip(0)) target = mixup_target(target, args.num_classes, lam, args.smoothing) output = model(input) loss = train_loss_fn(output, target) if not args.distributed: losses_m.update(loss.item(), input.size(0)) optimizer.zero_grad() if 
use_amp: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() optimizer.step() torch.cuda.synchronize() if model_ema is not None: model_ema.update(model) num_updates += 1 batch_time_m.update(time.time() - end) if last_batch or batch_idx % args.log_interval == 0: lrl = [param_group['lr'] for param_group in optimizer.param_groups] lr = sum(lrl) / len(lrl) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) losses_m.update(reduced_loss.item(), input.size(0)) if args.local_rank == 0: logging.info('Train: {} [{:>4d}/{} ({:>3.0f}%)] Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) LR: {lr:.3e} Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(epoch, batch_idx, len(loader_train), 100.0 * batch_idx / last_idx, loss=losses_m, batch_time=batch_time_m, rate=input.size(0) * args.world_size / batch_time_m.val, rate_avg=input.size(0) * args.world_size / batch_time_m.avg, lr=lr, data_time=data_time_m)) if args.save_images and output_dir: torchvision.utils.save_image(input, os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx), padding=0, normalize=True) if saver is not None and args.recovery_interval and (last_batch or (batch_idx + 1) % args.recovery_interval == 0): saver.save_recovery(model, optimizer, args, epoch, model_ema=model_ema, batch_idx=batch_idx) if lr_scheduler is not None: lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg) end = time.time() train_metrics = OrderedDict([('loss', losses_m.avg)]) </DeepExtract> <DeepExtract> batch_time_m = AverageMeter() losses_m = AverageMeter() prec1_m = AverageMeter() prec5_m = AverageMeter() model.eval() end = time.time() last_idx = len(loader_eval) - 1 with torch.no_grad(): for (batch_idx, (input, target)) in enumerate(loader_eval): last_batch = batch_idx == last_idx if not args.prefetcher: input = input.cuda() target = target.cuda() output = model(input) if isinstance(output, (tuple, list)): output = output[0] reduce_factor = args.tta if reduce_factor > 1: output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) target = target[0:target.size(0):reduce_factor] loss = validate_loss_fn(output, target) (prec1, prec5) = accuracy(output, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) prec1 = reduce_tensor(prec1, args.world_size) prec5 = reduce_tensor(prec5, args.world_size) else: reduced_loss = loss.data torch.cuda.synchronize() losses_m.update(reduced_loss.item(), input.size(0)) prec1_m.update(prec1.item(), output.size(0)) prec5_m.update(prec5.item(), output.size(0)) batch_time_m.update(time.time() - end) end = time.time() if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0): log_name = 'Test' + log_suffix logging.info('{0}: [{1:>4d}/{2}] Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m, top1=prec1_m, top5=prec5_m)) metrics = OrderedDict([('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) eval_metrics = metrics </DeepExtract> if model_ema is not None and (not args.model_ema_force_cpu): <DeepExtract> batch_time_m = AverageMeter() losses_m = AverageMeter() prec1_m = AverageMeter() prec5_m = AverageMeter() model_ema.ema.eval() end = time.time() last_idx = 
len(loader_eval) - 1 with torch.no_grad(): for (batch_idx, (input, target)) in enumerate(loader_eval): last_batch = batch_idx == last_idx if not args.prefetcher: input = input.cuda() target = target.cuda() output = model_ema.ema(input) if isinstance(output, (tuple, list)): output = output[0] reduce_factor = args.tta if reduce_factor > 1: output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) target = target[0:target.size(0):reduce_factor] loss = validate_loss_fn(output, target) (prec1, prec5) = accuracy(output, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) prec1 = reduce_tensor(prec1, args.world_size) prec5 = reduce_tensor(prec5, args.world_size) else: reduced_loss = loss.data torch.cuda.synchronize() losses_m.update(reduced_loss.item(), input.size(0)) prec1_m.update(prec1.item(), output.size(0)) prec5_m.update(prec5.item(), output.size(0)) batch_time_m.update(time.time() - end) end = time.time() if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0): log_name = 'Test' + ' (EMA)' logging.info('{0}: [{1:>4d}/{2}] Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m, top1=prec1_m, top5=prec5_m)) metrics = OrderedDict([('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) ema_eval_metrics = metrics </DeepExtract> eval_metrics = ema_eval_metrics if lr_scheduler is not None: lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) update_summary(epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'), write_header=best_metric is None) if saver is not None: save_metric = eval_metrics[eval_metric] (best_metric, best_epoch) = saver.save_checkpoint(model, optimizer, args, epoch=epoch, model_ema=model_ema, metric=save_metric) except KeyboardInterrupt: pass if best_metric is not None: logging.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def main(): setup_default_logging() args = parser.parse_args() args.prefetcher = not args.no_prefetcher args.distributed = False if 'WORLD_SIZE' in os.environ: args.distributed = int(os.environ['WORLD_SIZE']) > 1 if args.distributed and args.num_gpu > 1: logging.warning('Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.') args.num_gpu = 1 args.device = 'cuda:0' args.world_size = 1 args.rank = 0 if args.distributed: args.num_gpu = 1 args.device = 'cuda:%d' % args.local_rank torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend='nccl', init_method='env://') args.world_size = torch.distributed.get_world_size() args.rank = torch.distributed.get_rank() assert args.rank >= 0 if args.distributed: logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' % (args.rank, args.world_size)) else: logging.info('Training with a single process on %d GPUs.' % args.num_gpu) torch.manual_seed(args.seed + args.rank) model = eval(args.model)(pretrained=args.pretrained, num_classes=args.num_classes, drop_rate=args.drop, global_pool=args.gp, bn_tf=args.bn_tf, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps) if os.path.exists(args.initial_checkpoint): load_checkpoint(model, args.initial_checkpoint) if args.local_rank == 0: logging.info('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))) data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) optimizer_state = None resume_epoch = None if args.resume: (optimizer_state, resume_epoch) = resume_checkpoint(model, args.resume) if args.num_gpu > 1: if args.amp: logging.warning('AMP does not work well with nn.DataParallel, disabling. Use distributed mode for multi-GPU AMP.') args.amp = False model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() else: model.cuda() optimizer = create_optimizer(args, model) if optimizer_state is not None: optimizer.load_state_dict(optimizer_state) use_amp = False if has_apex and args.amp: (model, optimizer) = amp.initialize(model, optimizer, opt_level='O1') use_amp = True if args.local_rank == 0: logging.info('NVIDIA APEX {}. AMP {}.'.format('installed' if has_apex else 'not installed', 'on' if use_amp else 'off')) model_ema = None if args.model_ema: model_ema = ModelEma(model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume=args.resume) if args.distributed: if args.sync_bn: try: if has_apex: model = convert_syncbn_model(model) else: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) if args.local_rank == 0: logging.info('Converted model to use Synchronized BatchNorm.') except Exception as e: logging.error('Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1') if has_apex: model = DDP(model, delay_allreduce=True) else: if args.local_rank == 0: logging.info('Using torch DistributedDataParallel. 
Install NVIDIA Apex for Apex DDP.') model = DDP(model, device_ids=[args.local_rank]) (lr_scheduler, num_epochs) = create_scheduler(args, optimizer) start_epoch = 0 if args.start_epoch is not None: start_epoch = args.start_epoch elif resume_epoch is not None: start_epoch = resume_epoch if start_epoch > 0: lr_scheduler.step(start_epoch) if args.local_rank == 0: logging.info('Scheduled epochs: {}'.format(num_epochs)) train_dir = os.path.join(args.data, 'train') if not os.path.exists(train_dir): logging.error('Training folder does not exist at: {}'.format(train_dir)) exit(1) dataset_train = Dataset(train_dir) collate_fn = None if args.prefetcher and args.mixup > 0: collate_fn = FastCollateMixup(args.mixup, args.smoothing, args.num_classes) loader_train = create_loader(dataset_train, input_size=data_config['input_size'], batch_size=args.batch_size, is_training=True, use_prefetcher=args.prefetcher, rand_erase_prob=args.reprob, rand_erase_mode=args.remode, color_jitter=args.color_jitter, interpolation='random', mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed, collate_fn=collate_fn) eval_dir = os.path.join(args.data, 'val') if not os.path.isdir(eval_dir): logging.error('Validation folder does not exist at: {}'.format(eval_dir)) exit(1) dataset_eval = Dataset(eval_dir) loader_eval = create_loader(dataset_eval, input_size=data_config['input_size'], batch_size=4 * args.batch_size, is_training=False, use_prefetcher=args.prefetcher, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed) if args.mixup > 0.0: train_loss_fn = SoftTargetCrossEntropy().cuda() validate_loss_fn = nn.CrossEntropyLoss().cuda() elif args.smoothing: train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda() validate_loss_fn = nn.CrossEntropyLoss().cuda() else: train_loss_fn = nn.CrossEntropyLoss().cuda() validate_loss_fn = train_loss_fn eval_metric = args.eval_metric best_metric = None best_epoch = None saver = None output_dir = '' if args.local_rank == 0: output_base = args.output if args.output else './output' exp_name = '-'.join([datetime.now().strftime('%Y%m%d-%H%M%S'), args.model, str(data_config['input_size'][-1])]) output_dir = get_outdir(output_base, 'train', exp_name) decreasing = True if eval_metric == 'loss' else False saver = CheckpointSaver(checkpoint_dir=output_dir, decreasing=decreasing) try: for epoch in range(start_epoch, num_epochs): if args.distributed: loader_train.sampler.set_epoch(epoch) if args.prefetcher and args.mixup > 0 and loader_train.mixup_enabled: if args.mixup_off_epoch and epoch >= args.mixup_off_epoch: loader_train.mixup_enabled = False batch_time_m = AverageMeter() data_time_m = AverageMeter() losses_m = AverageMeter() model.train() end = time.time() last_idx = len(loader_train) - 1 num_updates = epoch * len(loader_train) for (batch_idx, (input, target)) in enumerate(loader_train): last_batch = batch_idx == last_idx data_time_m.update(time.time() - end) if not args.prefetcher: input = input.cuda() target = target.cuda() if args.mixup > 0.0: lam = 1.0 if not args.mixup_off_epoch or epoch < args.mixup_off_epoch: lam = np.random.beta(args.mixup, args.mixup) input.mul_(lam).add_(1 - lam, input.flip(0)) target = mixup_target(target, args.num_classes, lam, args.smoothing) output = model(input) loss = train_loss_fn(output, target) if not args.distributed: losses_m.update(loss.item(), input.size(0)) optimizer.zero_grad() if use_amp: with 
amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() optimizer.step() torch.cuda.synchronize() if model_ema is not None: model_ema.update(model) num_updates += 1 batch_time_m.update(time.time() - end) if last_batch or batch_idx % args.log_interval == 0: lrl = [param_group['lr'] for param_group in optimizer.param_groups] lr = sum(lrl) / len(lrl) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) losses_m.update(reduced_loss.item(), input.size(0)) if args.local_rank == 0: logging.info('Train: {} [{:>4d}/{} ({:>3.0f}%)] Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) LR: {lr:.3e} Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(epoch, batch_idx, len(loader_train), 100.0 * batch_idx / last_idx, loss=losses_m, batch_time=batch_time_m, rate=input.size(0) * args.world_size / batch_time_m.val, rate_avg=input.size(0) * args.world_size / batch_time_m.avg, lr=lr, data_time=data_time_m)) if args.save_images and output_dir: torchvision.utils.save_image(input, os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx), padding=0, normalize=True) if saver is not None and args.recovery_interval and (last_batch or (batch_idx + 1) % args.recovery_interval == 0): saver.save_recovery(model, optimizer, args, epoch, model_ema=model_ema, batch_idx=batch_idx) if lr_scheduler is not None: lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg) end = time.time() train_metrics = OrderedDict([('loss', losses_m.avg)]) batch_time_m = AverageMeter() losses_m = AverageMeter() prec1_m = AverageMeter() prec5_m = AverageMeter() model.eval() end = time.time() last_idx = len(loader_eval) - 1 with torch.no_grad(): for (batch_idx, (input, target)) in enumerate(loader_eval): last_batch = batch_idx == last_idx if not args.prefetcher: input = input.cuda() target = target.cuda() output = model(input) if isinstance(output, (tuple, list)): output = output[0] reduce_factor = args.tta if reduce_factor > 1: output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) target = target[0:target.size(0):reduce_factor] loss = validate_loss_fn(output, target) (prec1, prec5) = accuracy(output, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) prec1 = reduce_tensor(prec1, args.world_size) prec5 = reduce_tensor(prec5, args.world_size) else: reduced_loss = loss.data torch.cuda.synchronize() losses_m.update(reduced_loss.item(), input.size(0)) prec1_m.update(prec1.item(), output.size(0)) prec5_m.update(prec5.item(), output.size(0)) batch_time_m.update(time.time() - end) end = time.time() if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0): log_name = 'Test' + log_suffix logging.info('{0}: [{1:>4d}/{2}] Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m, top1=prec1_m, top5=prec5_m)) metrics = OrderedDict([('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) eval_metrics = metrics if model_ema is not None and (not args.model_ema_force_cpu): batch_time_m = AverageMeter() losses_m = AverageMeter() prec1_m = AverageMeter() prec5_m = AverageMeter() model_ema.ema.eval() end = time.time() last_idx = len(loader_eval) - 1 with torch.no_grad(): for (batch_idx, (input, target)) in 
enumerate(loader_eval): last_batch = batch_idx == last_idx if not args.prefetcher: input = input.cuda() target = target.cuda() output = model_ema.ema(input) if isinstance(output, (tuple, list)): output = output[0] reduce_factor = args.tta if reduce_factor > 1: output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) target = target[0:target.size(0):reduce_factor] loss = validate_loss_fn(output, target) (prec1, prec5) = accuracy(output, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) prec1 = reduce_tensor(prec1, args.world_size) prec5 = reduce_tensor(prec5, args.world_size) else: reduced_loss = loss.data torch.cuda.synchronize() losses_m.update(reduced_loss.item(), input.size(0)) prec1_m.update(prec1.item(), output.size(0)) prec5_m.update(prec5.item(), output.size(0)) batch_time_m.update(time.time() - end) end = time.time() if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0): log_name = 'Test' + ' (EMA)' logging.info('{0}: [{1:>4d}/{2}] Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m, top1=prec1_m, top5=prec5_m)) metrics = OrderedDict([('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) ema_eval_metrics = metrics eval_metrics = ema_eval_metrics if lr_scheduler is not None: lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) update_summary(epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'), write_header=best_metric is None) if saver is not None: save_metric = eval_metrics[eval_metric] (best_metric, best_epoch) = saver.save_checkpoint(model, optimizer, args, epoch=epoch, model_ema=model_ema, metric=save_metric) except KeyboardInterrupt: pass if best_metric is not None: logging.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
DNA
positive
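The training loop in the DNA pair above applies mixup in place by blending each batch with itself reversed along the batch dimension. A sketch of that blending step, written out of place; mixing of the targets is omitted:

import numpy as np

def mixup_inputs(inputs, alpha):
    # `inputs` is a batched torch.Tensor; draw the mixing coefficient and
    # blend the batch with its reverse along dim 0.
    lam = float(np.random.beta(alpha, alpha))
    return lam * inputs + (1.0 - lam) * inputs.flip(0), lam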
def __init__(self, srcUniqueName, tarUniqueName, edgeData={}, parent=None, scene=None): super(CodeUIEdgeItem, self).__init__(parent) self.setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable) self.setAcceptHoverEvents(True) self.srcUniqueName = srcUniqueName self.tarUniqueName = tarUniqueName self.setZValue(-1) self.path = None self.pathShape = None self.curve = None self.pathPnt = None self.file = '' self.line = -1 self.column = -1 dbRef = edgeData.get('dbRef', None) if dbRef: self.file = dbRef.file().longname() self.line = dbRef.line() self.column = dbRef.column() self.isHover = False self.orderData = None <DeepExtract> (srcPos, tarPos) = self.getNodePos() if self.pathPnt and (self.pathPnt[0] - srcPos).manhattanLength() < 0.05 and ((self.pathPnt[1] - tarPos).manhattanLength() < 0.05): return self.path self.pathPnt = (srcPos, tarPos) path = QtGui.QPainterPath() path.moveTo(srcPos) dx = tarPos.x() - srcPos.x() p1 = srcPos + QtCore.QPointF(dx * 0.3, 0) p2 = tarPos + QtCore.QPointF(-dx * 0.7, 0) path.cubicTo(p1, p2, tarPos) self.curve = QtGui.QPainterPath(path) self.path = path from PyQt5.QtGui import QPainterPathStroker stroker = QPainterPathStroker() stroker.setWidth(10.0) self.pathShape = stroker.createStroke(self.path) return path </DeepExtract> self.isConnectedToFocusNode = False self.schemeColorList = [] self.customEdge = edgeData.get('customEdge', False) self.isCandidate = False
def __init__(self, srcUniqueName, tarUniqueName, edgeData={}, parent=None, scene=None): super(CodeUIEdgeItem, self).__init__(parent) self.setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable) self.setAcceptHoverEvents(True) self.srcUniqueName = srcUniqueName self.tarUniqueName = tarUniqueName self.setZValue(-1) self.path = None self.pathShape = None self.curve = None self.pathPnt = None self.file = '' self.line = -1 self.column = -1 dbRef = edgeData.get('dbRef', None) if dbRef: self.file = dbRef.file().longname() self.line = dbRef.line() self.column = dbRef.column() self.isHover = False self.orderData = None (srcPos, tarPos) = self.getNodePos() if self.pathPnt and (self.pathPnt[0] - srcPos).manhattanLength() < 0.05 and ((self.pathPnt[1] - tarPos).manhattanLength() < 0.05): return self.path self.pathPnt = (srcPos, tarPos) path = QtGui.QPainterPath() path.moveTo(srcPos) dx = tarPos.x() - srcPos.x() p1 = srcPos + QtCore.QPointF(dx * 0.3, 0) p2 = tarPos + QtCore.QPointF(-dx * 0.7, 0) path.cubicTo(p1, p2, tarPos) self.curve = QtGui.QPainterPath(path) self.path = path from PyQt5.QtGui import QPainterPathStroker stroker = QPainterPathStroker() stroker.setWidth(10.0) self.pathShape = stroker.createStroke(self.path) return path self.isConnectedToFocusNode = False self.schemeColorList = [] self.customEdge = edgeData.get('customEdge', False) self.isCandidate = False
CodeAtlasSublime
positive
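The CodeAtlasSublime pair above draws each edge as a cubic Bezier whose two control points are offset horizontally by 30% and 70% of the x-distance between the endpoints. The control-point arithmetic on its own, using plain (x, y) tuples instead of QPointF:

def edge_control_points(src, tar):
    # Offsets of +0.3*dx from the source and -0.7*dx from the target.
    dx = tar[0] - src[0]
    p1 = (src[0] + dx * 0.3, src[1])
    p2 = (tar[0] - dx * 0.7, tar[1])
    return p1, p2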
def wait_until_input_queue_empty(self): for count in range(1, 10): <DeepExtract> self.cpu_interval_calls += 1 if self.runtime_cfg.speedlimit: target_cycles_per_sec = self.runtime_cfg.cycles_per_sec else: target_cycles_per_sec = None start_time = time.time() self.machine.cpu.run(max_run_time=self.runtime_cfg.max_run_time, target_cycles_per_sec=target_cycles_per_sec) now = time.time() self.total_burst_duration += now - start_time if interval is not None: if self.machine.cpu.running: self.cpu_after_id = self.root.after(interval, self.cpu_interval, interval) else: log.critical('CPU stopped.') </DeepExtract> if self.user_input_queue.empty(): log.critical('user_input_queue is empty, after %i burst runs, ok.', count) if self.cpu_after_id is None: <DeepExtract> self.status.set(f'{self.cfg.MACHINE_NAME} paused.\n') </DeepExtract> return if self.cpu_after_id is None: <DeepExtract> self.status.set(f'{self.cfg.MACHINE_NAME} paused.\n') </DeepExtract> log.critical('user_input_queue not empty, after %i burst runs!', count)
def wait_until_input_queue_empty(self): for count in range(1, 10): self.cpu_interval_calls += 1 if self.runtime_cfg.speedlimit: target_cycles_per_sec = self.runtime_cfg.cycles_per_sec else: target_cycles_per_sec = None start_time = time.time() self.machine.cpu.run(max_run_time=self.runtime_cfg.max_run_time, target_cycles_per_sec=target_cycles_per_sec) now = time.time() self.total_burst_duration += now - start_time if interval is not None: if self.machine.cpu.running: self.cpu_after_id = self.root.after(interval, self.cpu_interval, interval) else: log.critical('CPU stopped.') if self.user_input_queue.empty(): log.critical('user_input_queue is empty, after %i burst runs, ok.', count) if self.cpu_after_id is None: self.status.set(f'{self.cfg.MACHINE_NAME} paused.\n') return if self.cpu_after_id is None: self.status.set(f'{self.cfg.MACHINE_NAME} paused.\n') log.critical('user_input_queue not empty, after %i burst runs!', count)
DragonPy
positive
def test_rm_valid_name(self): <DeepExtract> (_, fname) = tempfile.mkstemp() with open(fname, 'w') as f: f.write(open(os.environ[ENV_VAR]).read()) fname = fname </DeepExtract> <DeepExtract> argv = ['ejtp-identity'] argv.append('rm') argv.extend(args) with self.io: try: self.identity.main(argv) except SystemExit: if not kwargs.get('error'): raise output = self.io.get_value() </DeepExtract> self.assertIn('atlas@lackadaisy.com removed from file %s' % fname, output) self.io.clear() <DeepExtract> argv = ['ejtp-identity'] argv.append('list') argv.extend(args) with self.io: try: self.identity.main(argv) except SystemExit: if not kwargs.get('error'): raise output = self.io.get_value() </DeepExtract> self.assertNotIn('atlas@lackadaisy.com', self.io.get_value())
def test_rm_valid_name(self): (_, fname) = tempfile.mkstemp() with open(fname, 'w') as f: f.write(open(os.environ[ENV_VAR]).read()) fname = fname argv = ['ejtp-identity'] argv.append('rm') argv.extend(args) with self.io: try: self.identity.main(argv) except SystemExit: if not kwargs.get('error'): raise output = self.io.get_value() self.assertIn('atlas@lackadaisy.com removed from file %s' % fname, output) self.io.clear() argv = ['ejtp-identity'] argv.append('list') argv.extend(args) with self.io: try: self.identity.main(argv) except SystemExit: if not kwargs.get('error'): raise output = self.io.get_value() self.assertNotIn('atlas@lackadaisy.com', self.io.get_value())
EJTP-lib-python
positive
def parse_config(config_rules, config_parser): for (section, rules) in config_rules.items(): if section == 'DEFAULT' or config_parser.has_section(section): for (option, rule) in rules.items(): ini_option = rule.get('name', option) if config_parser.has_option(section, ini_option): _type = rule.get('type', '') if _type == 'list' or _type == 'str': cp_get_method_name = 'get' else: cp_get_method_name = 'get' + _type cp_get_method = getattr(config_parser, cp_get_method_name) val = cp_get_method(section, ini_option) if _type == 'list': val = val.split(',') setattr(self, option, val) else: <DeepExtract> if rule.get('ignore_missing'): return if 'default' in rule: val = rule['default'] else: defaults = {'boolean': False, 'int': 0, 'str': '', 'list': []} val = None _type = rule.get('type') if _type and _type in defaults: val = defaults[_type] setattr(self, option, val) </DeepExtract> else: for (option, rule) in rules.items(): <DeepExtract> if rule.get('ignore_missing'): return if 'default' in rule: val = rule['default'] else: defaults = {'boolean': False, 'int': 0, 'str': '', 'list': []} val = None _type = rule.get('type') if _type and _type in defaults: val = defaults[_type] setattr(self, option, val) </DeepExtract>
def parse_config(config_rules, config_parser): for (section, rules) in config_rules.items(): if section == 'DEFAULT' or config_parser.has_section(section): for (option, rule) in rules.items(): ini_option = rule.get('name', option) if config_parser.has_option(section, ini_option): _type = rule.get('type', '') if _type == 'list' or _type == 'str': cp_get_method_name = 'get' else: cp_get_method_name = 'get' + _type cp_get_method = getattr(config_parser, cp_get_method_name) val = cp_get_method(section, ini_option) if _type == 'list': val = val.split(',') setattr(self, option, val) else: if rule.get('ignore_missing'): return if 'default' in rule: val = rule['default'] else: defaults = {'boolean': False, 'int': 0, 'str': '', 'list': []} val = None _type = rule.get('type') if _type and _type in defaults: val = defaults[_type] setattr(self, option, val) else: for (option, rule) in rules.items(): if rule.get('ignore_missing'): return if 'default' in rule: val = rule['default'] else: defaults = {'boolean': False, 'int': 0, 'str': '', 'list': []} val = None _type = rule.get('type') if _type and _type in defaults: val = defaults[_type] setattr(self, option, val)
DLRN
positive
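When an option is missing from the .ini file, the DLRN pair above falls back to the rule's explicit default, or to a per-type zero value. A sketch of that fallback on its own:

def default_for(rule):
    # An explicit default wins; otherwise use the zero value for the rule's type.
    if 'default' in rule:
        return rule['default']
    type_defaults = {'boolean': False, 'int': 0, 'str': '', 'list': []}
    return type_defaults.get(rule.get('type'))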
def test_commit_is_signed(self): """ Ensure that the feature commit is signed """ <DeepExtract> thisdir = os.path.dirname(os.path.realpath(__file__)) (parent, _) = os.path.split(thisdir) (parent, _) = os.path.split(parent) repodir = parent </DeepExtract> feature_sha1 = os.environ.get('TRAVIS_PULL_REQUEST_SHA') signer_encoded = subprocess.check_output(['git', 'show', '-s', '--pretty="%GK"', feature_sha1], cwd=repodir).decode('utf-8').strip().strip('"') known_fingerprints = set() proc = subprocess.Popen(['gpg', '--fingerprint', '--with-colons'], stdout=subprocess.PIPE) for line in proc.stdout: parts = line.decode('utf-8').split(':') if parts[0] == 'fpr': fingerprint = parts[-2] known_fingerprints.add(fingerprint[-16:]) proc.wait() if signer_encoded not in known_fingerprints: result = subprocess.check_call(['gpg', '--keyserver', 'keyserver.ubuntu.com', '--recv-keys', signer_encoded]) self.assertEqual(result, 0, msg='Failed to fetch signer key from keyserver') with tempfile.NamedTemporaryFile(delete=False) as stderr: result = subprocess.call(['git', 'verify-commit', feature_sha1], cwd=repodir, stderr=stderr) stderrpath = stderr.name with open(stderrpath, 'r') as infile: stderrtext = infile.read() os.unlink(stderrpath) self.assertEqual(0, result, 'git was unable to verify commit {}\n{}'.format(feature_sha1, stderrtext))
def test_commit_is_signed(self): """ Ensure that the feature commit is signed """ thisdir = os.path.dirname(os.path.realpath(__file__)) (parent, _) = os.path.split(thisdir) (parent, _) = os.path.split(parent) repodir = parent feature_sha1 = os.environ.get('TRAVIS_PULL_REQUEST_SHA') signer_encoded = subprocess.check_output(['git', 'show', '-s', '--pretty="%GK"', feature_sha1], cwd=repodir).decode('utf-8').strip().strip('"') known_fingerprints = set() proc = subprocess.Popen(['gpg', '--fingerprint', '--with-colons'], stdout=subprocess.PIPE) for line in proc.stdout: parts = line.decode('utf-8').split(':') if parts[0] == 'fpr': fingerprint = parts[-2] known_fingerprints.add(fingerprint[-16:]) proc.wait() if signer_encoded not in known_fingerprints: result = subprocess.check_call(['gpg', '--keyserver', 'keyserver.ubuntu.com', '--recv-keys', signer_encoded]) self.assertEqual(result, 0, msg='Failed to fetch signer key from keyserver') with tempfile.NamedTemporaryFile(delete=False) as stderr: result = subprocess.call(['git', 'verify-commit', feature_sha1], cwd=repodir, stderr=stderr) stderrpath = stderr.name with open(stderrpath, 'r') as infile: stderrtext = infile.read() os.unlink(stderrpath) self.assertEqual(0, result, 'git was unable to verify commit {}\n{}'.format(feature_sha1, stderrtext))
cmake_format
positive
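The cmake_format pair above collects the key IDs already known to gpg by parsing `gpg --fingerprint --with-colons` and keeping the last 16 hex characters of every fpr record. A sketch of that parsing step:

import subprocess

def known_key_ids():
    # `fpr` records carry the fingerprint in the second-to-last colon field.
    out = subprocess.run(['gpg', '--fingerprint', '--with-colons'],
                         capture_output=True, text=True, check=True).stdout
    ids = set()
    for line in out.splitlines():
        parts = line.split(':')
        if parts[0] == 'fpr':
            ids.add(parts[-2][-16:])
    return ids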
def call(self, inputs, step_type=(), network_state=(), training=False): flat_inputs = tf.nest.flatten(inputs) del step_type processed_inputs = [] for (single_input, input_spec) in zip(flat_inputs, self._flat_specs): if common_lib.is_categorical_spec(input_spec): if input_spec.name == 'step_num': if self._step_encoding is None: continue if self._max_trajectory_length_train is not None: max_step = self._max_trajectory_length_train else: max_step = input_spec.maximum <DeepExtract> if self._step_encoding == 'one_hot': processed_input = tf.one_hot(single_input, max_step + 1) if self._step_encoding == 'sinusoid': i = tf.range(self._d_step_emb, dtype=tf.float32)[tf.newaxis, :] step_num = tf.cast(single_input, tf.float32)[:, tf.newaxis] rads = step_num / tf.math.pow(10000.0, 2 * (i // 2) / tf.cast(self._d_step_emb, tf.float32)) processed_input = tf.concat([tf.sin(rads[:, 0::2]), tf.cos(rads[:, 1::2])], axis=-1) if self._step_encoding == 'learned': processed_input = self._step_embedding_layer(tf.one_hot(single_input, max_step + 1)) raise ValueError('Step encoding must be one of ["one_hot, "sinusoid", "learned"].') </DeepExtract> else: processed_input = tf.one_hot(single_input, input_spec.maximum + 1) else: if len(input_spec.shape) != 1: raise ValueError('Invalid input spec shape %s.' % input_spec.shape) processed_input = single_input processed_inputs.append(processed_input) joint = tf.concat(processed_inputs, -1) for layer in self._fc_layers: joint = layer(joint, training=training) if self._output_dim is None: joint = tf.reshape(joint, [-1]) return (joint, network_state)
def call(self, inputs, step_type=(), network_state=(), training=False): flat_inputs = tf.nest.flatten(inputs) del step_type processed_inputs = [] for (single_input, input_spec) in zip(flat_inputs, self._flat_specs): if common_lib.is_categorical_spec(input_spec): if input_spec.name == 'step_num': if self._step_encoding is None: continue if self._max_trajectory_length_train is not None: max_step = self._max_trajectory_length_train else: max_step = input_spec.maximum if self._step_encoding == 'one_hot': processed_input = tf.one_hot(single_input, max_step + 1) if self._step_encoding == 'sinusoid': i = tf.range(self._d_step_emb, dtype=tf.float32)[tf.newaxis, :] step_num = tf.cast(single_input, tf.float32)[:, tf.newaxis] rads = step_num / tf.math.pow(10000.0, 2 * (i // 2) / tf.cast(self._d_step_emb, tf.float32)) processed_input = tf.concat([tf.sin(rads[:, 0::2]), tf.cos(rads[:, 1::2])], axis=-1) if self._step_encoding == 'learned': processed_input = self._step_embedding_layer(tf.one_hot(single_input, max_step + 1)) raise ValueError('Step encoding must be one of ["one_hot, "sinusoid", "learned"].') else: processed_input = tf.one_hot(single_input, input_spec.maximum + 1) else: if len(input_spec.shape) != 1: raise ValueError('Invalid input spec shape %s.' % input_spec.shape) processed_input = single_input processed_inputs.append(processed_input) joint = tf.concat(processed_inputs, -1) for layer in self._fc_layers: joint = layer(joint, training=training) if self._output_dim is None: joint = tf.reshape(joint, [-1]) return (joint, network_state)
dice_rl
positive
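The dice_rl pair above supports a sinusoidal step encoding whose angle rates fall off as 10000**(2*(i//2)/d), concatenating sines of the even components with cosines of the odd ones. The same computation for a single step number, written with NumPy instead of TensorFlow:

import numpy as np

def sinusoid_step_encoding(step_num, d_emb):
    # rads[i] = step / 10000**(2*(i//2)/d_emb); even slots get sin, odd get cos.
    i = np.arange(d_emb, dtype=np.float32)
    rads = step_num / np.power(10000.0, 2 * (i // 2) / d_emb)
    return np.concatenate([np.sin(rads[0::2]), np.cos(rads[1::2])])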
def fetch(self, is_dl_forced=True): """ Fetches data from udp collaboration server, see top level comments for class for more information :return: """ username = config.get_config()['dbauth']['udp']['user'] password = config.get_config()['dbauth']['udp']['password'] credentials = (username, password) patient_id_map = self.open_and_parse_yaml(self.map_files['patient_ids']) udp_internal_ids = patient_id_map.keys() phenotype_fields = ['Patient', 'HPID', 'Present'] phenotype_params = {'method': 'search_subjects', 'subject_type': 'Phenotype', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'values': ','.join(udp_internal_ids), 'user_fields': ','.join(phenotype_fields)} prioritized_variants = ['Patient', 'Gene', 'Chromosome Position', 'Variant Allele', 'Transcript'] prioritized_params = {'method': 'search_subjects', 'subject_type': 'Variant Prioritization', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'values': ','.join(udp_internal_ids), 'user_fields': ','.join(prioritized_variants), 'format': 'json'} variant_fields = ['Patient', 'Family', 'Chr', 'Build', 'Chromosome Position', 'Reference Allele', 'Variant Allele', 'Parent of origin', 'Allele Type', 'Mutation Type', 'Gene', 'Transcript', 'Original Amino Acid', 'Variant Amino Acid', 'Amino Acid Change', 'Segregates with', 'Position', 'Exon', 'Inheritance model', 'Zygosity', 'dbSNP ID', '1K Frequency', 'Number of Alleles'] variant_params = {'method': 'search_subjects', 'subject_type': 'Exome Analysis Results', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'user_fields': ','.join(variant_fields), 'format': 'json'} pheno_file = open('/'.join((self.rawdir, self.files['patient_phenotypes']['file'])), 'w') variant_file = open('/'.join((self.rawdir, self.files['patient_variants']['file'])), 'w') pheno_file.write('{0}\n'.format('\t'.join(phenotype_fields))) variant_file.write('{0}\n'.format('\t'.join(variant_fields))) <DeepExtract> data = [] for patient in udp_internal_ids: prioritized_params['values'] = patient result_count = 1 prioritized_params['start'] = 0 prioritized_params['limit'] = 100 while result_count > 0: LOG.debug('processing %s lines starting at %s for patient %s', prioritized_params['limit'], prioritized_params['start'], patient) req = requests.get(self.UDP_SERVER, params=prioritized_params, auth=credentials) results = req.json() result_count = len(results['Subjects']) for res in results['Subjects']: line = [res[field] for field in prioritized_variants] data.append(line) prioritized_params['start'] = prioritized_params['start'] + prioritized_params['limit'] variant_gene = data </DeepExtract> variant_gene_map = dict() for line in variant_gene: variant_gene_map.setdefault(line[0], []).append('{0}-{1}-{2}-{3}'.format(line[1], line[2], line[3], line[4])) <DeepExtract> data = [] for patient in udp_internal_ids: variant_params['values'] = patient result_count = 1 variant_params['start'] = 0 variant_params['limit'] = 100 while result_count > 0: LOG.debug('processing %s lines starting at %s for patient %s', variant_params['limit'], variant_params['start'], patient) req = requests.get(self.UDP_SERVER, params=variant_params, auth=credentials) results = req.json() result_count = len(results['Subjects']) for res in results['Subjects']: line = [res[field] for field in variant_fields] data.append(line) variant_params['start'] = variant_params['start'] + variant_params['limit'] variant_info = data </DeepExtract> for line in variant_info: variant = '{0}-{1}-{2}-{3}'.format(line[10], 
line[4], line[6], line[11]) if variant in variant_gene_map[line[0]]: line[0] = patient_id_map[line[0]] line[4] = re.sub('\\.0$', '', line[4]) variant_file.write('{0}\n'.format('\t'.join(line))) <DeepExtract> data = [] for patient in udp_internal_ids: phenotype_params['values'] = patient result_count = 1 phenotype_params['start'] = 0 phenotype_params['limit'] = 100 while result_count > 0: LOG.debug('processing %s lines starting at %s for patient %s', phenotype_params['limit'], phenotype_params['start'], patient) req = requests.get(self.UDP_SERVER, params=phenotype_params, auth=credentials) results = req.json() result_count = len(results['Subjects']) for res in results['Subjects']: line = [res[field] for field in phenotype_fields] data.append(line) phenotype_params['start'] = phenotype_params['start'] + phenotype_params['limit'] phenotype_info = data </DeepExtract> for line in phenotype_info: line[0] = patient_id_map[line[0]] pheno_file.write('{0}\n'.format('\t'.join(line))) variant_file.close() pheno_file.close() return
def fetch(self, is_dl_forced=True):
    """
    Fetches data from udp collaboration server,
    see top level comments for class for more information
    :return:
    """
    username = config.get_config()['dbauth']['udp']['user']
    password = config.get_config()['dbauth']['udp']['password']
    credentials = (username, password)
    patient_id_map = self.open_and_parse_yaml(self.map_files['patient_ids'])
    udp_internal_ids = patient_id_map.keys()
    phenotype_fields = ['Patient', 'HPID', 'Present']
    phenotype_params = {'method': 'search_subjects', 'subject_type': 'Phenotype', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'values': ','.join(udp_internal_ids), 'user_fields': ','.join(phenotype_fields)}
    prioritized_variants = ['Patient', 'Gene', 'Chromosome Position', 'Variant Allele', 'Transcript']
    prioritized_params = {'method': 'search_subjects', 'subject_type': 'Variant Prioritization', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'values': ','.join(udp_internal_ids), 'user_fields': ','.join(prioritized_variants), 'format': 'json'}
    variant_fields = ['Patient', 'Family', 'Chr', 'Build', 'Chromosome Position', 'Reference Allele', 'Variant Allele', 'Parent of origin', 'Allele Type', 'Mutation Type', 'Gene', 'Transcript', 'Original Amino Acid', 'Variant Amino Acid', 'Amino Acid Change', 'Segregates with', 'Position', 'Exon', 'Inheritance model', 'Zygosity', 'dbSNP ID', '1K Frequency', 'Number of Alleles']
    variant_params = {'method': 'search_subjects', 'subject_type': 'Exome Analysis Results', 'search_mode': 'DEEP', 'fields': 'Patient', 'conditions': 'equals', 'user_fields': ','.join(variant_fields), 'format': 'json'}
    pheno_file = open('/'.join((self.rawdir, self.files['patient_phenotypes']['file'])), 'w')
    variant_file = open('/'.join((self.rawdir, self.files['patient_variants']['file'])), 'w')
    pheno_file.write('{0}\n'.format('\t'.join(phenotype_fields)))
    variant_file.write('{0}\n'.format('\t'.join(variant_fields)))
    data = []
    for patient in udp_internal_ids:
        prioritized_params['values'] = patient
        result_count = 1
        prioritized_params['start'] = 0
        prioritized_params['limit'] = 100
        while result_count > 0:
            LOG.debug('processing %s lines starting at %s for patient %s', prioritized_params['limit'], prioritized_params['start'], patient)
            req = requests.get(self.UDP_SERVER, params=prioritized_params, auth=credentials)
            results = req.json()
            result_count = len(results['Subjects'])
            for res in results['Subjects']:
                line = [res[field] for field in prioritized_variants]
                data.append(line)
            prioritized_params['start'] = prioritized_params['start'] + prioritized_params['limit']
    variant_gene = data
    variant_gene_map = dict()
    for line in variant_gene:
        variant_gene_map.setdefault(line[0], []).append('{0}-{1}-{2}-{3}'.format(line[1], line[2], line[3], line[4]))
    data = []
    for patient in udp_internal_ids:
        variant_params['values'] = patient
        result_count = 1
        variant_params['start'] = 0
        variant_params['limit'] = 100
        while result_count > 0:
            LOG.debug('processing %s lines starting at %s for patient %s', variant_params['limit'], variant_params['start'], patient)
            req = requests.get(self.UDP_SERVER, params=variant_params, auth=credentials)
            results = req.json()
            result_count = len(results['Subjects'])
            for res in results['Subjects']:
                line = [res[field] for field in variant_fields]
                data.append(line)
            variant_params['start'] = variant_params['start'] + variant_params['limit']
    variant_info = data
    for line in variant_info:
        variant = '{0}-{1}-{2}-{3}'.format(line[10], line[4], line[6], line[11])
        if variant in variant_gene_map[line[0]]:
            line[0] = patient_id_map[line[0]]
            line[4] = re.sub('\\.0$', '', line[4])
            variant_file.write('{0}\n'.format('\t'.join(line)))
    data = []
    for patient in udp_internal_ids:
        phenotype_params['values'] = patient
        result_count = 1
        phenotype_params['start'] = 0
        phenotype_params['limit'] = 100
        while result_count > 0:
            LOG.debug('processing %s lines starting at %s for patient %s', phenotype_params['limit'], phenotype_params['start'], patient)
            req = requests.get(self.UDP_SERVER, params=phenotype_params, auth=credentials)
            results = req.json()
            result_count = len(results['Subjects'])
            for res in results['Subjects']:
                line = [res[field] for field in phenotype_fields]
                data.append(line)
            phenotype_params['start'] = phenotype_params['start'] + phenotype_params['limit']
    phenotype_info = data
    for line in phenotype_info:
        line[0] = patient_id_map[line[0]]
        pheno_file.write('{0}\n'.format('\t'.join(line)))
    variant_file.close()
    pheno_file.close()
    return
dipper
positive
def run(self): """Running the Recipe.""" (f_opt, f_sam, f_pos) = self.recipe_trace.finished if not f_opt: <DeepExtract> step = self.recipe_trace._s_optimize result = self.recipe_trace._r_optimize recipe_trace = self.recipe_trace if step.has_surrogate: if isinstance(self._density, DensityLite): raise RuntimeError('self.density should be a Density, instead of DensityLite, for surrogate modeling.') self._density.surrogate_list = step._surrogate_list if step.fitted: if step.x_0 is None: x_0 = np.zeros(self.density.input_size) else: x_0 = step.x_0.copy() var_dicts = None else: if step.x_0 is None: dim = self.density.input_size x_0 = multivariate_normal(np.zeros(dim), np.eye(dim), step.n_eval) elif step.n_eval > 0: if step.x_0.shape[0] < step.n_eval: raise RuntimeError('I need {} points to fit the surrogate model, but you only gave me enough {} points in x_0.'.format(step.n_eval, step.x_0.shape[0])) x_0 = step.x_0[:step.n_eval].copy() else: x_0 = step.x_0.copy() self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: var_dicts = self.parallel_backend.map(self.density.fun, x_0) self.density.fit(var_dicts) self._opt_surro(x_0, var_dicts) _a = result[-1].f_max _pq = _a.logp_trans - _a.logq_trans print(' OptimizeStep proceeding: iter #0 finished, while current logp = {:.3f}, logp_trans = {:.3f}, delta_pq = {:.3f}.'.format(_a.logp, _a.logp_trans, _pq)) for i in range(1, step.max_iter): if step.n_eval <= 0: raise RuntimeError('alpha_n should be positive if max_iter is larger than 1.') x_0 = result[-1].laplace_samples if x_0.shape[0] < step.n_eval: raise RuntimeError('I need {} points to fit the surrogate model, but I can only get {} points from the previous iteration.'.format(step.n_eval, x_0.shape[0])) x_0 = x_0[:step.n_eval].copy() self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: var_dicts = self.parallel_backend.map(self.density.fun, x_0) self.density.fit(var_dicts) self._opt_surro(x_0, var_dicts) _a = result[-1].f_max _b = result[-2].f_max _pp = _a.logp_trans - _b.logp_trans _pq = _a.logp_trans - _a.logq_trans print(' OptimizeStep proceeding: iter #{} finished, while current logp = {:.3f}, logp_trans = {:.3f}, delta_pp = {:.3f}, delta_pq = {:.3f}.'.format(i, _a.logp, _a.logp_trans, _pp, _pq)) if i == step.max_iter - 1: warnings.warn('Optimization did not converge within the max number of iterations.', RuntimeWarning) if abs(_pp) < step._eps_pp and abs(_pq) < step._eps_pq: break logp_trans_all = np.asarray([r.f_max.logp_trans for r in result]) is_max = np.where(logp_trans_all == np.max(logp_trans_all))[0] if is_max.size == 1: i_max = is_max[0] else: logq_trans_all = np.asarray([r.f_max.logq_trans for r in result]) diff_all = np.abs(logp_trans_all - logq_trans_all) i_max = is_max[np.argmin(diff_all[is_max])] result.append(result[i_max]) print(' OptimizeStep proceeding: we will use iter #{} as it has the highest logp_trans.\n'.format(i_max)) else: if step.x_0 is None: dim = self.density.input_size if dim is None: raise RuntimeError('Neither OptimizeStep.x_0 nor Density/DensityLite.input_size is defined.') x_0 = np.zeros(dim) else: x_0 = self.density.from_original(step.x_0[0]) _logp = lambda x: self.density.logp(x, original_space=False) try: _grad_0 = self.density.grad(x_0, original_space=False) assert np.all(np.isfinite(_grad_0)) _grad = lambda x: self.density.grad(x, original_space=False) except Exception: _grad = None laplace_result = step.laplace.run(logp=_logp, x_0=x_0, grad=_grad) x_trans = 
laplace_result.x_max x = self.density.to_original(x_trans) x_max = PointDoublet(x, x_trans) logp_trans = laplace_result.f_max logp = self.density.to_original_density(density=logp_trans, x=x_max) f_max = DensityQuartet(float(logp), None, float(logp_trans), None) laplace_samples = self.density.to_original(laplace_result.samples) result.append(OptimizeResult(x_max=x_max, f_max=f_max, surrogate_list=(), var_dicts=None, laplace_samples=laplace_samples, laplace_result=laplace_result, samples=None, sample_trace=None)) if step.has_surrogate and step.run_sampling: self._opt_sample() recipe_trace._i_optimize = 1 print('\n ***** OptimizeStep finished. ***** \n') </DeepExtract> if not f_sam: <DeepExtract> steps = self.recipe_trace._s_sample results = self.recipe_trace._r_sample recipe_trace = self.recipe_trace i = recipe_trace._i_sample this_step = recipe_trace._strategy.update(results) while this_step is not None: sample_trace = this_step.sample_trace get_prev_step = not (i == 0 and (not recipe_trace._i_optimize)) get_prev_samples = get_prev_step or this_step.x_0 is not None if get_prev_step: if i == 0: prev_result = recipe_trace._r_optimize[-1] prev_step = recipe_trace._s_optimize else: prev_result = results[i - 1] prev_step = steps[i - 1] get_prev_density = get_prev_step and this_step.x_0 is None and (prev_step.sample_trace is not None) if get_prev_samples: if this_step.x_0 is None: if prev_result.samples is None: prev_samples = Laplace.untemper_laplace_samples(prev_result.laplace_result) prev_transformed = True else: prev_samples = prev_result.samples prev_transformed = False else: prev_samples = this_step.x_0 prev_transformed = False if get_prev_density: prev_density = prev_result.sample_trace.get(return_type='logp', flatten=True) if isinstance(sample_trace, _HTrace): if sample_trace.x_0 is None and get_prev_samples: sample_trace.x_0 = prev_samples sample_trace._x_0_transformed = prev_transformed if get_prev_step: if sample_trace._step_size is None: if this_step.reuse_step_size and prev_result.sample_trace is not None: sample_trace._step_size = _get_step_size(prev_result.sample_trace) if sample_trace._metric == 'diag' or sample_trace._metric == 'full': if this_step.reuse_metric and prev_result.sample_trace is not None: sample_trace._metric = _get_metric(prev_result.sample_trace, sample_trace._metric) if this_step.has_surrogate: if not isinstance(self._density, Density): raise RuntimeError('self.density should be a Density for surrogate modeling.') self._density.surrogate_list = this_step._surrogate_list if this_step._fitted: var_dicts = None else: if not get_prev_samples: raise RuntimeError('You did not give me samples to fit the surrogate model.') if this_step.n_eval > 0 and prev_samples.shape[0] < this_step.n_eval: raise RuntimeError('I need {} points to fit the surrogate model, but I can find at most {} points.'.format(this_step.n_eval, prev_samples.shape[0])) if i > 0 and (not prev_step.has_surrogate): warnings.warn('you are doing surrogate modeling after sampling the true density. 
Please make sure this is what you want.', RuntimeWarning) if get_prev_density: i_resample = this_step.resampler(prev_density, this_step.n_eval) elif this_step.n_eval > 0: i_resample = np.arange(this_step.n_eval) else: i_resample = np.arange(prev_samples.shape[0]) x_fit = prev_samples[i_resample] self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: var_dicts = np.asarray(self.parallel_backend.map(self.density.fun, x_fit)) var_dicts_fit = var_dicts.copy() if this_step.reuse_samples: for j in range(i): if j + this_step.reuse_samples >= i or this_step.reuse_samples < 0: var_dicts_fit = np.concatenate((var_dicts_fit, results[j].var_dicts)) if this_step.logp_cutoff and get_prev_density: logp_fit = np.concatenate([vd.fun[self.density.density_name] for vd in var_dicts_fit]) logq_fit = prev_density[i_resample] logq_min = np.min(logq_fit) np.delete(prev_samples, i_resample, axis=0) np.delete(prev_density, i_resample, axis=0) is_good = logp_fit > logq_min n_good = np.sum(is_good) f_good = n_good / logp_fit.size if f_good < 0.5: warnings.warn('more than half of the samples are abandoned because their logp < logq_min.', RuntimeWarning) if f_good == 0.0: raise RuntimeError('f_good is 0, indicating that the samples seem very bad. Please check your recipe setup. You may also want to try logp_cutoff=False for the SampleStep.') var_dicts_fit = var_dicts_fit[is_good] while len(var_dicts_fit) < this_step.n_eval_min: n_eval_supp = (this_step.n_eval_min - len(var_dicts_fit)) / f_good * this_step.alpha_supp n_eval_supp = max(int(n_eval_supp), 4) if prev_samples.shape[0] < n_eval_supp: raise RuntimeError('I do not have enough supplementary points.') i_resample = this_step.resampler(prev_density, n_eval_supp) x_fit = prev_samples[i_resample] self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: var_dicts_supp = np.asarray(self.parallel_backend.map(self.density.fun, x_fit)) logp_supp = np.concatenate([vd.fun[self.density.density_name] for vd in var_dicts_supp]) np.delete(prev_samples, i_resample, axis=0) np.delete(prev_density, i_resample, axis=0) is_good = logp_supp > logq_min n_good = np.sum(is_good) if n_good < logp_supp.size / 2: warnings.warn('more than half of the samples are abandoned because their logp < logq_min.', RuntimeWarning) var_dicts = np.concatenate((var_dicts, var_dicts_supp)) var_dicts_fit = np.concatenate((var_dicts_fit, var_dicts_supp[is_good])) self.density.fit(var_dicts_fit) self.density.use_surrogate = True t = sample(self.density, sample_trace=sample_trace, parallel_backend=self.parallel_backend) x = t.get(flatten=True) surrogate_list = deepcopy(self._density._surrogate_list) results.append(SampleResult(samples=x, surrogate_list=surrogate_list, var_dicts=var_dicts, sample_trace=t)) else: if isinstance(self._density, Density): self.density.use_surrogate = False t = sample(self.density, sample_trace=sample_trace, parallel_backend=self.parallel_backend) x = t.get(flatten=True) results.append(SampleResult(samples=x, surrogate_list=(), var_dicts=None, sample_trace=t)) steps.append(this_step) print('\n *** SampleStep proceeding: iter #{} finished. *** \n'.format(i)) recipe_trace._i_sample += 1 i = recipe_trace._i_sample this_step = recipe_trace._strategy.update(results) print('\n ***** SampleStep finished. 
***** \n') </DeepExtract> if not f_pos: <DeepExtract> step = self.recipe_trace._s_post recipe_trace = self.recipe_trace x_p = None x_q = None f_logp = None f_logq = None logp_p = None logq_q = None x_max = None f_max = None samples = None weights = None weights_trunc = None logp = None logq = None trace_p = None trace_q = None logz = None logz_err = None if recipe_trace._i_optimize: opt_result = recipe_trace._r_optimize[-1] x_max = opt_result.x_max f_max = opt_result.f_max if recipe_trace._i_sample: prev_step = recipe_trace._s_sample[-1] prev_result = recipe_trace._r_sample[-1] if prev_step.has_surrogate: trace_q = prev_result.sample_trace x_q = trace_q.get(return_type='samples', flatten=False) logq_q = trace_q.get(return_type='logp', flatten=False) self.density._surrogate_list = prev_step.surrogate_list else: trace_p = prev_result.sample_trace x_p = trace_p.get(return_type='samples', flatten=False) logp_p = trace_p.get(return_type='logp', flatten=False) elif recipe_trace._i_optimize: prev_step = recipe_trace._s_optimize prev_result = recipe_trace._r_optimize[-1] if prev_step.has_surrogate and prev_result.sample_trace is not None: trace_q = prev_result.sample_trace x_q = trace_q.get(return_type='samples', flatten=False) logq_q = trace_q.get(return_type='logp', flatten=False) self.density._surrogate_list = prev_step.surrogate_list else: warnings.warn('no existing samples found.', RuntimeWarning) else: raise RuntimeError('you have run neither OptimizeStep nor SampleStep before the PostStep.') if x_p is not None: samples = x_p.reshape((-1, x_p.shape[-1])) weights = np.ones(samples.shape[0]) weights_trunc = weights logp = logp_p.reshape(-1) if step.evidence_method is not None: (logz, logz_err) = step.evidence_method(x_p=trace_p, logp=self._f_logp, logp_p=logp_p) if step.n_is > 0: warnings.warn('n_is will not be used when we already have exact samples from logp.', RuntimeWarning) elif x_q is not None: samples = x_q.reshape((-1, x_q.shape[-1])) logq = logq_q.reshape(-1) if step.n_is != 0: if step.n_is < 0 or step.n_is > samples.shape[0]: if step.n_is > 0: warnings.warn('you set n_is as {}, but I can only get {} samples from the previous step, so I will use all these samples to do IS for now.'.format(step.n_is, samples.shape[0]), RuntimeWarning) n_is = samples.shape[0] else: n_is = step.n_is foo = int(samples.shape[0] / n_is) samples = samples[::foo][:n_is] logq = logq[::foo][:n_is] self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: logp = np.asarray(self.parallel_backend.map(self.density.logp, samples)).reshape(-1) weights = np.exp(logp - logq) if step.k_trunc < 0: weights_trunc = weights.copy() else: weights_trunc = np.clip(weights, 0, np.mean(weights) * n_is ** step.k_trunc) if step.evidence_method is not None: (logz_q, logz_err_q) = step.evidence_method(x_p=trace_q, logp=self._f_logq, logp_p=logq_q) logz_pq = logsumexp(logp - logq, b=1 / logp.size) foo = np.exp(logp - logq - logz_pq) tau = float(integrated_time(foo)) logz_err_pq = (np.var(foo) / np.mean(foo) ** 2 / logp.size * tau) ** 0.5 logz = logz_q + logz_pq logz_err = (logz_err_q ** 2 + logz_err_pq ** 2) ** 0.5 else: weights = np.ones(samples.shape[0]) weights_trunc = weights if step.evidence_method is not None: warnings.warn('since n_is is 0, we are computing the evidence of logq, which may differ from the evidence of logp.', RuntimeWarning) (logz, logz_err) = step.evidence_method(x_p=trace_q, logp=self._f_logq, logp_p=logq_q) elif step.n_is is not None or step.evidence_method is not None: 
warnings.warn('n_is and evidence_method will not be used when we only have Laplace samples.', RuntimeWarning) try: n_call = recipe_trace.n_call + step.n_is warnings.warn('as of now, n_call does not take the possible logp calls during evidence evaluation into account.', RuntimeWarning) except Exception: n_call = None recipe_trace._r_post = PostResult(samples, weights, weights_trunc, logp, logq, logz, logz_err, x_p, x_q, logp_p, logq_q, trace_p, trace_q, n_call, x_max, f_max) recipe_trace._i_post = 1 print('\n ***** PostStep finished. ***** \n') </DeepExtract>
def run(self): """Running the Recipe.""" (f_opt, f_sam, f_pos) = self.recipe_trace.finished if not f_opt: step = self.recipe_trace._s_optimize result = self.recipe_trace._r_optimize recipe_trace = self.recipe_trace if step.has_surrogate: if isinstance(self._density, DensityLite): raise RuntimeError('self.density should be a Density, instead of DensityLite, for surrogate modeling.') self._density.surrogate_list = step._surrogate_list if step.fitted: if step.x_0 is None: x_0 = np.zeros(self.density.input_size) else: x_0 = step.x_0.copy() var_dicts = None else: if step.x_0 is None: dim = self.density.input_size x_0 = multivariate_normal(np.zeros(dim), np.eye(dim), step.n_eval) elif step.n_eval > 0: if step.x_0.shape[0] < step.n_eval: raise RuntimeError('I need {} points to fit the surrogate model, but you only gave me enough {} points in x_0.'.format(step.n_eval, step.x_0.shape[0])) x_0 = step.x_0[:step.n_eval].copy() else: x_0 = step.x_0.copy() self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: var_dicts = self.parallel_backend.map(self.density.fun, x_0) self.density.fit(var_dicts) self._opt_surro(x_0, var_dicts) _a = result[-1].f_max _pq = _a.logp_trans - _a.logq_trans print(' OptimizeStep proceeding: iter #0 finished, while current logp = {:.3f}, logp_trans = {:.3f}, delta_pq = {:.3f}.'.format(_a.logp, _a.logp_trans, _pq)) for i in range(1, step.max_iter): if step.n_eval <= 0: raise RuntimeError('alpha_n should be positive if max_iter is larger than 1.') x_0 = result[-1].laplace_samples if x_0.shape[0] < step.n_eval: raise RuntimeError('I need {} points to fit the surrogate model, but I can only get {} points from the previous iteration.'.format(step.n_eval, x_0.shape[0])) x_0 = x_0[:step.n_eval].copy() self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: var_dicts = self.parallel_backend.map(self.density.fun, x_0) self.density.fit(var_dicts) self._opt_surro(x_0, var_dicts) _a = result[-1].f_max _b = result[-2].f_max _pp = _a.logp_trans - _b.logp_trans _pq = _a.logp_trans - _a.logq_trans print(' OptimizeStep proceeding: iter #{} finished, while current logp = {:.3f}, logp_trans = {:.3f}, delta_pp = {:.3f}, delta_pq = {:.3f}.'.format(i, _a.logp, _a.logp_trans, _pp, _pq)) if i == step.max_iter - 1: warnings.warn('Optimization did not converge within the max number of iterations.', RuntimeWarning) if abs(_pp) < step._eps_pp and abs(_pq) < step._eps_pq: break logp_trans_all = np.asarray([r.f_max.logp_trans for r in result]) is_max = np.where(logp_trans_all == np.max(logp_trans_all))[0] if is_max.size == 1: i_max = is_max[0] else: logq_trans_all = np.asarray([r.f_max.logq_trans for r in result]) diff_all = np.abs(logp_trans_all - logq_trans_all) i_max = is_max[np.argmin(diff_all[is_max])] result.append(result[i_max]) print(' OptimizeStep proceeding: we will use iter #{} as it has the highest logp_trans.\n'.format(i_max)) else: if step.x_0 is None: dim = self.density.input_size if dim is None: raise RuntimeError('Neither OptimizeStep.x_0 nor Density/DensityLite.input_size is defined.') x_0 = np.zeros(dim) else: x_0 = self.density.from_original(step.x_0[0]) _logp = lambda x: self.density.logp(x, original_space=False) try: _grad_0 = self.density.grad(x_0, original_space=False) assert np.all(np.isfinite(_grad_0)) _grad = lambda x: self.density.grad(x, original_space=False) except Exception: _grad = None laplace_result = step.laplace.run(logp=_logp, x_0=x_0, grad=_grad) x_trans = laplace_result.x_max x = 
self.density.to_original(x_trans) x_max = PointDoublet(x, x_trans) logp_trans = laplace_result.f_max logp = self.density.to_original_density(density=logp_trans, x=x_max) f_max = DensityQuartet(float(logp), None, float(logp_trans), None) laplace_samples = self.density.to_original(laplace_result.samples) result.append(OptimizeResult(x_max=x_max, f_max=f_max, surrogate_list=(), var_dicts=None, laplace_samples=laplace_samples, laplace_result=laplace_result, samples=None, sample_trace=None)) if step.has_surrogate and step.run_sampling: self._opt_sample() recipe_trace._i_optimize = 1 print('\n ***** OptimizeStep finished. ***** \n') if not f_sam: steps = self.recipe_trace._s_sample results = self.recipe_trace._r_sample recipe_trace = self.recipe_trace i = recipe_trace._i_sample this_step = recipe_trace._strategy.update(results) while this_step is not None: sample_trace = this_step.sample_trace get_prev_step = not (i == 0 and (not recipe_trace._i_optimize)) get_prev_samples = get_prev_step or this_step.x_0 is not None if get_prev_step: if i == 0: prev_result = recipe_trace._r_optimize[-1] prev_step = recipe_trace._s_optimize else: prev_result = results[i - 1] prev_step = steps[i - 1] get_prev_density = get_prev_step and this_step.x_0 is None and (prev_step.sample_trace is not None) if get_prev_samples: if this_step.x_0 is None: if prev_result.samples is None: prev_samples = Laplace.untemper_laplace_samples(prev_result.laplace_result) prev_transformed = True else: prev_samples = prev_result.samples prev_transformed = False else: prev_samples = this_step.x_0 prev_transformed = False if get_prev_density: prev_density = prev_result.sample_trace.get(return_type='logp', flatten=True) if isinstance(sample_trace, _HTrace): if sample_trace.x_0 is None and get_prev_samples: sample_trace.x_0 = prev_samples sample_trace._x_0_transformed = prev_transformed if get_prev_step: if sample_trace._step_size is None: if this_step.reuse_step_size and prev_result.sample_trace is not None: sample_trace._step_size = _get_step_size(prev_result.sample_trace) if sample_trace._metric == 'diag' or sample_trace._metric == 'full': if this_step.reuse_metric and prev_result.sample_trace is not None: sample_trace._metric = _get_metric(prev_result.sample_trace, sample_trace._metric) if this_step.has_surrogate: if not isinstance(self._density, Density): raise RuntimeError('self.density should be a Density for surrogate modeling.') self._density.surrogate_list = this_step._surrogate_list if this_step._fitted: var_dicts = None else: if not get_prev_samples: raise RuntimeError('You did not give me samples to fit the surrogate model.') if this_step.n_eval > 0 and prev_samples.shape[0] < this_step.n_eval: raise RuntimeError('I need {} points to fit the surrogate model, but I can find at most {} points.'.format(this_step.n_eval, prev_samples.shape[0])) if i > 0 and (not prev_step.has_surrogate): warnings.warn('you are doing surrogate modeling after sampling the true density. 
Please make sure this is what you want.', RuntimeWarning) if get_prev_density: i_resample = this_step.resampler(prev_density, this_step.n_eval) elif this_step.n_eval > 0: i_resample = np.arange(this_step.n_eval) else: i_resample = np.arange(prev_samples.shape[0]) x_fit = prev_samples[i_resample] self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: var_dicts = np.asarray(self.parallel_backend.map(self.density.fun, x_fit)) var_dicts_fit = var_dicts.copy() if this_step.reuse_samples: for j in range(i): if j + this_step.reuse_samples >= i or this_step.reuse_samples < 0: var_dicts_fit = np.concatenate((var_dicts_fit, results[j].var_dicts)) if this_step.logp_cutoff and get_prev_density: logp_fit = np.concatenate([vd.fun[self.density.density_name] for vd in var_dicts_fit]) logq_fit = prev_density[i_resample] logq_min = np.min(logq_fit) np.delete(prev_samples, i_resample, axis=0) np.delete(prev_density, i_resample, axis=0) is_good = logp_fit > logq_min n_good = np.sum(is_good) f_good = n_good / logp_fit.size if f_good < 0.5: warnings.warn('more than half of the samples are abandoned because their logp < logq_min.', RuntimeWarning) if f_good == 0.0: raise RuntimeError('f_good is 0, indicating that the samples seem very bad. Please check your recipe setup. You may also want to try logp_cutoff=False for the SampleStep.') var_dicts_fit = var_dicts_fit[is_good] while len(var_dicts_fit) < this_step.n_eval_min: n_eval_supp = (this_step.n_eval_min - len(var_dicts_fit)) / f_good * this_step.alpha_supp n_eval_supp = max(int(n_eval_supp), 4) if prev_samples.shape[0] < n_eval_supp: raise RuntimeError('I do not have enough supplementary points.') i_resample = this_step.resampler(prev_density, n_eval_supp) x_fit = prev_samples[i_resample] self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: var_dicts_supp = np.asarray(self.parallel_backend.map(self.density.fun, x_fit)) logp_supp = np.concatenate([vd.fun[self.density.density_name] for vd in var_dicts_supp]) np.delete(prev_samples, i_resample, axis=0) np.delete(prev_density, i_resample, axis=0) is_good = logp_supp > logq_min n_good = np.sum(is_good) if n_good < logp_supp.size / 2: warnings.warn('more than half of the samples are abandoned because their logp < logq_min.', RuntimeWarning) var_dicts = np.concatenate((var_dicts, var_dicts_supp)) var_dicts_fit = np.concatenate((var_dicts_fit, var_dicts_supp[is_good])) self.density.fit(var_dicts_fit) self.density.use_surrogate = True t = sample(self.density, sample_trace=sample_trace, parallel_backend=self.parallel_backend) x = t.get(flatten=True) surrogate_list = deepcopy(self._density._surrogate_list) results.append(SampleResult(samples=x, surrogate_list=surrogate_list, var_dicts=var_dicts, sample_trace=t)) else: if isinstance(self._density, Density): self.density.use_surrogate = False t = sample(self.density, sample_trace=sample_trace, parallel_backend=self.parallel_backend) x = t.get(flatten=True) results.append(SampleResult(samples=x, surrogate_list=(), var_dicts=None, sample_trace=t)) steps.append(this_step) print('\n *** SampleStep proceeding: iter #{} finished. *** \n'.format(i)) recipe_trace._i_sample += 1 i = recipe_trace._i_sample this_step = recipe_trace._strategy.update(results) print('\n ***** SampleStep finished. 
***** \n') if not f_pos: step = self.recipe_trace._s_post recipe_trace = self.recipe_trace x_p = None x_q = None f_logp = None f_logq = None logp_p = None logq_q = None x_max = None f_max = None samples = None weights = None weights_trunc = None logp = None logq = None trace_p = None trace_q = None logz = None logz_err = None if recipe_trace._i_optimize: opt_result = recipe_trace._r_optimize[-1] x_max = opt_result.x_max f_max = opt_result.f_max if recipe_trace._i_sample: prev_step = recipe_trace._s_sample[-1] prev_result = recipe_trace._r_sample[-1] if prev_step.has_surrogate: trace_q = prev_result.sample_trace x_q = trace_q.get(return_type='samples', flatten=False) logq_q = trace_q.get(return_type='logp', flatten=False) self.density._surrogate_list = prev_step.surrogate_list else: trace_p = prev_result.sample_trace x_p = trace_p.get(return_type='samples', flatten=False) logp_p = trace_p.get(return_type='logp', flatten=False) elif recipe_trace._i_optimize: prev_step = recipe_trace._s_optimize prev_result = recipe_trace._r_optimize[-1] if prev_step.has_surrogate and prev_result.sample_trace is not None: trace_q = prev_result.sample_trace x_q = trace_q.get(return_type='samples', flatten=False) logq_q = trace_q.get(return_type='logp', flatten=False) self.density._surrogate_list = prev_step.surrogate_list else: warnings.warn('no existing samples found.', RuntimeWarning) else: raise RuntimeError('you have run neither OptimizeStep nor SampleStep before the PostStep.') if x_p is not None: samples = x_p.reshape((-1, x_p.shape[-1])) weights = np.ones(samples.shape[0]) weights_trunc = weights logp = logp_p.reshape(-1) if step.evidence_method is not None: (logz, logz_err) = step.evidence_method(x_p=trace_p, logp=self._f_logp, logp_p=logp_p) if step.n_is > 0: warnings.warn('n_is will not be used when we already have exact samples from logp.', RuntimeWarning) elif x_q is not None: samples = x_q.reshape((-1, x_q.shape[-1])) logq = logq_q.reshape(-1) if step.n_is != 0: if step.n_is < 0 or step.n_is > samples.shape[0]: if step.n_is > 0: warnings.warn('you set n_is as {}, but I can only get {} samples from the previous step, so I will use all these samples to do IS for now.'.format(step.n_is, samples.shape[0]), RuntimeWarning) n_is = samples.shape[0] else: n_is = step.n_is foo = int(samples.shape[0] / n_is) samples = samples[::foo][:n_is] logq = logq[::foo][:n_is] self.density.use_surrogate = False self.density.original_space = True with self.parallel_backend: logp = np.asarray(self.parallel_backend.map(self.density.logp, samples)).reshape(-1) weights = np.exp(logp - logq) if step.k_trunc < 0: weights_trunc = weights.copy() else: weights_trunc = np.clip(weights, 0, np.mean(weights) * n_is ** step.k_trunc) if step.evidence_method is not None: (logz_q, logz_err_q) = step.evidence_method(x_p=trace_q, logp=self._f_logq, logp_p=logq_q) logz_pq = logsumexp(logp - logq, b=1 / logp.size) foo = np.exp(logp - logq - logz_pq) tau = float(integrated_time(foo)) logz_err_pq = (np.var(foo) / np.mean(foo) ** 2 / logp.size * tau) ** 0.5 logz = logz_q + logz_pq logz_err = (logz_err_q ** 2 + logz_err_pq ** 2) ** 0.5 else: weights = np.ones(samples.shape[0]) weights_trunc = weights if step.evidence_method is not None: warnings.warn('since n_is is 0, we are computing the evidence of logq, which may differ from the evidence of logp.', RuntimeWarning) (logz, logz_err) = step.evidence_method(x_p=trace_q, logp=self._f_logq, logp_p=logq_q) elif step.n_is is not None or step.evidence_method is not None: warnings.warn('n_is and 
evidence_method will not be used when we only have Laplace samples.', RuntimeWarning) try: n_call = recipe_trace.n_call + step.n_is warnings.warn('as of now, n_call does not take the possible logp calls during evidence evaluation into account.', RuntimeWarning) except Exception: n_call = None recipe_trace._r_post = PostResult(samples, weights, weights_trunc, logp, logq, logz, logz_err, x_p, x_q, logp_p, logq_q, trace_p, trace_q, n_call, x_max, f_max) recipe_trace._i_post = 1 print('\n ***** PostStep finished. ***** \n') </DeepExtract>
bayesfast
positive
def getInventory(p_org):
    <DeepExtract>
    global LAST_MERAKI_REQUEST
    if (datetime.datetime.now() - LAST_MERAKI_REQUEST).total_seconds() < API_EXEC_DELAY:
        time.sleep(API_EXEC_DELAY)
    LAST_MERAKI_REQUEST = datetime.datetime.now()
    return
    </DeepExtract>
    try:
        r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (p_org.shard, p_org.id), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
    except:
        print('ERROR 06: Unable to contact Meraki cloud')
        return None
    if r.status_code != requests.codes.ok:
        return None
    return r.json()
def getInventory(p_org):
    global LAST_MERAKI_REQUEST
    if (datetime.datetime.now() - LAST_MERAKI_REQUEST).total_seconds() < API_EXEC_DELAY:
        time.sleep(API_EXEC_DELAY)
    LAST_MERAKI_REQUEST = datetime.datetime.now()
    return
    try:
        r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (p_org.shard, p_org.id), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
    except:
        print('ERROR 06: Unable to contact Meraki cloud')
        return None
    if r.status_code != requests.codes.ok:
        return None
    return r.json()
automation-scripts
positive
def __init__(self, with_comments=False, yield_comments=False):
    self.lexer = None
    self.prev_token = None
    self.valid_prev_token = None
    self.cur_token = None
    self.cur_token_real = None
    self.next_tokens = []
    self.token_stack = [[None, []]]
    self.newline_idx = [0]
    self.error_token_handlers = [broken_string_token_handler]
    self.with_comments = with_comments
    self.yield_comments = yield_comments
    self.hidden_tokens = []
    <DeepExtract>
    self.lexer = ply.lex.lex(object=self, **kwargs)
    </DeepExtract>
    if not with_comments:
        self.token = self._token
def __init__(self, with_comments=False, yield_comments=False):
    self.lexer = None
    self.prev_token = None
    self.valid_prev_token = None
    self.cur_token = None
    self.cur_token_real = None
    self.next_tokens = []
    self.token_stack = [[None, []]]
    self.newline_idx = [0]
    self.error_token_handlers = [broken_string_token_handler]
    self.with_comments = with_comments
    self.yield_comments = yield_comments
    self.hidden_tokens = []
    self.lexer = ply.lex.lex(object=self, **kwargs)
    if not with_comments:
        self.token = self._token
calmjs.parse
positive
def run():
    with open(schema_yaml) as f:
        schema = yaml.safe_load(f)
    with open(values_yaml) as f:
        values = yaml.safe_load(f)
    <DeepExtract>
    items = []
    for (k, v) in reduce_schema(schema).items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            if v:
                items.extend(flatten(v, parent_key=new_key, sep=sep))
            else:
                items.append(new_key)
        else:
            items.append(new_key)
    if not parent_key:
        schema = set(items)
    else:
        schema = items
    </DeepExtract>
    <DeepExtract>
    items = []
    for (k, v) in values.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            if v:
                items.extend(flatten(v, parent_key=new_key, sep=sep))
            else:
                items.append(new_key)
        else:
            items.append(new_key)
    if not parent_key:
        values = set(items)
    else:
        values = items
    </DeepExtract>
    print('The keys from values.yaml minus those from values.schema.yaml:\n', '\n'.join(sorted(values - schema)), '\n\n', sep='\n')
    print('The keys from values.schema.yaml minus those from values.yaml:\n', '\n'.join(sorted(schema - values)), '\n\n', sep='\n')
def run():
    with open(schema_yaml) as f:
        schema = yaml.safe_load(f)
    with open(values_yaml) as f:
        values = yaml.safe_load(f)
    items = []
    for (k, v) in reduce_schema(schema).items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            if v:
                items.extend(flatten(v, parent_key=new_key, sep=sep))
            else:
                items.append(new_key)
        else:
            items.append(new_key)
    if not parent_key:
        schema = set(items)
    else:
        schema = items
    items = []
    for (k, v) in values.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            if v:
                items.extend(flatten(v, parent_key=new_key, sep=sep))
            else:
                items.append(new_key)
        else:
            items.append(new_key)
    if not parent_key:
        values = set(items)
    else:
        values = items
    print('The keys from values.yaml minus those from values.schema.yaml:\n', '\n'.join(sorted(values - schema)), '\n\n', sep='\n')
    print('The keys from values.schema.yaml minus those from values.yaml:\n', '\n'.join(sorted(schema - values)), '\n\n', sep='\n')
dask-gateway
positive
def crawl_helper(self, url_id: str, pub_date: datetime.date) -> CrawlerResult:
    page_url = 'http://www.pondus.no/?section=artikkel&id=%s' % url_id
    <DeepExtract>
    if page_url not in self.pages:
        self.pages[page_url] = LxmlParser(page_url, headers=self.headers)
    page = self.pages[page_url]
    </DeepExtract>
    url = page.src('.imagegallery img')
    return CrawlerImage(url)
def crawl_helper(self, url_id: str, pub_date: datetime.date) -> CrawlerResult:
    page_url = 'http://www.pondus.no/?section=artikkel&id=%s' % url_id
    if page_url not in self.pages:
        self.pages[page_url] = LxmlParser(page_url, headers=self.headers)
    page = self.pages[page_url]
    url = page.src('.imagegallery img')
    return CrawlerImage(url)
comics
positive
def _select_instance_data(self, instance, tagname, config):

    def get_tags(inst):
        return {tag['Key']: tag['Value'] for tag in inst['Tags']} if 'Tags' in inst else {}
    <DeepExtract>
    tags = {tag['Key']: tag['Value'] for tag in instance['Tags']} if 'Tags' in instance else {}
    </DeepExtract>
    name = tags.get('Name', '')
    instance_id = instance['InstanceId']
    state = instance['State']['Code'] & 255
    is_running = self.EC2_STATE_RUNNING == state
    is_terminated = state == Ec2Service.EC2_STATE_TERMINATED
    schedule_name = tags.get(tagname)
    maintenance_window_schedule = None
    schedule = config.schedules.get(schedule_name, None)
    if schedule is not None:
        if schedule.use_maintenance_window and schedule.ssm_maintenance_window not in [None, '']:
            maintenance_window_schedule = self._ssm_maintenance_windows.get(schedule.ssm_maintenance_window, None)
            if maintenance_window_schedule is None:
                self._logger.error(ERR_MAINT_WINDOW_NOT_FOUND_OR_DISABLED, schedule.ssm_maintenance_window, schedule.name)
                self._ssm_maintenance_windows[schedule.ssm_maintenance_window] = 'NOT-FOUND'
            if maintenance_window_schedule == 'NOT-FOUND':
                maintenance_window_schedule = None
    instance_data = {schedulers.INST_ID: instance_id, schedulers.INST_SCHEDULE: schedule_name, schedulers.INST_HIBERNATE: schedule_name in self.schedules_with_hibernation, schedulers.INST_NAME: name, schedulers.INST_STATE: state, schedulers.INST_STATE_NAME: instance['State']['Name'], schedulers.INST_ALLOW_RESIZE: self.allow_resize, schedulers.INST_RESIZED: False, schedulers.INST_IS_RUNNING: is_running, schedulers.INST_IS_TERMINATED: is_terminated, schedulers.INST_CURRENT_STATE: InstanceSchedule.STATE_RUNNING if is_running else InstanceSchedule.STATE_STOPPED, schedulers.INST_INSTANCE_TYPE: instance['InstanceType'], schedulers.INST_TAGS: tags, schedulers.INST_MAINTENANCE_WINDOW: maintenance_window_schedule}
    return instance_data
def _select_instance_data(self, instance, tagname, config):

    def get_tags(inst):
        return {tag['Key']: tag['Value'] for tag in inst['Tags']} if 'Tags' in inst else {}
    tags = {tag['Key']: tag['Value'] for tag in instance['Tags']} if 'Tags' in instance else {}
    name = tags.get('Name', '')
    instance_id = instance['InstanceId']
    state = instance['State']['Code'] & 255
    is_running = self.EC2_STATE_RUNNING == state
    is_terminated = state == Ec2Service.EC2_STATE_TERMINATED
    schedule_name = tags.get(tagname)
    maintenance_window_schedule = None
    schedule = config.schedules.get(schedule_name, None)
    if schedule is not None:
        if schedule.use_maintenance_window and schedule.ssm_maintenance_window not in [None, '']:
            maintenance_window_schedule = self._ssm_maintenance_windows.get(schedule.ssm_maintenance_window, None)
            if maintenance_window_schedule is None:
                self._logger.error(ERR_MAINT_WINDOW_NOT_FOUND_OR_DISABLED, schedule.ssm_maintenance_window, schedule.name)
                self._ssm_maintenance_windows[schedule.ssm_maintenance_window] = 'NOT-FOUND'
            if maintenance_window_schedule == 'NOT-FOUND':
                maintenance_window_schedule = None
    instance_data = {schedulers.INST_ID: instance_id, schedulers.INST_SCHEDULE: schedule_name, schedulers.INST_HIBERNATE: schedule_name in self.schedules_with_hibernation, schedulers.INST_NAME: name, schedulers.INST_STATE: state, schedulers.INST_STATE_NAME: instance['State']['Name'], schedulers.INST_ALLOW_RESIZE: self.allow_resize, schedulers.INST_RESIZED: False, schedulers.INST_IS_RUNNING: is_running, schedulers.INST_IS_TERMINATED: is_terminated, schedulers.INST_CURRENT_STATE: InstanceSchedule.STATE_RUNNING if is_running else InstanceSchedule.STATE_STOPPED, schedulers.INST_INSTANCE_TYPE: instance['InstanceType'], schedulers.INST_TAGS: tags, schedulers.INST_MAINTENANCE_WINDOW: maintenance_window_schedule}
    return instance_data
aws-instance-scheduler
positive
def exec_module(self, **kwargs): nsg = None for key in list(self.module_arg_spec.keys()) + ['tags']: setattr(self, key, kwargs[key]) if self.module._name == 'azure_rm_virtualmachine_scaleset': self.module.deprecate("The 'azure_rm_virtualmachine_scaleset' module has been renamed to 'azure_rm_virtualmachinescaleset'", version='2.12') self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent]) self.zones = [int(i) for i in self.zones] if self.zones else None if not self.virtual_network_resource_group: self.virtual_network_resource_group = self.resource_group changed = False results = dict() vmss = None disable_ssh_password = None vmss_dict = None virtual_network = None subnet = None image_reference = None custom_image = False load_balancer_backend_address_pools = None load_balancer_inbound_nat_pools = None load_balancer = None application_gateway = None application_gateway_backend_address_pools = None support_lb_change = True resource_group = self.get_resource_group(self.resource_group) if not self.location: self.location = resource_group.location if self.custom_data: self.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data))) if self.state == 'present': if self.vm_size and (not self.vm_size_is_valid()): self.fail('Parameter error: vm_size {0} is not valid for your subscription and location.'.format(self.vm_size)) if self.ssh_public_keys: msg = 'Parameter error: expecting ssh_public_keys to be a list of type dict where each dict contains keys: path, key_data.' for key in self.ssh_public_keys: if not isinstance(key, dict): self.fail(msg) if not key.get('path') or not key.get('key_data'): self.fail(msg) if self.image and isinstance(self.image, dict): if all((key in self.image for key in ('publisher', 'offer', 'sku', 'version'))): <DeepExtract> try: versions = self.compute_client.virtual_machine_images.list(self.location, self.image['publisher'], self.image['offer'], self.image['sku']) except CloudError as exc: self.fail('Error fetching image {0} {1} {2} - {3}'.format(self.image['publisher'], self.image['offer'], self.image['sku'], str(exc))) if versions and len(versions) > 0: if self.image['version'] == 'latest': marketplace_image = versions[len(versions) - 1] for version in versions: if version.name == self.image['version']: marketplace_image = version self.fail('Error could not find image {0} {1} {2} {3}'.format(self.image['publisher'], self.image['offer'], self.image['sku'], self.image['version'])) </DeepExtract> if self.image['version'] == 'latest': self.image['version'] = marketplace_image.name self.log('Using image version {0}'.format(self.image['version'])) image_reference = self.compute_models.ImageReference(publisher=self.image['publisher'], offer=self.image['offer'], sku=self.image['sku'], version=self.image['version']) elif self.image.get('name'): custom_image = True <DeepExtract> try: if self.image.get('resource_group'): vm_images = self.compute_client.images.list_by_resource_group(self.image.get('resource_group')) else: vm_images = self.compute_client.images.list() except Exception as exc: self.fail('Error fetching custom images from subscription - {0}'.format(str(exc))) for vm_image in vm_images: if vm_image.name == self.image.get('name'): self.log('Using custom image id {0}'.format(vm_image.id)) image_reference = self.compute_models.ImageReference(id=vm_image.id) self.fail('Error could not find image with name {0}'.format(self.image.get('name'))) </DeepExtract> elif self.image.get('id'): try: image_reference = 
self.compute_models.ImageReference(id=self.image['id']) except Exception as exc: self.fail('id Error: Cannot get image from the reference id - {0}'.format(self.image['id'])) else: self.fail('parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]') elif self.image and isinstance(self.image, str): custom_image = True <DeepExtract> try: if resource_group: vm_images = self.compute_client.images.list_by_resource_group(resource_group) else: vm_images = self.compute_client.images.list() except Exception as exc: self.fail('Error fetching custom images from subscription - {0}'.format(str(exc))) for vm_image in vm_images: if vm_image.name == self.image: self.log('Using custom image id {0}'.format(vm_image.id)) image_reference = self.compute_models.ImageReference(id=vm_image.id) self.fail('Error could not find image with name {0}'.format(self.image)) </DeepExtract> elif self.image: self.fail('parameter error: expecting image to be a string or dict not {0}'.format(type(self.image).__name__)) disable_ssh_password = not self.ssh_password_enabled if self.load_balancer: <DeepExtract> id_dict = parse_resource_id(self.load_balancer) try: load_balancer = self.network_client.load_balancers.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name')) except CloudError as exc: self.fail('Error fetching load balancer {0} - {1}'.format(self.load_balancer, str(exc))) </DeepExtract> load_balancer_backend_address_pools = [self.compute_models.SubResource(id=resource.id) for resource in load_balancer.backend_address_pools] if load_balancer.backend_address_pools else None load_balancer_inbound_nat_pools = [self.compute_models.SubResource(id=resource.id) for resource in load_balancer.inbound_nat_pools] if load_balancer.inbound_nat_pools else None if self.application_gateway: <DeepExtract> id_dict = parse_resource_id(self.application_gateway) try: application_gateway = self.network_client.application_gateways.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name')) except CloudError as exc: self.fail('Error fetching application_gateway {0} - {1}'.format(self.application_gateway, str(exc))) </DeepExtract> application_gateway_backend_address_pools = [self.compute_models.SubResource(id=resource.id) for resource in application_gateway.backend_address_pools] if application_gateway.backend_address_pools else None try: self.log('Fetching virtual machine scale set {0}'.format(self.name)) vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name) self.check_provisioning_state(vmss, self.state) <DeepExtract> result = self.serialize_obj(vmss, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES) result['id'] = vmss.id result['name'] = vmss.name result['type'] = vmss.type result['location'] = vmss.location result['tags'] = vmss.tags vmss_dict = result </DeepExtract> if self.state == 'present': differences = [] results = vmss_dict if self.os_disk_caching and self.os_disk_caching != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching']: self.log('CHANGED: virtual machine scale set {0} - OS disk caching'.format(self.name)) differences.append('OS Disk caching') changed = True vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'] = self.os_disk_caching if self.capacity and self.capacity != vmss_dict['sku']['capacity']: self.log('CHANGED: virtual machine scale set {0} - Capacity'.format(self.name)) differences.append('Capacity') changed = True 
vmss_dict['sku']['capacity'] = self.capacity if self.data_disks and len(self.data_disks) != len(vmss_dict['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])): self.log('CHANGED: virtual machine scale set {0} - Data Disks'.format(self.name)) differences.append('Data Disks') changed = True if self.upgrade_policy and self.upgrade_policy != vmss_dict['properties']['upgradePolicy']['mode']: self.log('CHANGED: virtual machine scale set {0} - Upgrade Policy'.format(self.name)) differences.append('Upgrade Policy') changed = True vmss_dict['properties']['upgradePolicy']['mode'] = self.upgrade_policy if image_reference and image_reference.as_dict() != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference']: self.log('CHANGED: virtual machine scale set {0} - Image'.format(self.name)) differences.append('Image') changed = True vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference'] = image_reference.as_dict() (update_tags, vmss_dict['tags']) = self.update_tags(vmss_dict.get('tags', dict())) if update_tags: differences.append('Tags') changed = True if bool(self.overprovision) != bool(vmss_dict['properties']['overprovision']): differences.append('overprovision') changed = True if bool(self.single_placement_group) != bool(vmss_dict['properties']['singlePlacementGroup']): differences.append('single_placement_group') changed = True vmss_dict['zones'] = [int(i) for i in vmss_dict['zones']] if 'zones' in vmss_dict and vmss_dict['zones'] else None if self.zones != vmss_dict['zones']: self.log('CHANGED: virtual machine scale sets {0} zones'.format(self.name)) differences.append('Zones') changed = True vmss_dict['zones'] = self.zones nicConfigs = vmss_dict['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'] backend_address_pool = nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('loadBalancerBackendAddressPools', []) backend_address_pool += nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('applicationGatewayBackendAddressPools', []) lb_or_ag_id = None if len(nicConfigs) != 1 or len(backend_address_pool) != 1: support_lb_change = False self.module.warn('Updating more than one load balancer on VMSS is currently not supported') else: if load_balancer: lb_or_ag_id = '{0}/'.format(load_balancer.id) elif application_gateway: lb_or_ag_id = '{0}/'.format(application_gateway.id) backend_address_pool_id = backend_address_pool[0].get('id') if bool(lb_or_ag_id) != bool(backend_address_pool_id) or not backend_address_pool_id.startswith(lb_or_ag_id): differences.append('load_balancer') changed = True if self.custom_data: if self.custom_data != vmss_dict['properties']['virtualMachineProfile']['osProfile'].get('customData'): differences.append('custom_data') changed = True vmss_dict['properties']['virtualMachineProfile']['osProfile']['customData'] = self.custom_data self.differences = differences elif self.state == 'absent': self.log("CHANGED: virtual machine scale set {0} exists and requested state is 'absent'".format(self.name)) results = dict() changed = True except CloudError: self.log('Virtual machine scale set {0} does not exist'.format(self.name)) if self.state == 'present': self.log("CHANGED: virtual machine scale set {0} does not exist but state is 'present'.".format(self.name)) changed = True self.results['changed'] = changed self.results['ansible_facts']['azure_vmss'] = results if self.check_mode: return self.results if changed: if self.state == 'present': 
if not vmss: self.log('Create virtual machine scale set {0}'.format(self.name)) self.results['actions'].append('Created VMSS {0}'.format(self.name)) if self.os_type == 'Linux': if disable_ssh_password and (not self.ssh_public_keys): self.fail('Parameter error: ssh_public_keys required when disabling SSH password.') if not self.virtual_network_name: default_vnet = self.create_default_vnet() virtual_network = default_vnet.id self.virtual_network_name = default_vnet.name if self.subnet_name: <DeepExtract> self.log('Fetching subnet {0} in virtual network {1}'.format(self.subnet_name, self.virtual_network_name)) try: subnet = self.network_client.subnets.get(self.virtual_network_resource_group, self.virtual_network_name, self.subnet_name) except CloudError as exc: self.fail('Error: fetching subnet {0} in virtual network {1} - {2}'.format(self.subnet_name, self.virtual_network_name, str(exc))) subnet = subnet </DeepExtract> if not self.short_hostname: self.short_hostname = self.name if not image_reference: self.fail('Parameter error: an image is required when creating a virtual machine.') managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=self.managed_disk_type) if self.security_group: <DeepExtract> nsg = self.security_group resource_group = self.resource_group if isinstance(self.security_group, dict): nsg = self.security_group.get('name') resource_group = self.security_group.get('resource_group', self.resource_group) id = format_resource_id(val=nsg, subscription_id=self.subscription_id, namespace='Microsoft.Network', types='networkSecurityGroups', resource_group=resource_group) name = azure_id_to_dict(id).get('name') nsg = dict(id=id, name=name) </DeepExtract> if nsg: self.security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id')) os_profile = None if self.admin_username or self.custom_data or self.ssh_public_keys: os_profile = self.compute_models.VirtualMachineScaleSetOSProfile(admin_username=self.admin_username, computer_name_prefix=self.short_hostname, custom_data=self.custom_data) vmss_resource = self.compute_models.VirtualMachineScaleSet(location=self.location, overprovision=self.overprovision, single_placement_group=self.single_placement_group, tags=self.tags, upgrade_policy=self.compute_models.UpgradePolicy(mode=self.upgrade_policy), sku=self.compute_models.Sku(name=self.vm_size, capacity=self.capacity, tier=self.tier), virtual_machine_profile=self.compute_models.VirtualMachineScaleSetVMProfile(os_profile=os_profile, storage_profile=self.compute_models.VirtualMachineScaleSetStorageProfile(os_disk=self.compute_models.VirtualMachineScaleSetOSDisk(managed_disk=managed_disk, create_option=self.compute_models.DiskCreateOptionTypes.from_image, caching=self.os_disk_caching), image_reference=image_reference), network_profile=self.compute_models.VirtualMachineScaleSetNetworkProfile(network_interface_configurations=[self.compute_models.VirtualMachineScaleSetNetworkConfiguration(name=self.name, primary=True, ip_configurations=[self.compute_models.VirtualMachineScaleSetIPConfiguration(name='default', subnet=self.compute_models.ApiEntityReference(id=subnet.id), primary=True, load_balancer_backend_address_pools=load_balancer_backend_address_pools, load_balancer_inbound_nat_pools=load_balancer_inbound_nat_pools, application_gateway_backend_address_pools=application_gateway_backend_address_pools)], enable_accelerated_networking=self.enable_accelerated_networking, network_security_group=self.security_group)])), zones=self.zones) if 
self.admin_password: vmss_resource.virtual_machine_profile.os_profile.admin_password = self.admin_password if self.os_type == 'Linux' and os_profile: vmss_resource.virtual_machine_profile.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(disable_password_authentication=disable_ssh_password) if self.ssh_public_keys: ssh_config = self.compute_models.SshConfiguration() ssh_config.public_keys = [self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys] vmss_resource.virtual_machine_profile.os_profile.linux_configuration.ssh = ssh_config if self.data_disks: data_disks = [] for data_disk in self.data_disks: data_disk_managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=data_disk.get('managed_disk_type', None)) data_disk['caching'] = data_disk.get('caching', self.compute_models.CachingTypes.read_only) data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(lun=data_disk.get('lun', None), caching=data_disk.get('caching', None), create_option=self.compute_models.DiskCreateOptionTypes.empty, disk_size_gb=data_disk.get('disk_size_gb', None), managed_disk=data_disk_managed_disk)) vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks self.log('Create virtual machine with parameters:') <DeepExtract> try: poller = self.compute_client.virtual_machine_scale_sets.create_or_update(self.resource_group, self.name, vmss_resource) self.get_poller_result(poller) except CloudError as exc: self.fail('Error creating or updating virtual machine {0} - {1}'.format(self.name, str(exc))) </DeepExtract> elif self.differences and len(self.differences) > 0: self.log('Update virtual machine scale set {0}'.format(self.name)) self.results['actions'].append('Updated VMSS {0}'.format(self.name)) <DeepExtract> try: vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name) vmss_resource = vmss except CloudError as exc: self.fail('Error getting virtual machine scale set {0} - {1}'.format(self.name, str(exc))) </DeepExtract> vmss_resource.virtual_machine_profile.storage_profile.os_disk.caching = self.os_disk_caching vmss_resource.sku.capacity = self.capacity vmss_resource.overprovision = self.overprovision vmss_resource.single_placement_group = self.single_placement_group if support_lb_change: if self.load_balancer: vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].load_balancer_backend_address_pools = load_balancer_backend_address_pools vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].application_gateway_backend_address_pools = None elif self.application_gateway: vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].application_gateway_backend_address_pools = application_gateway_backend_address_pools vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].load_balancer_backend_address_pools = None vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].load_balancer_inbound_nat_pools = None if self.data_disks is not None: data_disks = [] for data_disk in self.data_disks: 
data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(lun=data_disk['lun'], caching=data_disk['caching'], create_option=self.compute_models.DiskCreateOptionTypes.empty, disk_size_gb=data_disk['disk_size_gb'], managed_disk=self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=data_disk['managed_disk_type']))) vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks if image_reference is not None: vmss_resource.virtual_machine_profile.storage_profile.image_reference = image_reference self.log('Update virtual machine with parameters:') <DeepExtract> try: poller = self.compute_client.virtual_machine_scale_sets.create_or_update(self.resource_group, self.name, vmss_resource) self.get_poller_result(poller) except CloudError as exc: self.fail('Error creating or updating virtual machine {0} - {1}'.format(self.name, str(exc))) </DeepExtract> <DeepExtract> result = self.serialize_obj(self.get_vmss(), AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES) result['id'] = self.get_vmss().id result['name'] = self.get_vmss().name result['type'] = self.get_vmss().type result['location'] = self.get_vmss().location result['tags'] = self.get_vmss().tags self.results['ansible_facts']['azure_vmss'] = result </DeepExtract> elif self.state == 'absent': self.log('Delete virtual machine scale set {0}'.format(self.name)) self.results['ansible_facts']['azure_vmss'] = None <DeepExtract> self.log('Deleting virtual machine scale set {0}'.format(self.name)) self.results['actions'].append('Deleted virtual machine scale set {0}'.format(self.name)) try: poller = self.compute_client.virtual_machine_scale_sets.delete(self.resource_group, self.name) self.get_poller_result(poller) except CloudError as exc: self.fail('Error deleting virtual machine scale set {0} - {1}'.format(self.name, str(exc))) return True </DeepExtract> del self.results['actions'] return self.results
def exec_module(self, **kwargs): nsg = None for key in list(self.module_arg_spec.keys()) + ['tags']: setattr(self, key, kwargs[key]) if self.module._name == 'azure_rm_virtualmachine_scaleset': self.module.deprecate("The 'azure_rm_virtualmachine_scaleset' module has been renamed to 'azure_rm_virtualmachinescaleset'", version='2.12') self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent]) self.zones = [int(i) for i in self.zones] if self.zones else None if not self.virtual_network_resource_group: self.virtual_network_resource_group = self.resource_group changed = False results = dict() vmss = None disable_ssh_password = None vmss_dict = None virtual_network = None subnet = None image_reference = None custom_image = False load_balancer_backend_address_pools = None load_balancer_inbound_nat_pools = None load_balancer = None application_gateway = None application_gateway_backend_address_pools = None support_lb_change = True resource_group = self.get_resource_group(self.resource_group) if not self.location: self.location = resource_group.location if self.custom_data: self.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data))) if self.state == 'present': if self.vm_size and (not self.vm_size_is_valid()): self.fail('Parameter error: vm_size {0} is not valid for your subscription and location.'.format(self.vm_size)) if self.ssh_public_keys: msg = 'Parameter error: expecting ssh_public_keys to be a list of type dict where each dict contains keys: path, key_data.' for key in self.ssh_public_keys: if not isinstance(key, dict): self.fail(msg) if not key.get('path') or not key.get('key_data'): self.fail(msg) if self.image and isinstance(self.image, dict): if all((key in self.image for key in ('publisher', 'offer', 'sku', 'version'))): try: versions = self.compute_client.virtual_machine_images.list(self.location, self.image['publisher'], self.image['offer'], self.image['sku']) except CloudError as exc: self.fail('Error fetching image {0} {1} {2} - {3}'.format(self.image['publisher'], self.image['offer'], self.image['sku'], str(exc))) if versions and len(versions) > 0: if self.image['version'] == 'latest': marketplace_image = versions[len(versions) - 1] for version in versions: if version.name == self.image['version']: marketplace_image = version self.fail('Error could not find image {0} {1} {2} {3}'.format(self.image['publisher'], self.image['offer'], self.image['sku'], self.image['version'])) if self.image['version'] == 'latest': self.image['version'] = marketplace_image.name self.log('Using image version {0}'.format(self.image['version'])) image_reference = self.compute_models.ImageReference(publisher=self.image['publisher'], offer=self.image['offer'], sku=self.image['sku'], version=self.image['version']) elif self.image.get('name'): custom_image = True try: if self.image.get('resource_group'): vm_images = self.compute_client.images.list_by_resource_group(self.image.get('resource_group')) else: vm_images = self.compute_client.images.list() except Exception as exc: self.fail('Error fetching custom images from subscription - {0}'.format(str(exc))) for vm_image in vm_images: if vm_image.name == self.image.get('name'): self.log('Using custom image id {0}'.format(vm_image.id)) image_reference = self.compute_models.ImageReference(id=vm_image.id) self.fail('Error could not find image with name {0}'.format(self.image.get('name'))) elif self.image.get('id'): try: image_reference = self.compute_models.ImageReference(id=self.image['id']) except Exception as exc: 
self.fail('id Error: Cannot get image from the reference id - {0}'.format(self.image['id'])) else: self.fail('parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]') elif self.image and isinstance(self.image, str): custom_image = True try: if resource_group: vm_images = self.compute_client.images.list_by_resource_group(resource_group) else: vm_images = self.compute_client.images.list() except Exception as exc: self.fail('Error fetching custom images from subscription - {0}'.format(str(exc))) for vm_image in vm_images: if vm_image.name == self.image: self.log('Using custom image id {0}'.format(vm_image.id)) image_reference = self.compute_models.ImageReference(id=vm_image.id) self.fail('Error could not find image with name {0}'.format(self.image)) elif self.image: self.fail('parameter error: expecting image to be a string or dict not {0}'.format(type(self.image).__name__)) disable_ssh_password = not self.ssh_password_enabled if self.load_balancer: id_dict = parse_resource_id(self.load_balancer) try: load_balancer = self.network_client.load_balancers.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name')) except CloudError as exc: self.fail('Error fetching load balancer {0} - {1}'.format(self.load_balancer, str(exc))) load_balancer_backend_address_pools = [self.compute_models.SubResource(id=resource.id) for resource in load_balancer.backend_address_pools] if load_balancer.backend_address_pools else None load_balancer_inbound_nat_pools = [self.compute_models.SubResource(id=resource.id) for resource in load_balancer.inbound_nat_pools] if load_balancer.inbound_nat_pools else None if self.application_gateway: id_dict = parse_resource_id(self.application_gateway) try: application_gateway = self.network_client.application_gateways.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name')) except CloudError as exc: self.fail('Error fetching application_gateway {0} - {1}'.format(self.application_gateway, str(exc))) application_gateway_backend_address_pools = [self.compute_models.SubResource(id=resource.id) for resource in application_gateway.backend_address_pools] if application_gateway.backend_address_pools else None try: self.log('Fetching virtual machine scale set {0}'.format(self.name)) vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name) self.check_provisioning_state(vmss, self.state) result = self.serialize_obj(vmss, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES) result['id'] = vmss.id result['name'] = vmss.name result['type'] = vmss.type result['location'] = vmss.location result['tags'] = vmss.tags vmss_dict = result if self.state == 'present': differences = [] results = vmss_dict if self.os_disk_caching and self.os_disk_caching != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching']: self.log('CHANGED: virtual machine scale set {0} - OS disk caching'.format(self.name)) differences.append('OS Disk caching') changed = True vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'] = self.os_disk_caching if self.capacity and self.capacity != vmss_dict['sku']['capacity']: self.log('CHANGED: virtual machine scale set {0} - Capacity'.format(self.name)) differences.append('Capacity') changed = True vmss_dict['sku']['capacity'] = self.capacity if self.data_disks and len(self.data_disks) != len(vmss_dict['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])): self.log('CHANGED: 
virtual machine scale set {0} - Data Disks'.format(self.name)) differences.append('Data Disks') changed = True if self.upgrade_policy and self.upgrade_policy != vmss_dict['properties']['upgradePolicy']['mode']: self.log('CHANGED: virtual machine scale set {0} - Upgrade Policy'.format(self.name)) differences.append('Upgrade Policy') changed = True vmss_dict['properties']['upgradePolicy']['mode'] = self.upgrade_policy if image_reference and image_reference.as_dict() != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference']: self.log('CHANGED: virtual machine scale set {0} - Image'.format(self.name)) differences.append('Image') changed = True vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference'] = image_reference.as_dict() (update_tags, vmss_dict['tags']) = self.update_tags(vmss_dict.get('tags', dict())) if update_tags: differences.append('Tags') changed = True if bool(self.overprovision) != bool(vmss_dict['properties']['overprovision']): differences.append('overprovision') changed = True if bool(self.single_placement_group) != bool(vmss_dict['properties']['singlePlacementGroup']): differences.append('single_placement_group') changed = True vmss_dict['zones'] = [int(i) for i in vmss_dict['zones']] if 'zones' in vmss_dict and vmss_dict['zones'] else None if self.zones != vmss_dict['zones']: self.log('CHANGED: virtual machine scale sets {0} zones'.format(self.name)) differences.append('Zones') changed = True vmss_dict['zones'] = self.zones nicConfigs = vmss_dict['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'] backend_address_pool = nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('loadBalancerBackendAddressPools', []) backend_address_pool += nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('applicationGatewayBackendAddressPools', []) lb_or_ag_id = None if len(nicConfigs) != 1 or len(backend_address_pool) != 1: support_lb_change = False self.module.warn('Updating more than one load balancer on VMSS is currently not supported') else: if load_balancer: lb_or_ag_id = '{0}/'.format(load_balancer.id) elif application_gateway: lb_or_ag_id = '{0}/'.format(application_gateway.id) backend_address_pool_id = backend_address_pool[0].get('id') if bool(lb_or_ag_id) != bool(backend_address_pool_id) or not backend_address_pool_id.startswith(lb_or_ag_id): differences.append('load_balancer') changed = True if self.custom_data: if self.custom_data != vmss_dict['properties']['virtualMachineProfile']['osProfile'].get('customData'): differences.append('custom_data') changed = True vmss_dict['properties']['virtualMachineProfile']['osProfile']['customData'] = self.custom_data self.differences = differences elif self.state == 'absent': self.log("CHANGED: virtual machine scale set {0} exists and requested state is 'absent'".format(self.name)) results = dict() changed = True except CloudError: self.log('Virtual machine scale set {0} does not exist'.format(self.name)) if self.state == 'present': self.log("CHANGED: virtual machine scale set {0} does not exist but state is 'present'.".format(self.name)) changed = True self.results['changed'] = changed self.results['ansible_facts']['azure_vmss'] = results if self.check_mode: return self.results if changed: if self.state == 'present': if not vmss: self.log('Create virtual machine scale set {0}'.format(self.name)) self.results['actions'].append('Created VMSS {0}'.format(self.name)) if self.os_type == 'Linux': if disable_ssh_password and 
(not self.ssh_public_keys): self.fail('Parameter error: ssh_public_keys required when disabling SSH password.') if not self.virtual_network_name: default_vnet = self.create_default_vnet() virtual_network = default_vnet.id self.virtual_network_name = default_vnet.name if self.subnet_name: self.log('Fetching subnet {0} in virtual network {1}'.format(self.subnet_name, self.virtual_network_name)) try: subnet = self.network_client.subnets.get(self.virtual_network_resource_group, self.virtual_network_name, self.subnet_name) except CloudError as exc: self.fail('Error: fetching subnet {0} in virtual network {1} - {2}'.format(self.subnet_name, self.virtual_network_name, str(exc))) subnet = subnet if not self.short_hostname: self.short_hostname = self.name if not image_reference: self.fail('Parameter error: an image is required when creating a virtual machine.') managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=self.managed_disk_type) if self.security_group: nsg = self.security_group resource_group = self.resource_group if isinstance(self.security_group, dict): nsg = self.security_group.get('name') resource_group = self.security_group.get('resource_group', self.resource_group) id = format_resource_id(val=nsg, subscription_id=self.subscription_id, namespace='Microsoft.Network', types='networkSecurityGroups', resource_group=resource_group) name = azure_id_to_dict(id).get('name') nsg = dict(id=id, name=name) if nsg: self.security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id')) os_profile = None if self.admin_username or self.custom_data or self.ssh_public_keys: os_profile = self.compute_models.VirtualMachineScaleSetOSProfile(admin_username=self.admin_username, computer_name_prefix=self.short_hostname, custom_data=self.custom_data) vmss_resource = self.compute_models.VirtualMachineScaleSet(location=self.location, overprovision=self.overprovision, single_placement_group=self.single_placement_group, tags=self.tags, upgrade_policy=self.compute_models.UpgradePolicy(mode=self.upgrade_policy), sku=self.compute_models.Sku(name=self.vm_size, capacity=self.capacity, tier=self.tier), virtual_machine_profile=self.compute_models.VirtualMachineScaleSetVMProfile(os_profile=os_profile, storage_profile=self.compute_models.VirtualMachineScaleSetStorageProfile(os_disk=self.compute_models.VirtualMachineScaleSetOSDisk(managed_disk=managed_disk, create_option=self.compute_models.DiskCreateOptionTypes.from_image, caching=self.os_disk_caching), image_reference=image_reference), network_profile=self.compute_models.VirtualMachineScaleSetNetworkProfile(network_interface_configurations=[self.compute_models.VirtualMachineScaleSetNetworkConfiguration(name=self.name, primary=True, ip_configurations=[self.compute_models.VirtualMachineScaleSetIPConfiguration(name='default', subnet=self.compute_models.ApiEntityReference(id=subnet.id), primary=True, load_balancer_backend_address_pools=load_balancer_backend_address_pools, load_balancer_inbound_nat_pools=load_balancer_inbound_nat_pools, application_gateway_backend_address_pools=application_gateway_backend_address_pools)], enable_accelerated_networking=self.enable_accelerated_networking, network_security_group=self.security_group)])), zones=self.zones) if self.admin_password: vmss_resource.virtual_machine_profile.os_profile.admin_password = self.admin_password if self.os_type == 'Linux' and os_profile: vmss_resource.virtual_machine_profile.os_profile.linux_configuration = 
self.compute_models.LinuxConfiguration(disable_password_authentication=disable_ssh_password) if self.ssh_public_keys: ssh_config = self.compute_models.SshConfiguration() ssh_config.public_keys = [self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys] vmss_resource.virtual_machine_profile.os_profile.linux_configuration.ssh = ssh_config if self.data_disks: data_disks = [] for data_disk in self.data_disks: data_disk_managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=data_disk.get('managed_disk_type', None)) data_disk['caching'] = data_disk.get('caching', self.compute_models.CachingTypes.read_only) data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(lun=data_disk.get('lun', None), caching=data_disk.get('caching', None), create_option=self.compute_models.DiskCreateOptionTypes.empty, disk_size_gb=data_disk.get('disk_size_gb', None), managed_disk=data_disk_managed_disk)) vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks self.log('Create virtual machine with parameters:') try: poller = self.compute_client.virtual_machine_scale_sets.create_or_update(self.resource_group, self.name, vmss_resource) self.get_poller_result(poller) except CloudError as exc: self.fail('Error creating or updating virtual machine {0} - {1}'.format(self.name, str(exc))) elif self.differences and len(self.differences) > 0: self.log('Update virtual machine scale set {0}'.format(self.name)) self.results['actions'].append('Updated VMSS {0}'.format(self.name)) try: vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name) vmss_resource = vmss except CloudError as exc: self.fail('Error getting virtual machine scale set {0} - {1}'.format(self.name, str(exc))) vmss_resource.virtual_machine_profile.storage_profile.os_disk.caching = self.os_disk_caching vmss_resource.sku.capacity = self.capacity vmss_resource.overprovision = self.overprovision vmss_resource.single_placement_group = self.single_placement_group if support_lb_change: if self.load_balancer: vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].load_balancer_backend_address_pools = load_balancer_backend_address_pools vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].application_gateway_backend_address_pools = None elif self.application_gateway: vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].application_gateway_backend_address_pools = application_gateway_backend_address_pools vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].load_balancer_backend_address_pools = None vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0].ip_configurations[0].load_balancer_inbound_nat_pools = None if self.data_disks is not None: data_disks = [] for data_disk in self.data_disks: data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(lun=data_disk['lun'], caching=data_disk['caching'], create_option=self.compute_models.DiskCreateOptionTypes.empty, disk_size_gb=data_disk['disk_size_gb'], 
managed_disk=self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=data_disk['managed_disk_type']))) vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks if image_reference is not None: vmss_resource.virtual_machine_profile.storage_profile.image_reference = image_reference self.log('Update virtual machine with parameters:') try: poller = self.compute_client.virtual_machine_scale_sets.create_or_update(self.resource_group, self.name, vmss_resource) self.get_poller_result(poller) except CloudError as exc: self.fail('Error creating or updating virtual machine {0} - {1}'.format(self.name, str(exc))) result = self.serialize_obj(self.get_vmss(), AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES) result['id'] = self.get_vmss().id result['name'] = self.get_vmss().name result['type'] = self.get_vmss().type result['location'] = self.get_vmss().location result['tags'] = self.get_vmss().tags self.results['ansible_facts']['azure_vmss'] = result elif self.state == 'absent': self.log('Delete virtual machine scale set {0}'.format(self.name)) self.results['ansible_facts']['azure_vmss'] = None self.log('Deleting virtual machine scale set {0}'.format(self.name)) self.results['actions'].append('Deleted virtual machine scale set {0}'.format(self.name)) try: poller = self.compute_client.virtual_machine_scale_sets.delete(self.resource_group, self.name) self.get_poller_result(poller) except CloudError as exc: self.fail('Error deleting virtual machine scale set {0} - {1}'.format(self.name, str(exc))) return True del self.results['actions'] return self.results
AnsibleLabs
positive
def fine_tune_train_and_val(args, recorder): global lowest_val_loss, best_prec1 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' torch.manual_seed(1) cudnn.benchmark = True timer = Timer() (num_class, data_length, image_tmpl) = ft_data_config(args) (train_transforms, test_transforms, eval_transforms) = ft_augmentation_config(args) (train_data_loader, val_data_loader, _, _, _, _) = ft_data_loader_init(args, data_length, image_tmpl, train_transforms, test_transforms, eval_transforms) model = ft_model_config(args, num_class) recorder.record_message('a', '=' * 100) recorder.record_message('a', '-' * 40 + 'finetune' + '-' * 40) recorder.record_message('a', '=' * 100) (train_criterion, val_criterion, optimizer) = ft_optim_init(args, model) tc = TC(args) print('*' * 70 + 'Step2: fine tune' + '*' * 50) for epoch in range(args.ft_start_epoch, args.ft_epochs): timer.tic() ft_adjust_learning_rate(optimizer, args.ft_lr, epoch, args.ft_lr_steps) <DeepExtract> batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top3 = AverageMeter() if MoCo_init: model.eval() else: model.train() end = time.time() for (i, (input, target, index)) in enumerate(train_data_loader): data_time.update(time.time() - end) target = target.cuda() index = index.cuda() inputs = tc(input) target = torch.autograd.Variable(target) output = model(inputs) loss = train_criterion(output, target) (prec1, prec3) = accuracy(output.data, target, topk=(1, 3)) losses.update(loss.data.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top3.update(prec3.item(), input.size(0)) optimizer.zero_grad() loss.backward() optimizer.step() batch_time.update(time.time() - end) end = time.time() if i % args.ft_print_freq == 0: message = 'Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_data_loader), batch_time=batch_time, data_time=data_time, loss=losses, lr=optimizer.param_groups[-1]['lr']) print(message) recorder.record_message('a', message) message = 'Finetune Training: Top1:{} Top3:{}'.format(top1.avg, top3.avg) print(message) recorder.record_message('a', message) (train_prec1, train_loss) = (top1.avg, losses.avg) </DeepExtract> recorder.record_ft_train(train_loss / 5.0, train_prec1 / 100.0) if (epoch + 1) % args.ft_eval_freq == 0: <DeepExtract> batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top3 = AverageMeter() model.eval() end = time.time() with torch.no_grad(): for (i, (input, target, index)) in enumerate(val_data_loader): target = target.cuda() inputs = tc(input) target = torch.autograd.Variable(target) output = model(inputs) loss = val_criterion(output, target) (prec1, prec3) = accuracy(output.data, target, topk=(1, 3)) losses.update(loss.data.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top3.update(prec3.item(), input.size(0)) batch_time.update(time.time() - end) end = time.time() if i % args.ft_print_freq == 0: message = 'Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\t'.format(i, len(val_data_loader), batch_time=batch_time, loss=losses) print(message) recorder.record_message('a', message) message = 'Finetune Eval: Top1:{} Top3:{}'.format(top1.avg, top3.avg) print(message) recorder.record_message('a', message) (val_prec1, val_loss) = (top1.avg, losses.avg) </DeepExtract> recorder.record_ft_val(val_loss / 5.0, val_prec1 / 100.0) is_best = 
val_prec1 > best_prec1 best_prec1 = max(val_prec1, best_prec1) checkpoint = {'epoch': epoch + 1, 'arch': 'i3d', 'state_dict': model.state_dict(), 'best_prec1': best_prec1} recorder.save_ft_model(checkpoint, is_best) timer.toc() left_time = timer.average_time * (args.ft_epochs - epoch) message = 'Step2: fine tune best_prec1 is: {} left time is : {} now is : {}'.format(best_prec1, timer.format(left_time), datetime.now()) print(message) recorder.record_message('a', message) return recorder.filename
def fine_tune_train_and_val(args, recorder): global lowest_val_loss, best_prec1 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' torch.manual_seed(1) cudnn.benchmark = True timer = Timer() (num_class, data_length, image_tmpl) = ft_data_config(args) (train_transforms, test_transforms, eval_transforms) = ft_augmentation_config(args) (train_data_loader, val_data_loader, _, _, _, _) = ft_data_loader_init(args, data_length, image_tmpl, train_transforms, test_transforms, eval_transforms) model = ft_model_config(args, num_class) recorder.record_message('a', '=' * 100) recorder.record_message('a', '-' * 40 + 'finetune' + '-' * 40) recorder.record_message('a', '=' * 100) (train_criterion, val_criterion, optimizer) = ft_optim_init(args, model) tc = TC(args) print('*' * 70 + 'Step2: fine tune' + '*' * 50) for epoch in range(args.ft_start_epoch, args.ft_epochs): timer.tic() ft_adjust_learning_rate(optimizer, args.ft_lr, epoch, args.ft_lr_steps) batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top3 = AverageMeter() if MoCo_init: model.eval() else: model.train() end = time.time() for (i, (input, target, index)) in enumerate(train_data_loader): data_time.update(time.time() - end) target = target.cuda() index = index.cuda() inputs = tc(input) target = torch.autograd.Variable(target) output = model(inputs) loss = train_criterion(output, target) (prec1, prec3) = accuracy(output.data, target, topk=(1, 3)) losses.update(loss.data.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top3.update(prec3.item(), input.size(0)) optimizer.zero_grad() loss.backward() optimizer.step() batch_time.update(time.time() - end) end = time.time() if i % args.ft_print_freq == 0: message = 'Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_data_loader), batch_time=batch_time, data_time=data_time, loss=losses, lr=optimizer.param_groups[-1]['lr']) print(message) recorder.record_message('a', message) message = 'Finetune Training: Top1:{} Top3:{}'.format(top1.avg, top3.avg) print(message) recorder.record_message('a', message) (train_prec1, train_loss) = (top1.avg, losses.avg) recorder.record_ft_train(train_loss / 5.0, train_prec1 / 100.0) if (epoch + 1) % args.ft_eval_freq == 0: batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top3 = AverageMeter() model.eval() end = time.time() with torch.no_grad(): for (i, (input, target, index)) in enumerate(val_data_loader): target = target.cuda() inputs = tc(input) target = torch.autograd.Variable(target) output = model(inputs) loss = val_criterion(output, target) (prec1, prec3) = accuracy(output.data, target, topk=(1, 3)) losses.update(loss.data.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top3.update(prec3.item(), input.size(0)) batch_time.update(time.time() - end) end = time.time() if i % args.ft_print_freq == 0: message = 'Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\t'.format(i, len(val_data_loader), batch_time=batch_time, loss=losses) print(message) recorder.record_message('a', message) message = 'Finetune Eval: Top1:{} Top3:{}'.format(top1.avg, top3.avg) print(message) recorder.record_message('a', message) (val_prec1, val_loss) = (top1.avg, losses.avg) recorder.record_ft_val(val_loss / 5.0, val_prec1 / 100.0) is_best = val_prec1 > best_prec1 best_prec1 = max(val_prec1, best_prec1) 
checkpoint = {'epoch': epoch + 1, 'arch': 'i3d', 'state_dict': model.state_dict(), 'best_prec1': best_prec1} recorder.save_ft_model(checkpoint, is_best) timer.toc() left_time = timer.average_time * (args.ft_epochs - epoch) message = 'Step2: fine tune best_prec1 is: {} left time is : {} now is : {}'.format(best_prec1, timer.format(left_time), datetime.now()) print(message) recorder.record_message('a', message) return recorder.filename
BE
positive
def register(self, subject, avro_schema): """ POST /subjects/(string: subject)/versions Register a schema with the registry under the given subject and receive a schema id. avro_schema must be a parsed schema from the python avro library Multiple instances of the same schema will result in cache misses. :param str subject: subject name :param schema avro_schema: Avro schema to be registered :returns: schema_id :rtype: int """ schemas_to_id = self.subject_to_schema_ids[subject] schema_id = schemas_to_id.get(avro_schema, None) if schema_id is not None: return schema_id url = '/'.join([self.url, 'subjects', subject, 'versions']) body = {'schema': str(avro_schema)} <DeepExtract> if 'POST' not in VALID_METHODS: raise ClientError('Method {} is invalid; valid methods include {}'.format('POST', VALID_METHODS)) if url.startswith('https') and self._is_key_password_provided: response = self._send_https_session_request(url, 'POST', headers, body) try: (result, code) = (json.loads(response.data), response.status) except ValueError: (result, code) = (response.content, response.status) _headers = {'Accept': ACCEPT_HDR} if body: _headers['Content-Length'] = str(len(body)) _headers['Content-Type'] = 'application/vnd.schemaregistry.v1+json' _headers.update(headers) response = self._session.request('POST', url, headers=_headers, json=body) try: (result, code) = (response.json(), response.status_code) except ValueError: (result, code) = (response.content, response.status_code) </DeepExtract> if code == 401 or code == 403: raise ClientError('Unauthorized access. Error code:' + str(code) + ' message:' + str(result)) elif code == 409: raise ClientError('Incompatible Avro schema:' + str(code) + ' message:' + str(result)) elif code == 422: raise ClientError('Invalid Avro schema:' + str(code) + ' message:' + str(result)) elif not (code >= 200 and code <= 299): raise ClientError('Unable to register schema. Error code:' + str(code) + ' message:' + str(result)) schema_id = result['id'] <DeepExtract> if schema_id in self.id_to_schema: avro_schema = self.id_to_schema[schema_id] else: self.id_to_schema[schema_id] = avro_schema if subject: self._add_to_cache(self.subject_to_schema_ids, subject, avro_schema, schema_id) if version: self._add_to_cache(self.subject_to_schema_versions, subject, avro_schema, version) </DeepExtract> return schema_id
def register(self, subject, avro_schema): """ POST /subjects/(string: subject)/versions Register a schema with the registry under the given subject and receive a schema id. avro_schema must be a parsed schema from the python avro library Multiple instances of the same schema will result in cache misses. :param str subject: subject name :param schema avro_schema: Avro schema to be registered :returns: schema_id :rtype: int """ schemas_to_id = self.subject_to_schema_ids[subject] schema_id = schemas_to_id.get(avro_schema, None) if schema_id is not None: return schema_id url = '/'.join([self.url, 'subjects', subject, 'versions']) body = {'schema': str(avro_schema)} if 'POST' not in VALID_METHODS: raise ClientError('Method {} is invalid; valid methods include {}'.format('POST', VALID_METHODS)) if url.startswith('https') and self._is_key_password_provided: response = self._send_https_session_request(url, 'POST', headers, body) try: (result, code) = (json.loads(response.data), response.status) except ValueError: (result, code) = (response.content, response.status) _headers = {'Accept': ACCEPT_HDR} if body: _headers['Content-Length'] = str(len(body)) _headers['Content-Type'] = 'application/vnd.schemaregistry.v1+json' _headers.update(headers) response = self._session.request('POST', url, headers=_headers, json=body) try: (result, code) = (response.json(), response.status_code) except ValueError: (result, code) = (response.content, response.status_code) if code == 401 or code == 403: raise ClientError('Unauthorized access. Error code:' + str(code) + ' message:' + str(result)) elif code == 409: raise ClientError('Incompatible Avro schema:' + str(code) + ' message:' + str(result)) elif code == 422: raise ClientError('Invalid Avro schema:' + str(code) + ' message:' + str(result)) elif not (code >= 200 and code <= 299): raise ClientError('Unable to register schema. Error code:' + str(code) + ' message:' + str(result)) schema_id = result['id'] if schema_id in self.id_to_schema: avro_schema = self.id_to_schema[schema_id] else: self.id_to_schema[schema_id] = avro_schema if subject: self._add_to_cache(self.subject_to_schema_ids, subject, avro_schema, schema_id) if version: self._add_to_cache(self.subject_to_schema_versions, subject, avro_schema, version) return schema_id
confluent-kafka-python
positive
def constraints_evaluations(self, autopass=True): """Return the evaluation of constraints. The "autopass_constraints" enables to just assume that constraints enforced by the mutation space are verified. """ <DeepExtract> circularized = CircularViewProblem(sequence=3 * self.sequence, constraints=self._circularized_specs(self.constraints, central_specs_only=False) if True else [], objectives=self._circularized_specs(self.objectives, central_specs_only=False) if with_objectives else [], logger=self.logger) </DeepExtract> evals = circularized.constraints_evaluations(autopass=autopass) return self._recentered_evaluations(evals)
def constraints_evaluations(self, autopass=True): """Return the evaluation of constraints. The "autopass_constraints" enables to just assume that constraints enforced by the mutation space are verified. """ circularized = CircularViewProblem(sequence=3 * self.sequence, constraints=self._circularized_specs(self.constraints, central_specs_only=False) if True else [], objectives=self._circularized_specs(self.objectives, central_specs_only=False) if with_objectives else [], logger=self.logger) evals = circularized.constraints_evaluations(autopass=autopass) return self._recentered_evaluations(evals)
DnaChisel
positive
def decode(self, codedict): """decode(code) -> list Given a prefix code (a dict mapping symbols to bitarrays), decode the content of the bitarray and return the list of symbols.""" <DeepExtract> if not isinstance(codedict, dict): raise TypeError('dictionary expected') if len(codedict) == 0: raise ValueError('prefix code empty') for (k, v) in codedict.items(): if not isinstance(v, bitarray): raise TypeError('bitarray expected for dictionary value') if v.length() == 0: raise ValueError('non-empty bitarray expected') </DeepExtract> return self._decode(_mk_tree(codedict))
def decode(self, codedict): """decode(code) -> list Given a prefix code (a dict mapping symbols to bitarrays), decode the content of the bitarray and return the list of symbols.""" if not isinstance(codedict, dict): raise TypeError('dictionary expected') if len(codedict) == 0: raise ValueError('prefix code empty') for (k, v) in codedict.items(): if not isinstance(v, bitarray): raise TypeError('bitarray expected for dictionary value') if v.length() == 0: raise ValueError('non-empty bitarray expected') return self._decode(_mk_tree(codedict))
acestream-openelec
positive
def parse_args(self, args=None, namespace=None): <DeepExtract> if args is None: args = _sys.argv[1:] if namespace is None: namespace = Namespace() for action in self._actions: if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: default = action.default if isinstance(action.default, _basestring): default = self._get_value(action, default) setattr(namespace, action.dest, default) for dest in self._defaults: if not hasattr(namespace, dest): setattr(namespace, dest, self._defaults[dest]) try: (args, argv) = self._parse_known_args(args, namespace) except ArgumentError: err = _sys.exc_info()[1] self.error(str(err)) </DeepExtract> if argv: msg = _('unrecognized arguments: %s') <DeepExtract> self.print_usage(_sys.stderr) self.exit(2, _('%s: error: %s\n') % (self.prog, msg % ' '.join(argv))) </DeepExtract> return args
def parse_args(self, args=None, namespace=None): if args is None: args = _sys.argv[1:] if namespace is None: namespace = Namespace() for action in self._actions: if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: default = action.default if isinstance(action.default, _basestring): default = self._get_value(action, default) setattr(namespace, action.dest, default) for dest in self._defaults: if not hasattr(namespace, dest): setattr(namespace, dest, self._defaults[dest]) try: (args, argv) = self._parse_known_args(args, namespace) except ArgumentError: err = _sys.exc_info()[1] self.error(str(err)) if argv: msg = _('unrecognized arguments: %s') self.print_usage(_sys.stderr) self.exit(2, _('%s: error: %s\n') % (self.prog, msg % ' '.join(argv))) return args
BioNLP-2016
positive
def AddNodeBuffer(currentstate=None, issaved=False): <DeepExtract> global CurrentID CurrentID += 1 self.NodeIndex = CurrentID </DeepExtract> self.UndoBuffers[self.NodeIndex] = UndoBuffer(currentstate, issaved) self.FilePaths[self.NodeIndex] = '' self.FileNames[self.NodeIndex] = '' return self.NodeIndex
def AddNodeBuffer(currentstate=None, issaved=False): global CurrentID CurrentID += 1 self.NodeIndex = CurrentID self.UndoBuffers[self.NodeIndex] = UndoBuffer(currentstate, issaved) self.FilePaths[self.NodeIndex] = '' self.FileNames[self.NodeIndex] = '' return self.NodeIndex
CANFestivino
positive
def __call__(self, expression, target): if isinstance(target, str): target = store(target) try: <DeepExtract> if isinstance(target, str): target = store(target) cached = self.cache.get(expression) if cached is not None: stmts = [ast.Assign(targets=[target], value=cached)] elif isinstance(expression, ast.expr): stmts = [ast.Assign(targets=[target], value=expression)] else: if isinstance(expression, str): expression = Value(expression, True) kind = type(expression).__name__ visitor = getattr(self, 'visit_%s' % kind) stmts = visitor(expression, target) target_id = getattr(target, 'id', target) comment = Comment(' {!r} -> {}'.format(expression, target_id)) stmts.insert(0, comment) stmts = stmts </DeepExtract> except ExpressionError as exc: if self.strict: raise p = pickle.dumps(exc, -1) stmts = template('__exc = loads(p)', loads=self.loads_symbol, p=ast.Str(s=p)) stmts += set_token([ast.Raise(exc=load('__exc'))], exc.token) for stmt in stmts: self.visitor(stmt) return stmts
def __call__(self, expression, target): if isinstance(target, str): target = store(target) try: if isinstance(target, str): target = store(target) cached = self.cache.get(expression) if cached is not None: stmts = [ast.Assign(targets=[target], value=cached)] elif isinstance(expression, ast.expr): stmts = [ast.Assign(targets=[target], value=expression)] else: if isinstance(expression, str): expression = Value(expression, True) kind = type(expression).__name__ visitor = getattr(self, 'visit_%s' % kind) stmts = visitor(expression, target) target_id = getattr(target, 'id', target) comment = Comment(' {!r} -> {}'.format(expression, target_id)) stmts.insert(0, comment) stmts = stmts except ExpressionError as exc: if self.strict: raise p = pickle.dumps(exc, -1) stmts = template('__exc = loads(p)', loads=self.loads_symbol, p=ast.Str(s=p)) stmts += set_token([ast.Raise(exc=load('__exc'))], exc.token) for stmt in stmts: self.visitor(stmt) return stmts
chameleon
positive
def load(dataset=dataset): <DeepExtract> data_dir = '../data/' + dataset root = os.path.join(data_dir, 'rt-polaritydata') saved_path = data_dir if not os.path.exists(saved_path): os.makedirs(saved_path) datas = [] for polarity in ('neg', 'pos'): filename = os.path.join(root, polarity) records = [] with open(filename, encoding='utf-8', errors='replace') as f: for (i, line) in enumerate(f): records.append({'text': clean_str(line).strip(), 'label': 0 if polarity == 'pos' else 1}) datas.append(pd.DataFrame(records)) df = pd.concat(datas) from sklearn.utils import shuffle df = shuffle(df).reset_index() split_index = [True] * int(len(df) * 0.9) + [False] * (len(df) - int(len(df) * 0.9)) train = df[split_index] dev = df[~np.array(split_index)] train_filename = os.path.join(saved_path, 'train.csv') test_filename = os.path.join(saved_path, 'dev.csv') train[['text', 'label']].to_csv(train_filename, encoding='utf-8', sep='\t', index=False, header=None) dev[['text', 'label']].to_csv(test_filename, encoding='utf-8', sep='\t', index=False, header=None) print('processing into formated files over') </DeepExtract> data_dir = '../data/' + dataset datas = [] for data_name in ['train.csv', 'dev.csv']: if data_name == 'train.csv': data_file = os.path.join(data_dir, data_name) data = pd.read_csv(data_file, header=None, sep='\t', names=['question', 'flag'], quoting=3).fillna('WASHINGTON') datas.append(data) if data_name == 'dev.csv': data_file = os.path.join(data_dir, data_name) data = pd.read_csv(data_file, header=None, sep='\t', names=['question', 'flag'], quoting=3).fillna('WASHINGTON') datas.append(data) return tuple(datas)
def load(dataset=dataset): data_dir = '../data/' + dataset root = os.path.join(data_dir, 'rt-polaritydata') saved_path = data_dir if not os.path.exists(saved_path): os.makedirs(saved_path) datas = [] for polarity in ('neg', 'pos'): filename = os.path.join(root, polarity) records = [] with open(filename, encoding='utf-8', errors='replace') as f: for (i, line) in enumerate(f): records.append({'text': clean_str(line).strip(), 'label': 0 if polarity == 'pos' else 1}) datas.append(pd.DataFrame(records)) df = pd.concat(datas) from sklearn.utils import shuffle df = shuffle(df).reset_index() split_index = [True] * int(len(df) * 0.9) + [False] * (len(df) - int(len(df) * 0.9)) train = df[split_index] dev = df[~np.array(split_index)] train_filename = os.path.join(saved_path, 'train.csv') test_filename = os.path.join(saved_path, 'dev.csv') train[['text', 'label']].to_csv(train_filename, encoding='utf-8', sep='\t', index=False, header=None) dev[['text', 'label']].to_csv(test_filename, encoding='utf-8', sep='\t', index=False, header=None) print('processing into formated files over') data_dir = '../data/' + dataset datas = [] for data_name in ['train.csv', 'dev.csv']: if data_name == 'train.csv': data_file = os.path.join(data_dir, data_name) data = pd.read_csv(data_file, header=None, sep='\t', names=['question', 'flag'], quoting=3).fillna('WASHINGTON') datas.append(data) if data_name == 'dev.csv': data_file = os.path.join(data_dir, data_name) data = pd.read_csv(data_file, header=None, sep='\t', names=['question', 'flag'], quoting=3).fillna('WASHINGTON') datas.append(data) return tuple(datas)
complex-order
positive
def ResNet152(include_top=True, weights=None, input_shape=None, pooling=None, classes=1000, weight_decay=0.0001, norm_fn=None, **kwargs): def stack_fn(x): <DeepExtract> x = block1(x, 64, stride=1, avg_down=avg_down, norm_fn=norm_fn, name='conv2' + '_block1', weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 64, conv_shortcut=False, avg_down=avg_down, norm_fn=norm_fn, name='conv2' + '_block' + str(i), weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block1(x, 128, stride=stride1, avg_down=avg_down, norm_fn=norm_fn, name='conv3' + '_block1', weight_decay=weight_decay) for i in range(2, 8 + 1): x = block1(x, 128, conv_shortcut=False, avg_down=avg_down, norm_fn=norm_fn, name='conv3' + '_block' + str(i), weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block1(x, 256, stride=stride1, avg_down=avg_down, norm_fn=norm_fn, name='conv4' + '_block1', weight_decay=weight_decay) for i in range(2, 36 + 1): x = block1(x, 256, conv_shortcut=False, avg_down=avg_down, norm_fn=norm_fn, name='conv4' + '_block' + str(i), weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block1(x, 512, stride=stride1, avg_down=avg_down, norm_fn=norm_fn, name='conv5' + '_block1', weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 512, conv_shortcut=False, avg_down=avg_down, norm_fn=norm_fn, name='conv5' + '_block' + str(i), weight_decay=weight_decay) x = x </DeepExtract> return x return ResNet(stack_fn, norm_fn, False, True, 'resnet152', include_top, weights, input_shape, pooling, classes, **kwargs)
def ResNet152(include_top=True, weights=None, input_shape=None, pooling=None, classes=1000, weight_decay=0.0001, norm_fn=None, **kwargs): def stack_fn(x): x = block1(x, 64, stride=1, avg_down=avg_down, norm_fn=norm_fn, name='conv2' + '_block1', weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 64, conv_shortcut=False, avg_down=avg_down, norm_fn=norm_fn, name='conv2' + '_block' + str(i), weight_decay=weight_decay) x = x x = block1(x, 128, stride=stride1, avg_down=avg_down, norm_fn=norm_fn, name='conv3' + '_block1', weight_decay=weight_decay) for i in range(2, 8 + 1): x = block1(x, 128, conv_shortcut=False, avg_down=avg_down, norm_fn=norm_fn, name='conv3' + '_block' + str(i), weight_decay=weight_decay) x = x x = block1(x, 256, stride=stride1, avg_down=avg_down, norm_fn=norm_fn, name='conv4' + '_block1', weight_decay=weight_decay) for i in range(2, 36 + 1): x = block1(x, 256, conv_shortcut=False, avg_down=avg_down, norm_fn=norm_fn, name='conv4' + '_block' + str(i), weight_decay=weight_decay) x = x x = block1(x, 512, stride=stride1, avg_down=avg_down, norm_fn=norm_fn, name='conv5' + '_block1', weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 512, conv_shortcut=False, avg_down=avg_down, norm_fn=norm_fn, name='conv5' + '_block' + str(i), weight_decay=weight_decay) x = x return x return ResNet(stack_fn, norm_fn, False, True, 'resnet152', include_top, weights, input_shape, pooling, classes, **kwargs)
deep-learning-models
positive
def test_noising_dataset_without_eos(self): """ Similar to test noising dataset with eos except that we have to set use_append_eos_dataset=True so that we wrap the source dataset in the AppendEosDataset when using it as the target in LanguagePairDataset. """ <DeepExtract> vocab = Dictionary() vocab.add_symbol('he@@') vocab.add_symbol('llo') vocab.add_symbol('how') vocab.add_symbol('are') vocab.add_symbol('y@@') vocab.add_symbol('ou') vocab.add_symbol('n@@') vocab.add_symbol('ew') vocab.add_symbol('or@@') vocab.add_symbol('k') src_tokens = [['he@@', 'llo', 'n@@', 'ew', 'y@@', 'or@@', 'k'], ['how', 'are', 'y@@', 'ou']] src_len = [len(x) for x in src_tokens] if False: src_len = [length + 1 for length in src_len] x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad()) for i in range(len(src_tokens)): for j in range(len(src_tokens[i])): x[i][j] = vocab.index(src_tokens[i][j]) if False: x[i][j + 1] = vocab.eos() x = x.transpose(1, 0) (src_dict, src_tokens, _) = (vocab, x, torch.LongTensor(src_len)) </DeepExtract> src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append(utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())) <DeepExtract> src_dataset = test_utils.TestDataset(data=src_tokens_no_pad) noising_dataset = noising.NoisingDataset(src_dataset=src_dataset, src_dict=src_dict, seed=1234, max_word_shuffle_distance=3, word_dropout_prob=0.2, word_blanking_prob=0.2, noising_class=noising.UnsupervisedMTNoising) tgt = src_dataset if True: tgt = AppendEosDataset(src_dataset, src_dict.eos()) language_pair_dataset = LanguagePairDataset(src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict) dataloader = torch.utils.data.DataLoader(dataset=language_pair_dataset, batch_size=2, collate_fn=language_pair_dataset.collater) denoising_batch_result = next(iter(dataloader)) denoising_batch_result = denoising_batch_result </DeepExtract> (eos, pad) = (src_dict.eos(), src_dict.pad()) expected_src = torch.LongTensor([[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]) expected_tgt = torch.LongTensor([[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]) generated_src = denoising_batch_result['net_input']['src_tokens'] tgt_tokens = denoising_batch_result['target'] <DeepExtract> self.assertEqual(expected_src.size(), generated_src.size(), 'size mismatch') self.assertEqual(expected_src.ne(generated_src).long().sum(), 0) </DeepExtract> <DeepExtract> self.assertEqual(expected_tgt.size(), tgt_tokens.size(), 'size mismatch') self.assertEqual(expected_tgt.ne(tgt_tokens).long().sum(), 0) </DeepExtract>
def test_noising_dataset_without_eos(self): """ Similar to test noising dataset with eos except that we have to set use_append_eos_dataset=True so that we wrap the source dataset in the AppendEosDataset when using it as the target in LanguagePairDataset. """ vocab = Dictionary() vocab.add_symbol('he@@') vocab.add_symbol('llo') vocab.add_symbol('how') vocab.add_symbol('are') vocab.add_symbol('y@@') vocab.add_symbol('ou') vocab.add_symbol('n@@') vocab.add_symbol('ew') vocab.add_symbol('or@@') vocab.add_symbol('k') src_tokens = [['he@@', 'llo', 'n@@', 'ew', 'y@@', 'or@@', 'k'], ['how', 'are', 'y@@', 'ou']] src_len = [len(x) for x in src_tokens] if False: src_len = [length + 1 for length in src_len] x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad()) for i in range(len(src_tokens)): for j in range(len(src_tokens[i])): x[i][j] = vocab.index(src_tokens[i][j]) if False: x[i][j + 1] = vocab.eos() x = x.transpose(1, 0) (src_dict, src_tokens, _) = (vocab, x, torch.LongTensor(src_len)) src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append(utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())) src_dataset = test_utils.TestDataset(data=src_tokens_no_pad) noising_dataset = noising.NoisingDataset(src_dataset=src_dataset, src_dict=src_dict, seed=1234, max_word_shuffle_distance=3, word_dropout_prob=0.2, word_blanking_prob=0.2, noising_class=noising.UnsupervisedMTNoising) tgt = src_dataset if True: tgt = AppendEosDataset(src_dataset, src_dict.eos()) language_pair_dataset = LanguagePairDataset(src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict) dataloader = torch.utils.data.DataLoader(dataset=language_pair_dataset, batch_size=2, collate_fn=language_pair_dataset.collater) denoising_batch_result = next(iter(dataloader)) denoising_batch_result = denoising_batch_result (eos, pad) = (src_dict.eos(), src_dict.pad()) expected_src = torch.LongTensor([[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]) expected_tgt = torch.LongTensor([[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]) generated_src = denoising_batch_result['net_input']['src_tokens'] tgt_tokens = denoising_batch_result['target'] self.assertEqual(expected_src.size(), generated_src.size(), 'size mismatch') self.assertEqual(expected_src.ne(generated_src).long().sum(), 0) self.assertEqual(expected_tgt.size(), tgt_tokens.size(), 'size mismatch') self.assertEqual(expected_tgt.ne(tgt_tokens).long().sum(), 0) </DeepExtract>
control-length
positive
def save(self, project: Project, example: Example, user: User): <DeepExtract> mapping = {c.text: c for c in self.label_type.objects.filter(project=project)} annotations = [] for label in self.labels: if label['label'] not in mapping: continue label['example'] = example label['label'] = mapping[label['label']] label['user'] = user annotations.append(self.model(**label)) labels = annotations </DeepExtract> labels = self.model.objects.filter_annotatable_labels(labels, project) self.model.objects.bulk_create(labels)
def save(self, project: Project, example: Example, user: User): mapping = {c.text: c for c in self.label_type.objects.filter(project=project)} annotations = [] for label in self.labels: if label['label'] not in mapping: continue label['example'] = example label['label'] = mapping[label['label']] label['user'] = user annotations.append(self.model(**label)) labels = annotations labels = self.model.objects.filter_annotatable_labels(labels, project) self.model.objects.bulk_create(labels)
doccano
positive
def decryptSessionToken(self, username, request): try: <DeepExtract> result = self.dbConnector.execute('SELECT last_login, session_token, secret_token FROM aide_admin.user WHERE name = %s;', (username,), numReturn=1) if not len(result): userdata = None result = result[0] userdata = result </DeepExtract> return request.get_cookie('session_token', secret=userdata['secret_token']) except: return None
def decryptSessionToken(self, username, request): try: result = self.dbConnector.execute('SELECT last_login, session_token, secret_token FROM aide_admin.user WHERE name = %s;', (username,), numReturn=1) if not len(result): userdata = None result = result[0] userdata = result return request.get_cookie('session_token', secret=userdata['secret_token']) except: return None
aerial_wildlife_detection
positive
def is_incorrect(self, model: str=None) -> bool: """ Check if the model has a correct prediction. This function gets the prediction (``Label``) from the model, and then call ``prediction.is_incorrect()``. Parameters ---------- model : str, optional The selected model, by default ``None``. If ``None``, resolve to ``Instance.model``. Returns ------- bool If the model is incorrect. """ try: <DeepExtract> if 'prediction' == 'instance': prediction = self.key() if 'prediction' in self.entries: output = getattr(self, 'prediction', None) prediction = output elif 'prediction' == 'groundtruth': groundtruths = getattr(self, 'groundtruths', []) groundtruths = sorted(groundtruths, key=lambda g: getattr(g, 'count', -1), reverse=True) prediction = groundtruths[0] if groundtruths else None elif 'prediction' == 'prediction': predictions = getattr(self, 'predictions', []) predictions = [p for p in predictions if p.model == Instance.resolve_default_model(model)] prediction = predictions[0] if predictions else None prediction = None </DeepExtract> if prediction: return prediction.is_incorrect() return False except: raise
def is_incorrect(self, model: str=None) -> bool: """ Check if the model has a correct prediction. This function gets the prediction (``Label``) from the model, and then call ``prediction.is_incorrect()``. Parameters ---------- model : str, optional The selected model, by default ``None``. If ``None``, resolve to ``Instance.model``. Returns ------- bool If the model is incorrect. """ try: if 'prediction' == 'instance': prediction = self.key() if 'prediction' in self.entries: output = getattr(self, 'prediction', None) prediction = output elif 'prediction' == 'groundtruth': groundtruths = getattr(self, 'groundtruths', []) groundtruths = sorted(groundtruths, key=lambda g: getattr(g, 'count', -1), reverse=True) prediction = groundtruths[0] if groundtruths else None elif 'prediction' == 'prediction': predictions = getattr(self, 'predictions', []) predictions = [p for p in predictions if p.model == Instance.resolve_default_model(model)] prediction = predictions[0] if predictions else None prediction = None if prediction: return prediction.is_incorrect() return False except: raise
errudite
positive
def check_call_arg_value(self, argument_name, argument_values=None): """Checks for a value of a named argument in a function call. Returns none if the specified argument is not found. :param argument_name: A string - name of the argument to look for :param argument_values: the value, or list of values to test against :return: Boolean True if argument found and matched, False if found and not matched, None if argument not found at all """ <DeepExtract> kwd_values = self.call_keywords if kwd_values is not None and argument_name in kwd_values: arg_value = kwd_values[argument_name] </DeepExtract> if arg_value is not None: if not isinstance(argument_values, list): argument_values = list((argument_values,)) for val in argument_values: if arg_value == val: return True return False else: return None
def check_call_arg_value(self, argument_name, argument_values=None): """Checks for a value of a named argument in a function call. Returns none if the specified argument is not found. :param argument_name: A string - name of the argument to look for :param argument_values: the value, or list of values to test against :return: Boolean True if argument found and matched, False if found and not matched, None if argument not found at all """ kwd_values = self.call_keywords if kwd_values is not None and argument_name in kwd_values: arg_value = kwd_values[argument_name] if arg_value is not None: if not isinstance(argument_values, list): argument_values = list((argument_values,)) for val in argument_values: if arg_value == val: return True return False else: return None
bandit
positive
def plot_results(title, y2y1_list, results, outfilename=None): """Plot the results. **Parameters:** title : string The plot title y2y1_list : list A list of tuples. Each tuple has to be in the format ``(y2, y1)``. Each member of the tuple has to be a valid identifier. You can check the possible voltage and current identifiers in the result set calling ``res.keys()``, where ``res`` is a solution object. result : solution object or derivate The results to be plotted. outfilename : string, optional The filename of the output file. If left unset, the plot will not be written to disk. The format is set through ``options.plotting_outtype``. **Returns:** ``None``. """ if results is None: printing.print_warning('No results available for plotting. Skipping.') return fig = pylab.figure(figsize=options.plotting_display_figsize) analysis = results.sol_type.upper() gdata = [] (x, xlabel) = (results.get_x(), results.get_xlabel()) xunit = results.units[xlabel] yvu = [] for (y2label, y1label) in y2y1_list: if y1label is not None and y1label != '': try: <DeepExtract> if y1label[0] == y1label[-1] == '|': data = np.absolute(results[y1label[1:-1]]) units = results.units[y1label[1:-1]] elif y1label[0:4] == 'arg(' and y1label[-1] == ')': data = np.angle(results[y1label[4:-1]], deg=options.ac_phase_in_deg) units = results.units[y1label[4:-1]] else: data = results[y1label] units = results.units[y1label] (data1, _) = (data, units) </DeepExtract> except ValueError as e: printing.print_warning(str(e) + ' ' + y1label) continue line_label = y2label + '-' + y1label else: line_label = y2label data1 = 0 try: <DeepExtract> if y2label[0] == y2label[-1] == '|': data = np.absolute(results[y2label[1:-1]]) units = results.units[y2label[1:-1]] elif y2label[0:4] == 'arg(' and y2label[-1] == ')': data = np.angle(results[y2label[4:-1]], deg=options.ac_phase_in_deg) units = results.units[y2label[4:-1]] else: data = results[y2label] units = results.units[y2label] (data2, units) = (data, units) </DeepExtract> except ValueError as e: printing.print_warning(str(e) + ' ' + y2label) continue yvu += [(line_label, units)] gdata.append((data2 - data1, line_label)) if xlabel == 'f': xlog = True else: xlog = False <DeepExtract> pylab.figure(fig.number) pylab.title(title.upper()) ax = pylab.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.xaxis.grid(False) ax.yaxis.grid(False) if log or xlog: ax.set_xscale('log') pylab.xlabel('%s [%s]' % (xlabel, xunit)) yunits = [] yinitials = [] for (yv, yu) in yvu: yv = yv[:].replace('|', '') if yu not in yunits: yunits.append(yu) yinitials.append(yv[0]) ylabel = '' for (yi, yu) in zip(yinitials, yunits): ylabel += '%s [%s] , ' % (yi, yu) ylabel = ylabel[:-3] pylab.ylabel(ylabel) if log or ylog: ax.set_yscale('log') </DeepExtract> ms = 7.0 / (1.0 + max(np.log(len(x) / 100.0), 0)) ms = ms if ms > 2 else 0.0 (ymax, ymin) = (None, None) for (y, label) in gdata: [line] = pylab.plot(x, y, options.plotting_style, label=label + ' (' + analysis + ')', ms=ms, mfc='w', lw=options.plotting_lw, mew=options.plotting_lw) line.set_mec(line.get_color()) ymax = y.max() if ymax is None or y.max() > ymax else ymax ymin = y.min() if ymin is None or y.min() < ymin else ymin pylab.xlim((x.min(), x.max())) pylab.ylim((ymin - (ymax - ymin) * 0.01, ymax + (ymax - ymin) * 0.01)) pylab.grid(True) pylab.legend() if outfilename is not None and options.plotting_outtype is not None: <DeepExtract> if fig is None: fig = pylab.gcf() fig.set_size_inches(*options.plotting_save_figsize) pylab.savefig(outfilename, dpi=100, bbox_inches='tight', format=options.plotting_outtype, pad=0.1) fig.set_size_inches(*options.plotting_display_figsize) </DeepExtract> return
def plot_results(title, y2y1_list, results, outfilename=None): """Plot the results. **Parameters:** title : string The plot title y2y1_list : list A list of tuples. Each tuple has to be in the format ``(y2, y1)``. Each member of the tuple has to be a valid identifier. You can check the possible voltage and current identifiers in the result set calling ``res.keys()``, where ``res`` is a solution object. result : solution object or derivate The results to be plotted. outfilename : string, optional The filename of the output file. If left unset, the plot will not be written to disk. The format is set through ``options.plotting_outtype``. **Returns:** ``None``. """ if results is None: printing.print_warning('No results available for plotting. Skipping.') return fig = pylab.figure(figsize=options.plotting_display_figsize) analysis = results.sol_type.upper() gdata = [] (x, xlabel) = (results.get_x(), results.get_xlabel()) xunit = results.units[xlabel] yvu = [] for (y2label, y1label) in y2y1_list: if y1label is not None and y1label != '': try: if y1label[0] == y1label[-1] == '|': data = np.absolute(results[y1label[1:-1]]) units = results.units[y1label[1:-1]] elif y1label[0:4] == 'arg(' and y1label[-1] == ')': data = np.angle(results[y1label[4:-1]], deg=options.ac_phase_in_deg) units = results.units[y1label[4:-1]] else: data = results[y1label] units = results.units[y1label] (data1, _) = (data, units) except ValueError as e: printing.print_warning(str(e) + ' ' + y1label) continue line_label = y2label + '-' + y1label else: line_label = y2label data1 = 0 try: if y2label[0] == y2label[-1] == '|': data = np.absolute(results[y2label[1:-1]]) units = results.units[y2label[1:-1]] elif y2label[0:4] == 'arg(' and y2label[-1] == ')': data = np.angle(results[y2label[4:-1]], deg=options.ac_phase_in_deg) units = results.units[y2label[4:-1]] else: data = results[y2label] units = results.units[y2label] (data2, units) = (data, units) except ValueError as e: printing.print_warning(str(e) + ' ' + y2label) continue yvu += [(line_label, units)] gdata.append((data2 - data1, line_label)) if xlabel == 'f': xlog = True else: xlog = False pylab.figure(fig.number) pylab.title(title.upper()) ax = pylab.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.xaxis.grid(False) ax.yaxis.grid(False) if log or xlog: ax.set_xscale('log') pylab.xlabel('%s [%s]' % (xlabel, xunit)) yunits = [] yinitials = [] for (yv, yu) in yvu: yv = yv[:].replace('|', '') if yu not in yunits: yunits.append(yu) yinitials.append(yv[0]) ylabel = '' for (yi, yu) in zip(yinitials, yunits): ylabel += '%s [%s] , ' % (yi, yu) ylabel = ylabel[:-3] pylab.ylabel(ylabel) if log or ylog: ax.set_yscale('log') ms = 7.0 / (1.0 + max(np.log(len(x) / 100.0), 0)) ms = ms if ms > 2 else 0.0 (ymax, ymin) = (None, None) for (y, label) in gdata: [line] = pylab.plot(x, y, options.plotting_style, label=label + ' (' + analysis + ')', ms=ms, mfc='w', lw=options.plotting_lw, mew=options.plotting_lw) line.set_mec(line.get_color()) ymax = y.max() if ymax is None or y.max() > ymax else ymax ymin = y.min() if ymin is None or y.min() < ymin else ymin pylab.xlim((x.min(), x.max())) pylab.ylim((ymin - (ymax - ymin) * 0.01, ymax + (ymax - ymin) * 0.01)) pylab.grid(True) pylab.legend() if outfilename is not None and options.plotting_outtype is not None: if fig is None: fig = pylab.gcf() fig.set_size_inches(*options.plotting_save_figsize) pylab.savefig(outfilename, dpi=100, bbox_inches='tight', format=options.plotting_outtype, pad=0.1) fig.set_size_inches(*options.plotting_display_figsize) return
ahkab
positive
def train(self): """ Run training. Returns: OrderedDict of results, if evaluation is enabled. Otherwise None. """ <DeepExtract> logger = logging.getLogger('adet.trainer') logger.info('Starting training from iteration {}'.format(self.start_iter)) self.iter = self.start_iter = self.start_iter self.max_iter = self.max_iter with EventStorage(self.start_iter) as self.storage: self.before_train() for self.iter in range(self.start_iter, self.max_iter): self.before_step() self.run_step() self.after_step() self.after_train() </DeepExtract> if hasattr(self, '_last_eval_results') and comm.is_main_process(): verify_results(self.cfg, self._last_eval_results) return self._last_eval_results
def train(self): """ Run training. Returns: OrderedDict of results, if evaluation is enabled. Otherwise None. """ logger = logging.getLogger('adet.trainer') logger.info('Starting training from iteration {}'.format(self.start_iter)) self.iter = self.start_iter = self.start_iter self.max_iter = self.max_iter with EventStorage(self.start_iter) as self.storage: self.before_train() for self.iter in range(self.start_iter, self.max_iter): self.before_step() self.run_step() self.after_step() self.after_train() if hasattr(self, '_last_eval_results') and comm.is_main_process(): verify_results(self.cfg, self._last_eval_results) return self._last_eval_results
AdelaiDet
positive
def apply_request_checksum(request): checksum_context = request.get('context', {}).get('checksum', {}) algorithm = checksum_context.get('request_algorithm') if not algorithm: return if algorithm == 'conditional-md5': conditionally_calculate_md5(request) elif algorithm['in'] == 'header': _apply_request_header_checksum(request) elif algorithm['in'] == 'trailer': <DeepExtract> checksum_context = request.get('context', {}).get('checksum', {}) algorithm = checksum_context.get('request_algorithm') location_name = algorithm['name'] checksum_cls = _CHECKSUM_CLS.get(algorithm['algorithm']) headers = request['headers'] body = request['body'] if location_name in headers: return headers['Transfer-Encoding'] = 'chunked' if 'Content-Encoding' in headers: headers['Content-Encoding'] += ',aws-chunked' else: headers['Content-Encoding'] = 'aws-chunked' headers['X-Amz-Trailer'] = location_name content_length = determine_content_length(body) if content_length is not None: headers['X-Amz-Decoded-Content-Length'] = str(content_length) if isinstance(body, (bytes, bytearray)): body = io.BytesIO(body) request['body'] = AioAwsChunkedWrapper(body, checksum_cls=checksum_cls, checksum_name=location_name) </DeepExtract> else: raise FlexibleChecksumError(error_msg='Unknown checksum variant: %s' % algorithm['in'])
def apply_request_checksum(request): checksum_context = request.get('context', {}).get('checksum', {}) algorithm = checksum_context.get('request_algorithm') if not algorithm: return if algorithm == 'conditional-md5': conditionally_calculate_md5(request) elif algorithm['in'] == 'header': _apply_request_header_checksum(request) elif algorithm['in'] == 'trailer': checksum_context = request.get('context', {}).get('checksum', {}) algorithm = checksum_context.get('request_algorithm') location_name = algorithm['name'] checksum_cls = _CHECKSUM_CLS.get(algorithm['algorithm']) headers = request['headers'] body = request['body'] if location_name in headers: return headers['Transfer-Encoding'] = 'chunked' if 'Content-Encoding' in headers: headers['Content-Encoding'] += ',aws-chunked' else: headers['Content-Encoding'] = 'aws-chunked' headers['X-Amz-Trailer'] = location_name content_length = determine_content_length(body) if content_length is not None: headers['X-Amz-Decoded-Content-Length'] = str(content_length) if isinstance(body, (bytes, bytearray)): body = io.BytesIO(body) request['body'] = AioAwsChunkedWrapper(body, checksum_cls=checksum_cls, checksum_name=location_name) else: raise FlexibleChecksumError(error_msg='Unknown checksum variant: %s' % algorithm['in'])
aiobotocore
positive
def Mstep(self, x, z): """ Mstep of the model: maximum likelihood estimation of the parameters of the model Parameters ---------- x : array of shape (nbitems,) input data z array of shape(nbitrems, 2) the membership matrix """ tiny = 1e-15 sz = np.maximum(tiny, np.sum(z, 0)) <DeepExtract> eps = 1e-05 i = np.ravel(np.nonzero(x > 0)) szi = np.sum(z[:, 0][i]) if szi > 0: shape = _compute_c(x[i], z[:, 0][i], eps) scale = np.dot(x[i], z[:, 0][i]) / (szi * shape) else: shape = 1 scale = 1 (self.shape, self.scale) = (shape, scale) </DeepExtract> self.mean = np.dot(x, z[:, 1]) / sz[1] self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1] self.mixt = sz[0] / np.size(x)
def Mstep(self, x, z): """ Mstep of the model: maximum likelihood estimation of the parameters of the model Parameters ---------- x : array of shape (nbitems,) input data z array of shape(nbitrems, 2) the membership matrix """ tiny = 1e-15 sz = np.maximum(tiny, np.sum(z, 0)) eps = 1e-05 i = np.ravel(np.nonzero(x > 0)) szi = np.sum(z[:, 0][i]) if szi > 0: shape = _compute_c(x[i], z[:, 0][i], eps) scale = np.dot(x[i], z[:, 0][i]) / (szi * shape) else: shape = 1 scale = 1 (self.shape, self.scale) = (shape, scale) self.mean = np.dot(x, z[:, 1]) / sz[1] self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1] self.mixt = sz[0] / np.size(x)
3DUnetCNN
positive
def group_tokens(self, grp_cls, tokens, ignore_ws=False): """Replace tokens by an instance of *grp_cls*.""" <DeepExtract> if start > 0: for i in range(start, len(self.tokens)): if self.tokens[i] == tokens[0]: idx = i idx = -1 idx = self.tokens.index(tokens[0]) </DeepExtract> if ignore_ws: while tokens and tokens[-1].is_whitespace(): tokens = tokens[:-1] for t in tokens: self.tokens.remove(t) grp = grp_cls(tokens) for token in tokens: token.parent = grp grp.parent = self self.tokens.insert(idx, grp) return grp
def group_tokens(self, grp_cls, tokens, ignore_ws=False): """Replace tokens by an instance of *grp_cls*.""" if start > 0: for i in range(start, len(self.tokens)): if self.tokens[i] == tokens[0]: idx = i idx = -1 idx = self.tokens.index(tokens[0]) if ignore_ws: while tokens and tokens[-1].is_whitespace(): tokens = tokens[:-1] for t in tokens: self.tokens.remove(t) grp = grp_cls(tokens) for token in tokens: token.parent = grp grp.parent = self self.tokens.insert(idx, grp) return grp
es-monitor
positive
def PrintFindings(findings, filter_text): """Prints the findings of the ontology validation. Args: findings: a list of Finding objects. filter_text: command line arguments. The only available argument is 'match:<value>' which will simply perform a simple string 'contains' on the finding output and cause only matching findings to print. """ <DeepExtract> file_findings_map = {} for finding in findings: filepath = finding.file_context.filepath file_list = file_findings_map.get(filepath) if file_list is None: file_findings_map[filepath] = [finding] else: file_findings_map[filepath].append(finding) findings_by_file = file_findings_map </DeepExtract> for filepath in findings_by_file: findings_for_file = findings_by_file[filepath] if not findings_for_file: print('no Findings in {0}'.format(filepath)) continue print('Findings in {0}'.format(filepath)) for finding in findings_for_file: if not filter_text or filter_text in str(finding): print(finding) print('\n' + str(len(findings)) + ' findings.\n')
def PrintFindings(findings, filter_text): """Prints the findings of the ontology validation. Args: findings: a list of Finding objects. filter_text: command line arguments. The only available argument is 'match:<value>' which will simply perform a simple string 'contains' on the finding output and cause only matching findings to print. """ file_findings_map = {} for finding in findings: filepath = finding.file_context.filepath file_list = file_findings_map.get(filepath) if file_list is None: file_findings_map[filepath] = [finding] else: file_findings_map[filepath].append(finding) findings_by_file = file_findings_map for filepath in findings_by_file: findings_for_file = findings_by_file[filepath] if not findings_for_file: print('no Findings in {0}'.format(filepath)) continue print('Findings in {0}'.format(filepath)) for finding in findings_for_file: if not filter_text or filter_text in str(finding): print(finding) print('\n' + str(len(findings)) + ' findings.\n')
digitalbuildings
positive
def postTravel(res, root): if root: <DeepExtract> if root.left: postTravel(res, root.left.left) postTravel(res, root.left.right) res.append(root.left.val) </DeepExtract> <DeepExtract> if root.right: postTravel(res, root.right.left) postTravel(res, root.right.right) res.append(root.right.val) </DeepExtract> res.append(root.val)
def postTravel(res, root): if root: if root.left: postTravel(res, root.left.left) postTravel(res, root.left.right) res.append(root.left.val) if root.right: postTravel(res, root.right.left) postTravel(res, root.right.right) res.append(root.right.val) res.append(root.val)
cabbird
positive
def assert_statement(a: rd.StatementNode, b: capa_pb2.StatementNode): assert a.type == b.type sa = a.statement sb = getattr(b, str(b.WhichOneof('statement'))) assert sa.type == sb.type assert cmp_optional(sa.description, sb.description) if isinstance(sa, rd.RangeStatement): assert isinstance(sb, capa_pb2.RangeStatement) assert sa.min == sb.min assert sa.max == sa.max <DeepExtract> sb.child = getattr(sb.child, sb.child.WhichOneof('feature')) assert sa.child.type == sb.child.type assert cmp_optional(sa.child.description, sb.child.description) if isinstance(sa.child, capa.features.freeze.features.OSFeature): assert sa.child.os == sb.child.os elif isinstance(sa.child, capa.features.freeze.features.ArchFeature): assert sa.child.arch == sb.child.arch elif isinstance(sa.child, capa.features.freeze.features.FormatFeature): assert sa.child.format == sb.child.format elif isinstance(sa.child, capa.features.freeze.features.MatchFeature): assert sa.child.match == sb.child.match elif isinstance(sa.child, capa.features.freeze.features.CharacteristicFeature): assert sa.child.characteristic == sb.child.characteristic elif isinstance(sa.child, capa.features.freeze.features.ExportFeature): assert sa.child.export == sb.child.export elif isinstance(sa.child, capa.features.freeze.features.ImportFeature): assert sa.child.import_ == sb.child.import_ elif isinstance(sa.child, capa.features.freeze.features.SectionFeature): assert sa.child.section == sb.child.section elif isinstance(sa.child, capa.features.freeze.features.FunctionNameFeature): assert sa.child.function_name == sb.child.function_name elif isinstance(sa.child, capa.features.freeze.features.SubstringFeature): assert sa.child.substring == sb.child.substring elif isinstance(sa.child, capa.features.freeze.features.RegexFeature): assert sa.child.regex == sb.child.regex elif isinstance(sa.child, capa.features.freeze.features.StringFeature): assert sa.child.string == sb.child.string elif isinstance(sa.child, capa.features.freeze.features.ClassFeature): assert sa.child.class_ == sb.child.class_ elif isinstance(sa.child, capa.features.freeze.features.NamespaceFeature): assert sa.child.namespace == sb.child.namespace elif isinstance(sa.child, capa.features.freeze.features.BasicBlockFeature): pass elif isinstance(sa.child, capa.features.freeze.features.APIFeature): assert sa.child.api == sb.child.api elif isinstance(sa.child, capa.features.freeze.features.PropertyFeature): assert sa.child.property == sb.child.property_ assert sa.child.access == sb.child.access elif isinstance(sa.child, capa.features.freeze.features.NumberFeature): n = getattr(sb.child.number, sb.child.number.WhichOneof('value')) assert sa.child.number == n elif isinstance(sa.child, capa.features.freeze.features.BytesFeature): assert sa.child.bytes == sb.child.bytes elif isinstance(sa.child, capa.features.freeze.features.OffsetFeature): assert sa.child.offset == getattr(sb.child.offset, sb.child.offset.WhichOneof('value')) elif isinstance(sa.child, capa.features.freeze.features.MnemonicFeature): assert sa.child.mnemonic == sb.child.mnemonic elif isinstance(sa.child, capa.features.freeze.features.OperandNumberFeature): assert sa.child.index == sb.child.index assert sa.child.operand_number == getattr(sb.child.operand_number, sb.child.operand_number.WhichOneof('value')) elif isinstance(sa.child, capa.features.freeze.features.OperandOffsetFeature): assert sa.child.index == sb.child.index assert sa.child.operand_offset == getattr(sb.child.operand_offset, sb.child.operand_offset.WhichOneof('value')) else: raise NotImplementedError(f'unhandled feature: {type(sa.child)}: {sa.child}') </DeepExtract> elif isinstance(sa, rd.SomeStatement): assert sa.count == sb.count elif isinstance(sa, rd.SubscopeStatement): assert capa.render.proto.scope_to_pb2(sa.scope) == sb.scope elif isinstance(sa, rd.CompoundStatement): pass else: assert False
def assert_statement(a: rd.StatementNode, b: capa_pb2.StatementNode): assert a.type == b.type sa = a.statement sb = getattr(b, str(b.WhichOneof('statement'))) assert sa.type == sb.type assert cmp_optional(sa.description, sb.description) if isinstance(sa, rd.RangeStatement): assert isinstance(sb, capa_pb2.RangeStatement) assert sa.min == sb.min assert sa.max == sa.max sb.child = getattr(sb.child, sb.child.WhichOneof('feature')) assert sa.child.type == sb.child.type assert cmp_optional(sa.child.description, sb.child.description) if isinstance(sa.child, capa.features.freeze.features.OSFeature): assert sa.child.os == sb.child.os elif isinstance(sa.child, capa.features.freeze.features.ArchFeature): assert sa.child.arch == sb.child.arch elif isinstance(sa.child, capa.features.freeze.features.FormatFeature): assert sa.child.format == sb.child.format elif isinstance(sa.child, capa.features.freeze.features.MatchFeature): assert sa.child.match == sb.child.match elif isinstance(sa.child, capa.features.freeze.features.CharacteristicFeature): assert sa.child.characteristic == sb.child.characteristic elif isinstance(sa.child, capa.features.freeze.features.ExportFeature): assert sa.child.export == sb.child.export elif isinstance(sa.child, capa.features.freeze.features.ImportFeature): assert sa.child.import_ == sb.child.import_ elif isinstance(sa.child, capa.features.freeze.features.SectionFeature): assert sa.child.section == sb.child.section elif isinstance(sa.child, capa.features.freeze.features.FunctionNameFeature): assert sa.child.function_name == sb.child.function_name elif isinstance(sa.child, capa.features.freeze.features.SubstringFeature): assert sa.child.substring == sb.child.substring elif isinstance(sa.child, capa.features.freeze.features.RegexFeature): assert sa.child.regex == sb.child.regex elif isinstance(sa.child, capa.features.freeze.features.StringFeature): assert sa.child.string == sb.child.string elif isinstance(sa.child, capa.features.freeze.features.ClassFeature): assert sa.child.class_ == sb.child.class_ elif isinstance(sa.child, capa.features.freeze.features.NamespaceFeature): assert sa.child.namespace == sb.child.namespace elif isinstance(sa.child, capa.features.freeze.features.BasicBlockFeature): pass elif isinstance(sa.child, capa.features.freeze.features.APIFeature): assert sa.child.api == sb.child.api elif isinstance(sa.child, capa.features.freeze.features.PropertyFeature): assert sa.child.property == sb.child.property_ assert sa.child.access == sb.child.access elif isinstance(sa.child, capa.features.freeze.features.NumberFeature): n = getattr(sb.child.number, sb.child.number.WhichOneof('value')) assert sa.child.number == n elif isinstance(sa.child, capa.features.freeze.features.BytesFeature): assert sa.child.bytes == sb.child.bytes elif isinstance(sa.child, capa.features.freeze.features.OffsetFeature): assert sa.child.offset == getattr(sb.child.offset, sb.child.offset.WhichOneof('value')) elif isinstance(sa.child, capa.features.freeze.features.MnemonicFeature): assert sa.child.mnemonic == sb.child.mnemonic elif isinstance(sa.child, capa.features.freeze.features.OperandNumberFeature): assert sa.child.index == sb.child.index assert sa.child.operand_number == getattr(sb.child.operand_number, sb.child.operand_number.WhichOneof('value')) elif isinstance(sa.child, capa.features.freeze.features.OperandOffsetFeature): assert sa.child.index == sb.child.index assert sa.child.operand_offset == getattr(sb.child.operand_offset, sb.child.operand_offset.WhichOneof('value')) else: raise NotImplementedError(f'unhandled feature: {type(sa.child)}: {sa.child}') elif isinstance(sa, rd.SomeStatement): assert sa.count == sb.count elif isinstance(sa, rd.SubscopeStatement): assert capa.render.proto.scope_to_pb2(sa.scope) == sb.scope elif isinstance(sa, rd.CompoundStatement): pass else: assert False
capa
positive
def subscribe_anchore_feed(feed, user_tier=0): success = True msg = str(feed) + ': subscribed.' <DeepExtract> feedmeta = contexts['anchore_db'].load_feedmeta() </DeepExtract> if feed in feedmeta: if user_tier >= int(feedmeta[feed]['access_tier']): if not feedmeta[feed]['subscribed']: feedmeta[feed]['subscribed'] = True if not save_anchore_feedmeta(feedmeta): msg = str(feed) + ': failed to subscribe to feed (check debug output).' success = False else: msg = 'Current user does not have sufficient access tier to subscribe to feed {0}. Current tier is {1}, must be {2} to access feed'.format(feed, user_tier, feedmeta[feed]['access_tier']) success = False else: msg = 'cannot find specified feed (' + str(feed) + '): please review the feeds list and try again' success = False return (success, msg)
def subscribe_anchore_feed(feed, user_tier=0): success = True msg = str(feed) + ': subscribed.' feedmeta = contexts['anchore_db'].load_feedmeta() if feed in feedmeta: if user_tier >= int(feedmeta[feed]['access_tier']): if not feedmeta[feed]['subscribed']: feedmeta[feed]['subscribed'] = True if not save_anchore_feedmeta(feedmeta): msg = str(feed) + ': failed to subscribe to feed (check debug output).' success = False else: msg = 'Current user does not have sufficient access tier to subscribe to feed {0}. Current tier is {1}, must be {2} to access feed'.format(feed, user_tier, feedmeta[feed]['access_tier']) success = False else: msg = 'cannot find specified feed (' + str(feed) + '): please review the feeds list and try again' success = False return (success, msg)
anchore
positive
def _MoveExtendPropertiesInPlaceOneClass(extend_property_decls, extend_getter_decls, outer_class_names, class_decl): """Helper for MoveExtendPropertiesInPlace.""" member_delete_indices = [] for (member_index, member) in enumerate(class_decl.members): if member.decltype == ast_pb2.Decl.Type.CLASS: <DeepExtract> member_delete_indices = [] for (member_index, member) in enumerate(member.class_.members): if member.decltype == ast_pb2.Decl.Type.CLASS: _MoveExtendPropertiesInPlaceOneClass(extend_property_decls, extend_getter_decls, outer_class_names + [class_decl.name] + [member.class_.name], member.class_) if member.decltype != ast_pb2.Decl.Type.VAR: continue if not member.var.is_extend_variable: continue fq_native = '_'.join([n.native for n in outer_class_names + [class_decl.name] + [member.class_.name]]) property_decl = ast_pb2.Decl() property_decl.CopyFrom(member) property_decl.var.name.native = fq_native + EXTEND_INFIX + member.var.name.native if member.var.name.cpp_name == member.var.name.native: property_decl.var.name.cpp_name = fq_native + EXTEND_INFIX + member.var.name.cpp_name p = _GenerateParameterSelf(member.class_, outer_class_names + [class_decl.name]) property_decl.var.cpp_get.params.insert(0, p) property_decl.var.cpp_get.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_get.name.cpp_name if member.var.HasField('cpp_set'): property_decl.var.cpp_set.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_set.name.cpp_name p = _GenerateParameterSelf(member.class_, outer_class_names + [class_decl.name]) property_decl.var.cpp_set.params.insert(0, p) extend_property_decls.append(property_decl) member_delete_indices.append(member_index) getter_decl = ast_pb2.FuncDecl() getter_decl.CopyFrom(member.var.cpp_get) getter_decl.name.native = getter_decl.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_get.name.cpp_name p = _GenerateParameterSelf(member.class_, outer_class_names + [class_decl.name]) getter_decl.params.insert(0, p) getter_decl.is_extend_method = True func_decl = ast_pb2.Decl() func_decl.func.CopyFrom(getter_decl) func_decl.decltype = ast_pb2.Decl.Type.FUNC extend_getter_decls.append(func_decl) for member_index in reversed(member_delete_indices): del member.class_.members[member_index] </DeepExtract> if member.decltype != ast_pb2.Decl.Type.VAR: continue if not member.var.is_extend_variable: continue fq_native = '_'.join([n.native for n in outer_class_names + [class_decl.name]]) property_decl = ast_pb2.Decl() property_decl.CopyFrom(member) property_decl.var.name.native = fq_native + EXTEND_INFIX + member.var.name.native if member.var.name.cpp_name == member.var.name.native: property_decl.var.name.cpp_name = fq_native + EXTEND_INFIX + member.var.name.cpp_name <DeepExtract> p = ast_pb2.ParamDecl() p.name.native = 'self' p.name.cpp_name = 'self' nested_class_names = outer_class_names + [class_decl.name] p.type.lang_type = '.'.join([n.native for n in nested_class_names]) p.type.cpp_type = '::'.join([n.cpp_name for n in nested_class_names]) p = p </DeepExtract> property_decl.var.cpp_get.params.insert(0, p) property_decl.var.cpp_get.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_get.name.cpp_name if member.var.HasField('cpp_set'): property_decl.var.cpp_set.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_set.name.cpp_name <DeepExtract> p = ast_pb2.ParamDecl() p.name.native = 'self' p.name.cpp_name = 'self' nested_class_names = outer_class_names + [class_decl.name] p.type.lang_type = '.'.join([n.native for n in nested_class_names]) p.type.cpp_type = '::'.join([n.cpp_name for n in nested_class_names]) p = p </DeepExtract> property_decl.var.cpp_set.params.insert(0, p) extend_property_decls.append(property_decl) member_delete_indices.append(member_index) getter_decl = ast_pb2.FuncDecl() getter_decl.CopyFrom(member.var.cpp_get) getter_decl.name.native = getter_decl.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_get.name.cpp_name <DeepExtract> p = ast_pb2.ParamDecl() p.name.native = 'self' p.name.cpp_name = 'self' nested_class_names = outer_class_names + [class_decl.name] p.type.lang_type = '.'.join([n.native for n in nested_class_names]) p.type.cpp_type = '::'.join([n.cpp_name for n in nested_class_names]) p = p </DeepExtract> getter_decl.params.insert(0, p) getter_decl.is_extend_method = True func_decl = ast_pb2.Decl() func_decl.func.CopyFrom(getter_decl) func_decl.decltype = ast_pb2.Decl.Type.FUNC extend_getter_decls.append(func_decl) for member_index in reversed(member_delete_indices): del class_decl.members[member_index]
def _MoveExtendPropertiesInPlaceOneClass(extend_property_decls, extend_getter_decls, outer_class_names, class_decl): """Helper for MoveExtendPropertiesInPlace.""" member_delete_indices = [] for (member_index, member) in enumerate(class_decl.members): if member.decltype == ast_pb2.Decl.Type.CLASS: member_delete_indices = [] for (member_index, member) in enumerate(member.class_.members): if member.decltype == ast_pb2.Decl.Type.CLASS: _MoveExtendPropertiesInPlaceOneClass(extend_property_decls, extend_getter_decls, outer_class_names + [class_decl.name] + [member.class_.name], member.class_) if member.decltype != ast_pb2.Decl.Type.VAR: continue if not member.var.is_extend_variable: continue fq_native = '_'.join([n.native for n in outer_class_names + [class_decl.name] + [member.class_.name]]) property_decl = ast_pb2.Decl() property_decl.CopyFrom(member) property_decl.var.name.native = fq_native + EXTEND_INFIX + member.var.name.native if member.var.name.cpp_name == member.var.name.native: property_decl.var.name.cpp_name = fq_native + EXTEND_INFIX + member.var.name.cpp_name p = _GenerateParameterSelf(member.class_, outer_class_names + [class_decl.name]) property_decl.var.cpp_get.params.insert(0, p) property_decl.var.cpp_get.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_get.name.cpp_name if member.var.HasField('cpp_set'): property_decl.var.cpp_set.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_set.name.cpp_name p = _GenerateParameterSelf(member.class_, outer_class_names + [class_decl.name]) property_decl.var.cpp_set.params.insert(0, p) extend_property_decls.append(property_decl) member_delete_indices.append(member_index) getter_decl = ast_pb2.FuncDecl() getter_decl.CopyFrom(member.var.cpp_get) getter_decl.name.native = getter_decl.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_get.name.cpp_name p = _GenerateParameterSelf(member.class_, outer_class_names + [class_decl.name]) getter_decl.params.insert(0, p) getter_decl.is_extend_method = True func_decl = ast_pb2.Decl() func_decl.func.CopyFrom(getter_decl) func_decl.decltype = ast_pb2.Decl.Type.FUNC extend_getter_decls.append(func_decl) for member_index in reversed(member_delete_indices): del member.class_.members[member_index] if member.decltype != ast_pb2.Decl.Type.VAR: continue if not member.var.is_extend_variable: continue fq_native = '_'.join([n.native for n in outer_class_names + [class_decl.name]]) property_decl = ast_pb2.Decl() property_decl.CopyFrom(member) property_decl.var.name.native = fq_native + EXTEND_INFIX + member.var.name.native if member.var.name.cpp_name == member.var.name.native: property_decl.var.name.cpp_name = fq_native + EXTEND_INFIX + member.var.name.cpp_name p = ast_pb2.ParamDecl() p.name.native = 'self' p.name.cpp_name = 'self' nested_class_names = outer_class_names + [class_decl.name] p.type.lang_type = '.'.join([n.native for n in nested_class_names]) p.type.cpp_type = '::'.join([n.cpp_name for n in nested_class_names]) p = p property_decl.var.cpp_get.params.insert(0, p) property_decl.var.cpp_get.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_get.name.cpp_name if member.var.HasField('cpp_set'): property_decl.var.cpp_set.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_set.name.cpp_name p = ast_pb2.ParamDecl() p.name.native = 'self' p.name.cpp_name = 'self' nested_class_names = outer_class_names + [class_decl.name] p.type.lang_type = '.'.join([n.native for n in nested_class_names]) p.type.cpp_type = '::'.join([n.cpp_name for n in nested_class_names]) p = p property_decl.var.cpp_set.params.insert(0, p) extend_property_decls.append(property_decl) member_delete_indices.append(member_index) getter_decl = ast_pb2.FuncDecl() getter_decl.CopyFrom(member.var.cpp_get) getter_decl.name.native = getter_decl.name.cpp_name = fq_native + EXTEND_INFIX + member.var.cpp_get.name.cpp_name p = ast_pb2.ParamDecl() p.name.native = 'self' p.name.cpp_name = 'self' nested_class_names = outer_class_names + [class_decl.name] p.type.lang_type = '.'.join([n.native for n in nested_class_names]) p.type.cpp_type = '::'.join([n.cpp_name for n in nested_class_names]) p = p getter_decl.params.insert(0, p) getter_decl.is_extend_method = True func_decl = ast_pb2.Decl() func_decl.func.CopyFrom(getter_decl) func_decl.decltype = ast_pb2.Decl.Type.FUNC extend_getter_decls.append(func_decl) for member_index in reversed(member_delete_indices): del class_decl.members[member_index]
clif
positive
def get_variable_values(self, variables): var_v = [] var_names = [] for v in variables: if v is not None: var_v.append(v) var_names.append(v.name) <DeepExtract> if var_v is None: var_values = None if not (type(var_v) is list or type(var_v) is tuple): var_values = self.sess.run(var_v) fetch_list = [] none_idxes = [] for (i, itm) in enumerate(var_v): if itm is None: none_idxes.append(i) else: fetch_list.append(itm) print('fetch_list len: ', len(fetch_list)) values = self.sess.run(fetch_list) print('fetched len: ', len(values)) result = [] values_i = 0 for i in range(len(var_v)): if i in none_idxes: result.append(None) else: result.append(values[values_i]) values_i += 1 var_values = result </DeepExtract> result = {} for (n, v) in zip(var_names, var_values): result[n] = v return result
def get_variable_values(self, variables): var_v = [] var_names = [] for v in variables: if v is not None: var_v.append(v) var_names.append(v.name) if var_v is None: var_values = None if not (type(var_v) is list or type(var_v) is tuple): var_values = self.sess.run(var_v) fetch_list = [] none_idxes = [] for (i, itm) in enumerate(var_v): if itm is None: none_idxes.append(i) else: fetch_list.append(itm) print('fetch_list len: ', len(fetch_list)) values = self.sess.run(fetch_list) print('fetched len: ', len(values)) result = [] values_i = 0 for i in range(len(var_v)): if i in none_idxes: result.append(None) else: result.append(values[values_i]) values_i += 1 var_values = result result = {} for (n, v) in zip(var_names, var_values): result[n] = v return result
AOFP
positive
def build_transform_gen(cfg, is_train): """ Create a list of :class:`TransformGen` from config. Now it includes resizing and flipping. Returns: list[TransformGen] """ logger = logging.getLogger(__name__) tfm_gens = [] if is_train: for (aug, args) in cfg.INPUT.AUG.TRAIN_PIPELINES: if aug == 'ResizeShortestEdge': <DeepExtract> if args['sample_style'] == 'range': assert len(args['min_size']) == 2, f"more than 2 ({len(args['min_size'])}) min_size(s) are provided for ranges" </DeepExtract> tfm_gens.append(getattr(T, aug)(**args)) else: for (aug, args) in cfg.INPUT.AUG.TEST_PIPELINES: if aug == 'ResizeShortestEdge': <DeepExtract> if args['sample_style'] == 'range': assert len(args['min_size']) == 2, f"more than 2 ({len(args['min_size'])}) min_size(s) are provided for ranges" </DeepExtract> tfm_gens.append(getattr(T, aug)(**args)) logger.info('TransformGens used: ' + str(tfm_gens)) return tfm_gens
def build_transform_gen(cfg, is_train): """ Create a list of :class:`TransformGen` from config. Now it includes resizing and flipping. Returns: list[TransformGen] """ logger = logging.getLogger(__name__) tfm_gens = [] if is_train: for (aug, args) in cfg.INPUT.AUG.TRAIN_PIPELINES: if aug == 'ResizeShortestEdge': if args['sample_style'] == 'range': assert len(args['min_size']) == 2, f"more than 2 ({len(args['min_size'])}) min_size(s) are provided for ranges" tfm_gens.append(getattr(T, aug)(**args)) else: for (aug, args) in cfg.INPUT.AUG.TEST_PIPELINES: if aug == 'ResizeShortestEdge': if args['sample_style'] == 'range': assert len(args['min_size']) == 2, f"more than 2 ({len(args['min_size'])}) min_size(s) are provided for ranges" tfm_gens.append(getattr(T, aug)(**args)) logger.info('TransformGens used: ' + str(tfm_gens)) return tfm_gens
CenterNet-better
positive
@asyncio.coroutine def web_soc(self, request): """ Handles web-socket connections in a totally non-obvious way. Based upon the example in the aiohttp docs here: https://aiohttp.readthedocs.org/en/v0.16.2/web.html#websockets Put simply, this co-routine never returns. It yields from the receive method to process incoming messages. If / when the connection is closed then yielding from receive will raise an exception and the co-routine will complete. """ ws = web.WebSocketResponse() ws.start(request) peer = request.transport.get_extra_info('peername')[0] while True: incoming = (yield from ws.receive()) if incoming.tp == aiohttp.MsgType.text: log.info('Incoming request from {}'.format(peer)) log.info(incoming) if incoming.data == 'close': yield from ws.close() else: try: message = json.loads(incoming.data) msg_type = message.get('type', None) if msg_type == 'get': <DeepExtract> def handle_getter(getter, ws=ws, message=message): """ Return the result to the client when it becomes known. If there are any errors, these will be logged by the local_node. """ try: result = getter.result() except Exception: result = {'key': message['key'], 'error': True} finally: msg = json.dumps(result) ws.send_str(msg) key = message['key'] log.info('Websocket GET lookup for {}'.format(key)) forced = message.get('forced', False) getter = self.connector.async_get(key, self.local_node, forced) getter.add_done_callback(handle_getter) </DeepExtract> elif msg_type == 'set': <DeepExtract> def handle_setter(setter, ws=ws, message=message): """ Return confirmation messages to indicate the progress of setting a value in the DHT. If the setter has a result it will be a list of Future object representing N number of calls to peers in the DHT to store the item. If there are errors these will be logged by the local node. """ try: rpcs = setter.result() result = {'key': message['key'], 'duplication_count': len(rpcs)} def handle_rpc(rpc_task, ws=ws): """ Return a status indication for an RPC that is part of a put operation. """ msg = {'key': message['key']} try: rpc_task.result() msg['status'] = 'ok' except: msg['status'] = 'failed' finally: ws.send_str(json.dumps(msg)) for task in rpcs: task.add_done_callback(handle_rpc) except Exception: result = {'error': True} finally: msg = json.dumps(result) ws.send_str(msg) setter = self.connector.async_set(self.local_node, message) setter.add_done_callback(handle_setter) </DeepExtract> except Exception as ex: error_msg = 'WEBSOCKET bad data from {}'.format(peer) log.error(error_msg) log.error(incoming.data) log.error(ex) yield from ws.send_str(json.dumps({'error': True})) elif incoming.tp == aiohttp.MsgType.close: log.info('Websocket with {} closed'.format(peer)) elif incoming.tp == aiohttp.MsgType.closed: break elif incoming.tp == aiohttp.MsgType.error: log.error('Websocket connection closed with error') log.error(ws.exception())
@asyncio.coroutine def web_soc(self, request): """ Handles web-socket connections in a totally non-obvious way. Based upon the example in the aiohttp docs here: https://aiohttp.readthedocs.org/en/v0.16.2/web.html#websockets Put simply, this co-routine never returns. It yields from the receive method to process incoming messages. If / when the connection is closed then yielding from receive will raise an exception and the co-routine will complete. """ ws = web.WebSocketResponse() ws.start(request) peer = request.transport.get_extra_info('peername')[0] while True: incoming = (yield from ws.receive()) if incoming.tp == aiohttp.MsgType.text: log.info('Incoming request from {}'.format(peer)) log.info(incoming) if incoming.data == 'close': yield from ws.close() else: try: message = json.loads(incoming.data) msg_type = message.get('type', None) if msg_type == 'get': def handle_getter(getter, ws=ws, message=message): """ Return the result to the client when it becomes known. If there are any errors, these will be logged by the local_node. """ try: result = getter.result() except Exception: result = {'key': message['key'], 'error': True} finally: msg = json.dumps(result) ws.send_str(msg) key = message['key'] log.info('Websocket GET lookup for {}'.format(key)) forced = message.get('forced', False) getter = self.connector.async_get(key, self.local_node, forced) getter.add_done_callback(handle_getter) elif msg_type == 'set': def handle_setter(setter, ws=ws, message=message): """ Return confirmation messages to indicate the progress of setting a value in the DHT. If the setter has a result it will be a list of Future object representing N number of calls to peers in the DHT to store the item. If there are errors these will be logged by the local node. """ try: rpcs = setter.result() result = {'key': message['key'], 'duplication_count': len(rpcs)} def handle_rpc(rpc_task, ws=ws): """ Return a status indication for an RPC that is part of a put operation. """ msg = {'key': message['key']} try: rpc_task.result() msg['status'] = 'ok' except: msg['status'] = 'failed' finally: ws.send_str(json.dumps(msg)) for task in rpcs: task.add_done_callback(handle_rpc) except Exception: result = {'error': True} finally: msg = json.dumps(result) ws.send_str(msg) setter = self.connector.async_set(self.local_node, message) setter.add_done_callback(handle_setter) except Exception as ex: error_msg = 'WEBSOCKET bad data from {}'.format(peer) log.error(error_msg) log.error(incoming.data) log.error(ex) yield from ws.send_str(json.dumps({'error': True})) elif incoming.tp == aiohttp.MsgType.close: log.info('Websocket with {} closed'.format(peer)) elif incoming.tp == aiohttp.MsgType.closed: break elif incoming.tp == aiohttp.MsgType.error: log.error('Websocket connection closed with error') log.error(ws.exception())
drogulus
positive
def test_fault(): status_code = 500 url = 'https://{}/status/{}'.format(BASE_URL, status_code) <DeepExtract> parts = urlparse(url) (host, _, port) = parts.netloc.partition(':') if port == '': port = None if use_https: conn = httplib.HTTPSConnection(parts.netloc, port) else: conn = httplib.HTTPConnection(parts.netloc, port) path = '{}?{}'.format(parts.path, parts.query) if parts.query else parts.path conn.request('PUT', path) resp = conn.getresponse() </DeepExtract> subsegment = xray_recorder.current_segment().subsegments[1] assert subsegment.name == get_hostname(url) assert subsegment.fault http_meta = subsegment.http assert http_meta['request']['url'] == strip_url(url) assert http_meta['request']['method'].upper() == 'PUT' assert http_meta['response']['status'] == status_code
def test_fault(): status_code = 500 url = 'https://{}/status/{}'.format(BASE_URL, status_code) parts = urlparse(url) (host, _, port) = parts.netloc.partition(':') if port == '': port = None if use_https: conn = httplib.HTTPSConnection(parts.netloc, port) else: conn = httplib.HTTPConnection(parts.netloc, port) path = '{}?{}'.format(parts.path, parts.query) if parts.query else parts.path conn.request('PUT', path) resp = conn.getresponse() subsegment = xray_recorder.current_segment().subsegments[1] assert subsegment.name == get_hostname(url) assert subsegment.fault http_meta = subsegment.http assert http_meta['request']['url'] == strip_url(url) assert http_meta['request']['method'].upper() == 'PUT' assert http_meta['response']['status'] == status_code
aws-xray-sdk-python
positive
@misc.assert_tps(scale=repeat) def test_dotdict_performance(): count = repeat while count: count -= 1 <DeepExtract> assert 'a' in dotdict({'a': 1}) assert dotdict({'a': 1})['a'] == 1 assert 'b' in dotdict({'a': 1}, b=2) assert dotdict({'a': 1}, b=2)['b'] == 2 assert 'c' in dotdict([('c', 3)], d=4) assert dotdict([('c', 3)], d=4)['c'] == 3 assert 'e' in dotdict(e=5) assert dotdict(e=5)['e'] == 5 d = dotdict() d['a.b'] = 1 assert d['a.b'] == 1 assert d.a.b == 1 d.a.b = 2 assert d['a.b'] == 2 assert d.a.b == 2 try: d.x.y = 99 assert False, "Shouldn't be able to create y in non-existent x!" except AttributeError as e: assert "'x'" in str(e) d2 = {'c.d': 2} d.a.b = d2 assert d.a.b.c.d == 2 assert 'b.c.d' in d.a assert 'b.c.x' not in d.a assert 'e.f' not in d.a assert 'a.b' in d assert 'a.b.x...b.c.d' in d assert 'a.b.x....a.b.c.d' in d assert 'a.b.x' not in d assert 'a.b.c' in d assert isinstance(d.a.b.setdefault('c', 'boo'), dotdict) d.a.x = 3 assert d['a.x'] == 3 assert d['.a.x'] == 3 assert d['a.b..x'] == 3 assert d['a.b.c.d....x'] == 3 d['a...a.x'] d['a.b.c...x'] assert 'a.....a.x' in d try: d['a.b.c...y'] assert False, 'Should have failed trying to find y in root' except KeyError as e: assert "'y'" in str(e) assert d['a.b.c.d.e.f....d'] == 2 assert list(sorted((k for k in d))) == ['a.b.c.d', 'a.x'] import types assert isinstance(d.keys(), list if sys.version_info[0] < 3 else types.GeneratorType) assert isinstance(d.items(), list if sys.version_info[0] < 3 else types.GeneratorType) try: del d['a.b.c'] except KeyError as e: assert '(partial key)' in str(e) del d['a.b.c.d'] assert list(sorted((k for k in d))) == ['a.b.c', 'a.x'] del d['a.b.c'] assert list(sorted((k for k in d))) == ['a.b', 'a.x'] assert d.a.x == 3 try: del d.a.x except AttributeError as e: assert 'x' in str(e) del d.a['x'] assert list(sorted((k for k in d))) == ['a.b'] assert 'a' in d assert 'b' in d.a assert 'c' not in d.a.b del d['a.b'] del d['a'] assert list(sorted((k for k in d))) == [] d['a.b.c.d'] = 2 d['a.x'] = 3 assert d.a.b.c.d == 2 assert d.pop('a.b.c') == {'d': 2} assert 'a.b' in d assert 'a.b.c' not in d assert 'x' in d.a assert d.pop('a.b.c...x') == 3 assert 'x' not in d.a </DeepExtract> <DeepExtract> d = dotdict() d['a.b'] = 1 d['c'] = 2 d['l'] = [1, 2, 3, dotdict({'d': 3})] assert d._resolve('a') == ('a', None) assert d._resolve('l[a.b+c].d') == ('l[a.b+c]', 'd') assert d['l[a.b+c].d'] == 3 try: assert d['l[a.b+c-1].d'] == 3 assert False, 'Indexing int, then trying to resolve another level should fail' except KeyError as exc: assert 'not subscriptable' in str(exc) pass assert d.get('l[a.b+c-1].d') == None assert d.get('l[a.b+c].d') == 3 d['l[3].d'] = 4 assert d.get('l[a.b+c].d') == 4 d['l[a.b+c].d'] = 5 assert d.get('l[a.b+c].d') == 5 assert d['l[c-1]'] == 2 d['l[c-1]'] = 99 assert d['l[c-1]'] == 99 try: d['l[c+3]'] = 3 assert False, 'Indexing with a bad index should fail' except IndexError as exc: assert 'index out of range' in str(exc) pass </DeepExtract>
@misc.assert_tps(scale=repeat) def test_dotdict_performance(): count = repeat while count: count -= 1 assert 'a' in dotdict({'a': 1}) assert dotdict({'a': 1})['a'] == 1 assert 'b' in dotdict({'a': 1}, b=2) assert dotdict({'a': 1}, b=2)['b'] == 2 assert 'c' in dotdict([('c', 3)], d=4) assert dotdict([('c', 3)], d=4)['c'] == 3 assert 'e' in dotdict(e=5) assert dotdict(e=5)['e'] == 5 d = dotdict() d['a.b'] = 1 assert d['a.b'] == 1 assert d.a.b == 1 d.a.b = 2 assert d['a.b'] == 2 assert d.a.b == 2 try: d.x.y = 99 assert False, "Shouldn't be able to create y in non-existent x!" except AttributeError as e: assert "'x'" in str(e) d2 = {'c.d': 2} d.a.b = d2 assert d.a.b.c.d == 2 assert 'b.c.d' in d.a assert 'b.c.x' not in d.a assert 'e.f' not in d.a assert 'a.b' in d assert 'a.b.x...b.c.d' in d assert 'a.b.x....a.b.c.d' in d assert 'a.b.x' not in d assert 'a.b.c' in d assert isinstance(d.a.b.setdefault('c', 'boo'), dotdict) d.a.x = 3 assert d['a.x'] == 3 assert d['.a.x'] == 3 assert d['a.b..x'] == 3 assert d['a.b.c.d....x'] == 3 d['a...a.x'] d['a.b.c...x'] assert 'a.....a.x' in d try: d['a.b.c...y'] assert False, 'Should have failed trying to find y in root' except KeyError as e: assert "'y'" in str(e) assert d['a.b.c.d.e.f....d'] == 2 assert list(sorted((k for k in d))) == ['a.b.c.d', 'a.x'] import types assert isinstance(d.keys(), list if sys.version_info[0] < 3 else types.GeneratorType) assert isinstance(d.items(), list if sys.version_info[0] < 3 else types.GeneratorType) try: del d['a.b.c'] except KeyError as e: assert '(partial key)' in str(e) del d['a.b.c.d'] assert list(sorted((k for k in d))) == ['a.b.c', 'a.x'] del d['a.b.c'] assert list(sorted((k for k in d))) == ['a.b', 'a.x'] assert d.a.x == 3 try: del d.a.x except AttributeError as e: assert 'x' in str(e) del d.a['x'] assert list(sorted((k for k in d))) == ['a.b'] assert 'a' in d assert 'b' in d.a assert 'c' not in d.a.b del d['a.b'] del d['a'] assert list(sorted((k for k in d))) == [] d['a.b.c.d'] = 2 d['a.x'] = 3 assert d.a.b.c.d == 2 assert d.pop('a.b.c') == {'d': 2} assert 'a.b' in d assert 'a.b.c' not in d assert 'x' in d.a assert d.pop('a.b.c...x') == 3 assert 'x' not in d.a d = dotdict() d['a.b'] = 1 d['c'] = 2 d['l'] = [1, 2, 3, dotdict({'d': 3})] assert d._resolve('a') == ('a', None) assert d._resolve('l[a.b+c].d') == ('l[a.b+c]', 'd') assert d['l[a.b+c].d'] == 3 try: assert d['l[a.b+c-1].d'] == 3 assert False, 'Indexing int, then trying to resolve another level should fail' except KeyError as exc: assert 'not subscriptable' in str(exc) pass assert d.get('l[a.b+c-1].d') == None assert d.get('l[a.b+c].d') == 3 d['l[3].d'] = 4 assert d.get('l[a.b+c].d') == 4 d['l[a.b+c].d'] = 5 assert d.get('l[a.b+c].d') == 5 assert d['l[c-1]'] == 2 d['l[c-1]'] = 99 assert d['l[c-1]'] == 99 try: d['l[c+3]'] = 3 assert False, 'Indexing with a bad index should fail' except IndexError as exc: assert 'index out of range' in str(exc) pass
cpppo
positive
def test_old_iam_role_creds_are_served_on_error(self): <DeepExtract> creds_json = '{"AccessKeyId" : "' + 'ACCESS_KEY' + '", "SecretAccessKey" : "' + 'SECRET_KEY' + '", "Token" : "' + 'TOKEN' + '" }' self.server.set_expected_response(creds_json, 200) ConfigHelper._DEFAULT_CREDENTIALS_PATH = '' self.config_helper = ConfigHelper(config_path=ConfigHelperTest.VALID_CONFIG_WITHOUT_CREDS, metadata_server=self.server.get_url()) assert_credentials(self.config_helper._credentials, 'ACCESS_KEY', 'SECRET_KEY', 'TOKEN') </DeepExtract> creds_json = '{"AccessKeyId" : "NEW_ACCESS_KEY", "SecretAccessKey" : "NEW_SECRET_KEY",}' <DeepExtract> self.server.set_expected_response(creds_json, 200) creds = self.config_helper.credentials assert_credentials(creds, 'ACCESS_KEY', 'SECRET_KEY', 'TOKEN') </DeepExtract>
def test_old_iam_role_creds_are_served_on_error(self): creds_json = '{"AccessKeyId" : "' + 'ACCESS_KEY' + '", "SecretAccessKey" : "' + 'SECRET_KEY' + '", "Token" : "' + 'TOKEN' + '" }' self.server.set_expected_response(creds_json, 200) ConfigHelper._DEFAULT_CREDENTIALS_PATH = '' self.config_helper = ConfigHelper(config_path=ConfigHelperTest.VALID_CONFIG_WITHOUT_CREDS, metadata_server=self.server.get_url()) assert_credentials(self.config_helper._credentials, 'ACCESS_KEY', 'SECRET_KEY', 'TOKEN') creds_json = '{"AccessKeyId" : "NEW_ACCESS_KEY", "SecretAccessKey" : "NEW_SECRET_KEY",}' self.server.set_expected_response(creds_json, 200) creds = self.config_helper.credentials assert_credentials(creds, 'ACCESS_KEY', 'SECRET_KEY', 'TOKEN')
collectd-cloudwatch
positive
def build_model(model_opt, opt, fields, checkpoint): logger.info('Building model...') <DeepExtract> if model_opt.model_type == 'text': src_field = fields['src'] src_emb = build_embeddings(model_opt, src_field) else: src_emb = None encoder = build_encoder(model_opt, src_emb) tgt_field = fields['tgt'] tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False) if model_opt.share_embeddings: assert src_field.base_field.vocab == tgt_field.base_field.vocab, 'preprocess with -share_vocab if you use share_embeddings' tgt_emb.word_lut.weight = src_emb.word_lut.weight if model_opt.share_position_embeddings: tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight decoder = build_decoder(model_opt, tgt_emb) if use_gpu(opt) and gpu_id is not None: device = torch.device('cuda', gpu_id) elif use_gpu(opt) and (not gpu_id): device = torch.device('cuda') elif not use_gpu(opt): device = torch.device('cpu') if model_opt.simple_fusion: layers = 12 size = 768 heads = 12 lm_decoder_opt = copy.deepcopy(model_opt) lm_decoder_opt.dec_layers = layers lm_decoder_opt.use_GPT_version_ctxattn = False lm_decoder_opt.use_GPT_version_psa = False lm_decoder_opt.use_GPT_version_unconditional = True lm_decoder_opt.tgt_word_vec_size = size lm_decoder_opt.rnn_size = size lm_decoder_opt.dec_rnn_size = size lm_decoder_opt.transformer_ff = size * 4 lm_decoder_opt.dec_heads = heads lm_decoder_opt.position_encoding_learned_dec = True lm_decoder_opt.share_decoder_embeddings = True lm_decoder_opt.dropout = 0 lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False) logger.info(lm_decoder_emb) lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb) load_decoder = lm_decoder model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder) generator = SimpleFusionGenerator(model_opt.dec_rnn_size, lm_decoder_opt.dec_rnn_size, len(fields['tgt'].base_field.vocab)) generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight if model_opt.share_decoder_embeddings: generator.decoder_linear.weight = decoder.embeddings.word_lut.weight gen_linear = generator.lm_linear else: load_decoder = decoder if model_opt.unconditional: model = onmt.models.UncondModel(decoder) else: model = onmt.models.NMTModel(encoder, decoder) if not model_opt.copy_attn: if model_opt.generator_function == 'sparsemax': gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1) else: gen_func = nn.LogSoftmax(dim=-1) if model_opt.padded_vocab_fix_me_later: gen_func = nn.Sequential(PadGen(), gen_func) generator = nn.Sequential(nn.Linear(model_opt.dec_rnn_size, len(fields['tgt'].base_field.vocab)), Cast(torch.float32), gen_func) if model_opt.share_decoder_embeddings: generator[0].weight = decoder.embeddings.word_lut.weight gen_linear = generator[0] else: tgt_base_field = fields['tgt'].base_field vocab_size = len(tgt_base_field.vocab) pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token] generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx) if model_opt.share_decoder_embeddings: generator.linear.weight = decoder.embeddings.word_lut.weight gen_linear = generator.linear if model_opt.encdec_share_params: for (name, p) in decoder.named_parameters(): if 'ctx' in name or 'context' in name: continue pointer = encoder attrs = name.split('.') for attr_name in attrs[:-1]: pointer = getattr(pointer, attr_name) setattr(pointer, attrs[-1], p) if checkpoint is not None: if 'gpt2_params' not in checkpoint and 'enc_model' not in checkpoint: def fix_key(s): s = re.sub('(.*)\\.layer_norm((_\\d+)?)\\.b_2', 
'\\1.layer_norm\\2.bias', s) s = re.sub('(.*)\\.layer_norm((_\\d+)?)\\.a_2', '\\1.layer_norm\\2.weight', s) model = s checkpoint['model'] = {fix_key(k): v for (k, v) in checkpoint['model'].items()} if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from: for p in decoder.parameters(): if p.dim() > 1: xavier_uniform_(p) for p in encoder.parameters(): if p.dim() > 1: xavier_uniform_(p) if model_opt.ctx_weight_param: for (name, p) in decoder.named_parameters(): if 'ctx_weight' in name: p.data.zero_() if 'ctx_bias' in name: p.data.fill_(-10) model.load_state_dict(checkpoint['model'], strict=False) generator.load_state_dict(checkpoint['generator'], strict=False) else: if 'gpt2_params' in checkpoint: init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or (model_opt.GPT_representation_mode != 'none') if init_something: if model_opt.gpt2_init_zero: for p in decoder.parameters(): p.data.zero_() if model_opt.simple_fusion: generator.decoder_linear.weight.data.zero_() generator.decoder_linear.bias.data.zero_() else: for p in decoder.parameters(): if p.dim() > 1: xavier_uniform_(p) if encoder is not None: for p in encoder.parameters(): if p.dim() > 1: xavier_uniform_(p) for p in generator.parameters(): if p.dim() > 1: xavier_uniform_(p) if model_opt.zero_bias_init: gen_linear.bias.data.zero_() if model_opt.ctx_weight_param: for (name, p) in decoder.named_parameters(): if 'ctx_weight' in name: p.data.zero_() if 'ctx_bias' in name: p.data.fill_(-10) gen_linear.bias.data.zero_() load_models = [] if model_opt.GPT_representation_mode != 'none': load_embs = [] if model_opt.GPT_representation_loc in ['both', 'src']: load_models.append(src_emb.gpt_model) load_embs.append(src_emb) if model_opt.GPT_representation_loc in ['both', 'tgt']: load_models.append(tgt_emb.gpt_model) load_embs.append(tgt_emb) elif model_opt.gpt2_init_embanddec or model_opt.simple_fusion: load_models = [load_decoder] elif model_opt.gpt2_init_embandenc: load_models = [encoder] it_list = list(checkpoint['gpt2_params']) for (lm_idx, load_model) in enumerate(load_models): for (name, array) in it_list: name = name[6:] name = name.split('/') assigned = False if name[0] == 'wpe': if model_opt.GPT_representation_mode != 'none': pointer = load_embs[lm_idx].make_embedding.pe.pe.weight else: pointer = load_model.embeddings.make_embedding.pe.pe.weight elif name[0] == 'wte': if model_opt.GPT_representation_mode != 'none': pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight] else: pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight] if not model_opt.nopretrain_decemb: pointer.append(gen_linear.weight) if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb: pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight) elif name[0] == 'ln_f': if name[1] == 'g': pointer = load_model.layer_norm.weight elif name[1] == 'b': pointer = load_model.layer_norm.bias else: raise ValueError('I am missing something here!') elif name[0][0] == 'h': layer_num = name[0][1:] pointer = getattr(load_model.transformer_layers, layer_num) if name[1] == 'attn': assigned = True pointer = pointer.self_attn full_data = torch.from_numpy(array) if name[2] == 'c_attn': end_size = full_data.shape[-1] // 3 assert full_data.shape[-1] % 3 == 0 if name[3] == 'b': if init_something: pointer.linear_query.bias.data = full_data[:end_size] pointer.linear_keys.bias.data = full_data[end_size:end_size * 2] pointer.linear_values.bias.data = full_data[end_size * 2:] if 
model_opt.gpt2_params_std > 0: pointer.linear_query.bias.orig = full_data[:end_size].clone() pointer.linear_keys.bias.orig = full_data[end_size:end_size * 2].clone() pointer.linear_values.bias.orig = full_data[end_size * 2:].clone() elif name[3] == 'w': if init_something: pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous() pointer.linear_keys.weight.data = full_data[:, end_size:end_size * 2].t().contiguous() pointer.linear_values.weight.data = full_data[:, end_size * 2:].t().contiguous() if model_opt.gpt2_params_std > 0: pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone() pointer.linear_keys.weight.orig = full_data[:, end_size:end_size * 2].t().contiguous().clone() pointer.linear_values.weight.orig = full_data[:, end_size * 2:].t().contiguous().clone() else: raise ValueError('I am missing something here!') elif name[2] == 'c_proj': if name[3] == 'b': if init_something: pointer.final_linear.bias.data = full_data if model_opt.gpt2_params_std > 0: pointer.final_linear.bias.orig = full_data.clone() elif name[3] == 'w': if init_something: pointer.final_linear.weight.data = full_data.t().contiguous() if model_opt.gpt2_params_std > 0: pointer.final_linear.weight.orig = full_data.t().contiguous().clone() else: raise ValueError('I am missing something here!') elif name[1] == 'ln_1' or name[1] == 'ln_2': num = name[1][3] pointer = getattr(pointer, 'layer_norm_' + num) if name[2] == 'b': pointer = pointer.bias elif name[2] == 'g': pointer = pointer.weight else: raise ValueError('I am missing something here!') elif name[1] == 'mlp': pointer = pointer.feed_forward pointer = getattr(pointer, name[2]) if name[3] == 'b': pointer = pointer.bias elif name[3] == 'w': pointer = pointer.weight else: raise ValueError('I am missing something here!') else: raise ValueError('I am missing something here!') else: raise ValueError('I am missing something here!') if not assigned: if name[-1] == 'w' or name[-1] == 'g': array = array.T if not isinstance(pointer, list): pointer = [pointer] for pointer_i in pointer: target_size = int(math.ceil(array.shape[0] / 8)) * 8 padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:] try: assert pointer_i.shape == array.shape or padded_vocab except AssertionError as e: e.args += (pointer_i.shape, array.shape) raise if init_something: print('Initialize PyTorch weight {}'.format(name)) if padded_vocab: pointer_i.data[:array.shape[0]] = torch.from_numpy(array) else: pointer_i.data = torch.from_numpy(array) if model_opt.gpt2_params_std > 0: if padded_vocab: raise NotImplementedError else: pointer_i.orig = torch.from_numpy(array).clone() if 'enc_model' in checkpoint: load_dict = {k[8:]: v for (k, v) in checkpoint['enc_model'] if 'encoder' in k} encoder.load_state_dict(load_dict, strict=True) else: if model_opt.param_init != 0.0: for p in model.parameters(): p.data.uniform_(-model_opt.param_init, model_opt.param_init) for p in generator.parameters(): p.data.uniform_(-model_opt.param_init, model_opt.param_init) if model_opt.param_init_glorot: for p in model.parameters(): if p.dim() > 1: xavier_uniform_(p) for p in generator.parameters(): if p.dim() > 1: xavier_uniform_(p) if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') and (model.encoder.embeddings is not None): model.encoder.embeddings.load_pretrained_vectors(model_opt.pre_word_vecs_enc) if hasattr(model.decoder, 'embeddings'): 
model.decoder.embeddings.load_pretrained_vectors(model_opt.pre_word_vecs_dec) if model_opt.notrain_emb or model_opt.notrain_embanddec: if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings: model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False if model_opt.share_embeddings: model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False generator[0].weight.requires_grad = False if model_opt.notrain_genbias: generator[0].bias.requires_grad = False if model_opt.notrain_embanddec: for (name, p) in load_decoder.layer_norm.named_parameters(): p.requires_grad = False for (name, p) in load_decoder.transformer_layers.named_parameters(): if 'context' not in name and 'ctx' not in name: p.requires_grad = False if model_opt.onlytrainln: for (name, p) in model.decoder.named_parameters(): if 'layer_norm' not in name: p.requires_grad = False for p in generator.parameters(): p.requires_grad = False if model_opt.onlytrainoutp: if model_opt.share_decoder_embeddings: raise ValueError for p in model.decoder.parameters(): p.requires_grad = False if model_opt.simple_fusion: for p in lm_decoder.parameters(): p.requires_grad = False for p in generator.lm_linear.parameters(): p.requires_grad = False model.generator = generator model.to(device) if model_opt.model_dtype == 'fp16': model.half() for p in model.parameters(): if hasattr(p, 'orig'): p.orig = p.orig.to(device) if model_opt.model_dtype == 'fp16': p.orig = p.orig.half() model = model </DeepExtract> nn.Linear.extra_repr = linear_repr_patch nn.LayerNorm.extra_repr = ln_repr_patch nn.Embedding.extra_repr = emb_repr_patch logger.info(model) return model
def build_model(model_opt, opt, fields, checkpoint): logger.info('Building model...') if model_opt.model_type == 'text': src_field = fields['src'] src_emb = build_embeddings(model_opt, src_field) else: src_emb = None encoder = build_encoder(model_opt, src_emb) tgt_field = fields['tgt'] tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False) if model_opt.share_embeddings: assert src_field.base_field.vocab == tgt_field.base_field.vocab, 'preprocess with -share_vocab if you use share_embeddings' tgt_emb.word_lut.weight = src_emb.word_lut.weight if model_opt.share_position_embeddings: tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight decoder = build_decoder(model_opt, tgt_emb) if use_gpu(opt) and gpu_id is not None: device = torch.device('cuda', gpu_id) elif use_gpu(opt) and (not gpu_id): device = torch.device('cuda') elif not use_gpu(opt): device = torch.device('cpu') if model_opt.simple_fusion: layers = 12 size = 768 heads = 12 lm_decoder_opt = copy.deepcopy(model_opt) lm_decoder_opt.dec_layers = layers lm_decoder_opt.use_GPT_version_ctxattn = False lm_decoder_opt.use_GPT_version_psa = False lm_decoder_opt.use_GPT_version_unconditional = True lm_decoder_opt.tgt_word_vec_size = size lm_decoder_opt.rnn_size = size lm_decoder_opt.dec_rnn_size = size lm_decoder_opt.transformer_ff = size * 4 lm_decoder_opt.dec_heads = heads lm_decoder_opt.position_encoding_learned_dec = True lm_decoder_opt.share_decoder_embeddings = True lm_decoder_opt.dropout = 0 lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False) logger.info(lm_decoder_emb) lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb) load_decoder = lm_decoder model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder) generator = SimpleFusionGenerator(model_opt.dec_rnn_size, lm_decoder_opt.dec_rnn_size, len(fields['tgt'].base_field.vocab)) generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight if model_opt.share_decoder_embeddings: generator.decoder_linear.weight = decoder.embeddings.word_lut.weight gen_linear = generator.lm_linear else: load_decoder = decoder if model_opt.unconditional: model = onmt.models.UncondModel(decoder) else: model = onmt.models.NMTModel(encoder, decoder) if not model_opt.copy_attn: if model_opt.generator_function == 'sparsemax': gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1) else: gen_func = nn.LogSoftmax(dim=-1) if model_opt.padded_vocab_fix_me_later: gen_func = nn.Sequential(PadGen(), gen_func) generator = nn.Sequential(nn.Linear(model_opt.dec_rnn_size, len(fields['tgt'].base_field.vocab)), Cast(torch.float32), gen_func) if model_opt.share_decoder_embeddings: generator[0].weight = decoder.embeddings.word_lut.weight gen_linear = generator[0] else: tgt_base_field = fields['tgt'].base_field vocab_size = len(tgt_base_field.vocab) pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token] generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx) if model_opt.share_decoder_embeddings: generator.linear.weight = decoder.embeddings.word_lut.weight gen_linear = generator.linear if model_opt.encdec_share_params: for (name, p) in decoder.named_parameters(): if 'ctx' in name or 'context' in name: continue pointer = encoder attrs = name.split('.') for attr_name in attrs[:-1]: pointer = getattr(pointer, attr_name) setattr(pointer, attrs[-1], p) if checkpoint is not None: if 'gpt2_params' not in checkpoint and 'enc_model' not in checkpoint: def fix_key(s): s = re.sub('(.*)\\.layer_norm((_\\d+)?)\\.b_2', 
'\\1.layer_norm\\2.bias', s) s = re.sub('(.*)\\.layer_norm((_\\d+)?)\\.a_2', '\\1.layer_norm\\2.weight', s) model = s checkpoint['model'] = {fix_key(k): v for (k, v) in checkpoint['model'].items()} if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from: for p in decoder.parameters(): if p.dim() > 1: xavier_uniform_(p) for p in encoder.parameters(): if p.dim() > 1: xavier_uniform_(p) if model_opt.ctx_weight_param: for (name, p) in decoder.named_parameters(): if 'ctx_weight' in name: p.data.zero_() if 'ctx_bias' in name: p.data.fill_(-10) model.load_state_dict(checkpoint['model'], strict=False) generator.load_state_dict(checkpoint['generator'], strict=False) else: if 'gpt2_params' in checkpoint: init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or (model_opt.GPT_representation_mode != 'none') if init_something: if model_opt.gpt2_init_zero: for p in decoder.parameters(): p.data.zero_() if model_opt.simple_fusion: generator.decoder_linear.weight.data.zero_() generator.decoder_linear.bias.data.zero_() else: for p in decoder.parameters(): if p.dim() > 1: xavier_uniform_(p) if encoder is not None: for p in encoder.parameters(): if p.dim() > 1: xavier_uniform_(p) for p in generator.parameters(): if p.dim() > 1: xavier_uniform_(p) if model_opt.zero_bias_init: gen_linear.bias.data.zero_() if model_opt.ctx_weight_param: for (name, p) in decoder.named_parameters(): if 'ctx_weight' in name: p.data.zero_() if 'ctx_bias' in name: p.data.fill_(-10) gen_linear.bias.data.zero_() load_models = [] if model_opt.GPT_representation_mode != 'none': load_embs = [] if model_opt.GPT_representation_loc in ['both', 'src']: load_models.append(src_emb.gpt_model) load_embs.append(src_emb) if model_opt.GPT_representation_loc in ['both', 'tgt']: load_models.append(tgt_emb.gpt_model) load_embs.append(tgt_emb) elif model_opt.gpt2_init_embanddec or model_opt.simple_fusion: load_models = [load_decoder] elif model_opt.gpt2_init_embandenc: load_models = [encoder] it_list = list(checkpoint['gpt2_params']) for (lm_idx, load_model) in enumerate(load_models): for (name, array) in it_list: name = name[6:] name = name.split('/') assigned = False if name[0] == 'wpe': if model_opt.GPT_representation_mode != 'none': pointer = load_embs[lm_idx].make_embedding.pe.pe.weight else: pointer = load_model.embeddings.make_embedding.pe.pe.weight elif name[0] == 'wte': if model_opt.GPT_representation_mode != 'none': pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight] else: pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight] if not model_opt.nopretrain_decemb: pointer.append(gen_linear.weight) if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb: pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight) elif name[0] == 'ln_f': if name[1] == 'g': pointer = load_model.layer_norm.weight elif name[1] == 'b': pointer = load_model.layer_norm.bias else: raise ValueError('I am missing something here!') elif name[0][0] == 'h': layer_num = name[0][1:] pointer = getattr(load_model.transformer_layers, layer_num) if name[1] == 'attn': assigned = True pointer = pointer.self_attn full_data = torch.from_numpy(array) if name[2] == 'c_attn': end_size = full_data.shape[-1] // 3 assert full_data.shape[-1] % 3 == 0 if name[3] == 'b': if init_something: pointer.linear_query.bias.data = full_data[:end_size] pointer.linear_keys.bias.data = full_data[end_size:end_size * 2] pointer.linear_values.bias.data = full_data[end_size * 2:] if 
model_opt.gpt2_params_std > 0: pointer.linear_query.bias.orig = full_data[:end_size].clone() pointer.linear_keys.bias.orig = full_data[end_size:end_size * 2].clone() pointer.linear_values.bias.orig = full_data[end_size * 2:].clone() elif name[3] == 'w': if init_something: pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous() pointer.linear_keys.weight.data = full_data[:, end_size:end_size * 2].t().contiguous() pointer.linear_values.weight.data = full_data[:, end_size * 2:].t().contiguous() if model_opt.gpt2_params_std > 0: pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone() pointer.linear_keys.weight.orig = full_data[:, end_size:end_size * 2].t().contiguous().clone() pointer.linear_values.weight.orig = full_data[:, end_size * 2:].t().contiguous().clone() else: raise ValueError('I am missing something here!') elif name[2] == 'c_proj': if name[3] == 'b': if init_something: pointer.final_linear.bias.data = full_data if model_opt.gpt2_params_std > 0: pointer.final_linear.bias.orig = full_data.clone() elif name[3] == 'w': if init_something: pointer.final_linear.weight.data = full_data.t().contiguous() if model_opt.gpt2_params_std > 0: pointer.final_linear.weight.orig = full_data.t().contiguous().clone() else: raise ValueError('I am missing something here!') elif name[1] == 'ln_1' or name[1] == 'ln_2': num = name[1][3] pointer = getattr(pointer, 'layer_norm_' + num) if name[2] == 'b': pointer = pointer.bias elif name[2] == 'g': pointer = pointer.weight else: raise ValueError('I am missing something here!') elif name[1] == 'mlp': pointer = pointer.feed_forward pointer = getattr(pointer, name[2]) if name[3] == 'b': pointer = pointer.bias elif name[3] == 'w': pointer = pointer.weight else: raise ValueError('I am missing something here!') else: raise ValueError('I am missing something here!') else: raise ValueError('I am missing something here!') if not assigned: if name[-1] == 'w' or name[-1] == 'g': array = array.T if not isinstance(pointer, list): pointer = [pointer] for pointer_i in pointer: target_size = int(math.ceil(array.shape[0] / 8)) * 8 padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:] try: assert pointer_i.shape == array.shape or padded_vocab except AssertionError as e: e.args += (pointer_i.shape, array.shape) raise if init_something: print('Initialize PyTorch weight {}'.format(name)) if padded_vocab: pointer_i.data[:array.shape[0]] = torch.from_numpy(array) else: pointer_i.data = torch.from_numpy(array) if model_opt.gpt2_params_std > 0: if padded_vocab: raise NotImplementedError else: pointer_i.orig = torch.from_numpy(array).clone() if 'enc_model' in checkpoint: load_dict = {k[8:]: v for (k, v) in checkpoint['enc_model'] if 'encoder' in k} encoder.load_state_dict(load_dict, strict=True) else: if model_opt.param_init != 0.0: for p in model.parameters(): p.data.uniform_(-model_opt.param_init, model_opt.param_init) for p in generator.parameters(): p.data.uniform_(-model_opt.param_init, model_opt.param_init) if model_opt.param_init_glorot: for p in model.parameters(): if p.dim() > 1: xavier_uniform_(p) for p in generator.parameters(): if p.dim() > 1: xavier_uniform_(p) if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') and (model.encoder.embeddings is not None): model.encoder.embeddings.load_pretrained_vectors(model_opt.pre_word_vecs_enc) if hasattr(model.decoder, 'embeddings'): 
model.decoder.embeddings.load_pretrained_vectors(model_opt.pre_word_vecs_dec) if model_opt.notrain_emb or model_opt.notrain_embanddec: if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings: model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False if model_opt.share_embeddings: model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False generator[0].weight.requires_grad = False if model_opt.notrain_genbias: generator[0].bias.requires_grad = False if model_opt.notrain_embanddec: for (name, p) in load_decoder.layer_norm.named_parameters(): p.requires_grad = False for (name, p) in load_decoder.transformer_layers.named_parameters(): if 'context' not in name and 'ctx' not in name: p.requires_grad = False if model_opt.onlytrainln: for (name, p) in model.decoder.named_parameters(): if 'layer_norm' not in name: p.requires_grad = False for p in generator.parameters(): p.requires_grad = False if model_opt.onlytrainoutp: if model_opt.share_decoder_embeddings: raise ValueError for p in model.decoder.parameters(): p.requires_grad = False if model_opt.simple_fusion: for p in lm_decoder.parameters(): p.requires_grad = False for p in generator.lm_linear.parameters(): p.requires_grad = False model.generator = generator model.to(device) if model_opt.model_dtype == 'fp16': model.half() for p in model.parameters(): if hasattr(p, 'orig'): p.orig = p.orig.to(device) if model_opt.model_dtype == 'fp16': p.orig = p.orig.half() model = model nn.Linear.extra_repr = linear_repr_patch nn.LayerNorm.extra_repr = ln_repr_patch nn.Embedding.extra_repr = emb_repr_patch logger.info(model) return model
encoder-agnostic-adaptation
positive
def inorder(self, node):
    """ Function to print the inorder traversal of the tree """
    if node is None:
        return
    <DeepExtract>
    if node.left is None:
        return
    self.inorder(node.left.left)
    print(node.left.data)
    self.inorder(node.left.right)
    </DeepExtract>
    print(node.data)
    <DeepExtract>
    if node.right is None:
        return
    self.inorder(node.right.left)
    print(node.right.data)
    self.inorder(node.right.right)
    </DeepExtract>
def inorder(self, node):
    """ Function to print the inorder traversal of the tree """
    if node is None:
        return
    if node.left is None:
        return
    self.inorder(node.left.left)
    print(node.left.data)
    self.inorder(node.left.right)
    print(node.data)
    if node.right is None:
        return
    self.inorder(node.right.left)
    print(node.right.data)
    self.inorder(node.right.right)
data-structures-and-algorithms
positive
def __init__(self, data):
    """Initializes.

    data: data in whatever form is relevant
    """
    self.data = data
    <DeepExtract>
    pass
    </DeepExtract>
    <DeepExtract>
    raise UnimplementedMethodException()
    </DeepExtract>
    self.test_stats = None
    self.test_cdf = None
def __init__(self, data):
    """Initializes.

    data: data in whatever form is relevant
    """
    self.data = data
    pass
    raise UnimplementedMethodException()
    self.test_stats = None
    self.test_cdf = None
bayesianGameofThrones
positive
def forward(self, input: Tensor) -> Tensor:
    if self.train:
        <DeepExtract>
        if is_1d(input):
            self.mask = [lambda _: 0 if random.random() < self.p else 1(x) for x in input]
        else:
            self.mask = [tensor_apply(lambda _: 0 if random.random() < self.p else 1, tensor_i) for tensor_i in input]
        </DeepExtract>
        return tensor_combine(operator.mul, input, self.mask)
    else:
        return tensor_apply(lambda x: x * (1 - self.p), input)
def forward(self, input: Tensor) -> Tensor:
    if self.train:
        if is_1d(input):
            self.mask = [lambda _: 0 if random.random() < self.p else 1(x) for x in input]
        else:
            self.mask = [tensor_apply(lambda _: 0 if random.random() < self.p else 1, tensor_i) for tensor_i in input]
        return tensor_combine(operator.mul, input, self.mask)
    else:
        return tensor_apply(lambda x: x * (1 - self.p), input)
data-science-from-scratch
positive
def test_01_basic_harvester(self): if config.get('ckan.harvest.mq.type') == 'redis': redis = queue.get_connection() redis.flushdb() consumer = queue.get_gather_consumer() consumer_fetch = queue.get_fetch_consumer() consumer.queue_purge(queue=queue.get_gather_queue_name()) consumer_fetch.queue_purge(queue=queue.get_fetch_queue_name()) user = toolkit.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})['name'] context = {'model': model, 'session': model.Session, 'user': user, 'api_version': 3, 'ignore_auth': True} <DeepExtract> source_dict = {'title': 'Test Source', 'name': 'test-source', 'url': 'basic_test', 'source_type': 'test'} try: harvest_source = toolkit.get_action('harvest_source_create')(context, source_dict) except toolkit.ValidationError: harvest_source = toolkit.get_action('harvest_source_show')(context, {'id': source_dict['name']}) pass assert harvest_source['source_type'] == 'test', harvest_source assert harvest_source['url'] == 'basic_test', harvest_source harvest_job = toolkit.get_action('harvest_job_create')(context, {'source_id': harvest_source['id'], 'run': True}) job_id = harvest_job['id'] assert harvest_job['source_id'] == harvest_source['id'], harvest_job assert harvest_job['status'] == u'Running' assert toolkit.get_action('harvest_job_show')(context, {'id': job_id})['status'] == u'Running' reply = consumer.basic_get(queue='ckan.harvest.gather') queue.gather_callback(consumer, *reply) all_objects = model.Session.query(HarvestObject).all() assert len(all_objects) == 3 assert all_objects[0].state == 'WAITING' assert all_objects[1].state == 'WAITING' assert all_objects[2].state == 'WAITING' assert len(model.Session.query(HarvestObject).all()) == 3 assert len(model.Session.query(HarvestObjectExtra).all()) == 1 (harvest_source, job_id) = (harvest_source, job_id) </DeepExtract> reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) count = model.Session.query(model.Package).filter(model.Package.type == 'dataset').count() assert count == 3 all_objects = model.Session.query(HarvestObject).filter_by(current=True).all() assert len(all_objects) == 3 assert all_objects[0].state == 'COMPLETE' assert all_objects[0].report_status == 'added' assert all_objects[1].state == 'COMPLETE' assert all_objects[1].report_status == 'added' assert all_objects[2].state == 'COMPLETE' assert all_objects[2].report_status == 'added' toolkit.get_action('harvest_jobs_run')(context, {'source_id': harvest_source['id']}) harvest_job = toolkit.get_action('harvest_job_show')(context, {'id': job_id}) assert harvest_job['status'] == u'Finished' assert harvest_job['stats'] == {'added': 3, 'updated': 0, 'not modified': 0, 'errored': 0, 'deleted': 0} harvest_source_dict = toolkit.get_action('harvest_source_show')(context, {'id': harvest_source['id']}) assert harvest_source_dict['status']['last_job']['stats'] == {'added': 3, 'updated': 0, 'not modified': 0, 'errored': 0, 'deleted': 0} assert harvest_source_dict['status']['total_datasets'] == 3 assert harvest_source_dict['status']['job_count'] == 1 harvest_job = toolkit.get_action('harvest_job_create')(context, {'source_id': harvest_source['id'], 'run': True}) job_id = harvest_job['id'] assert toolkit.get_action('harvest_job_show')(context, {'id': job_id})['status'] == u'Running' reply = 
consumer.basic_get(queue='ckan.harvest.gather') queue.gather_callback(consumer, *reply) all_objects = model.Session.query(HarvestObject).all() assert len(all_objects) == 6 reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) count = model.Session.query(model.Package).filter(model.Package.type == 'dataset').count() assert count == 3 all_objects = model.Session.query(HarvestObject).filter_by(report_status='added').all() assert len(all_objects) == 3 all_objects = model.Session.query(HarvestObject).filter_by(report_status='updated').all() assert len(all_objects) == 2 all_objects = model.Session.query(HarvestObject).filter_by(report_status='deleted').all() assert len(all_objects) == 1 toolkit.get_action('harvest_jobs_run')(context, {'source_id': harvest_source['id']}) harvest_job = toolkit.get_action('harvest_job_show')(context, {'id': job_id}) assert harvest_job['stats'] == {'added': 0, 'updated': 2, 'not modified': 0, 'errored': 0, 'deleted': 1} harvest_source_dict = toolkit.get_action('harvest_source_show')(context, {'id': harvest_source['id']}) assert harvest_source_dict['status']['last_job']['stats'] == {'added': 0, 'updated': 2, 'not modified': 0, 'errored': 0, 'deleted': 1} assert harvest_source_dict['status']['total_datasets'] == 2 assert harvest_source_dict['status']['job_count'] == 2
def test_01_basic_harvester(self): if config.get('ckan.harvest.mq.type') == 'redis': redis = queue.get_connection() redis.flushdb() consumer = queue.get_gather_consumer() consumer_fetch = queue.get_fetch_consumer() consumer.queue_purge(queue=queue.get_gather_queue_name()) consumer_fetch.queue_purge(queue=queue.get_fetch_queue_name()) user = toolkit.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})['name'] context = {'model': model, 'session': model.Session, 'user': user, 'api_version': 3, 'ignore_auth': True} source_dict = {'title': 'Test Source', 'name': 'test-source', 'url': 'basic_test', 'source_type': 'test'} try: harvest_source = toolkit.get_action('harvest_source_create')(context, source_dict) except toolkit.ValidationError: harvest_source = toolkit.get_action('harvest_source_show')(context, {'id': source_dict['name']}) pass assert harvest_source['source_type'] == 'test', harvest_source assert harvest_source['url'] == 'basic_test', harvest_source harvest_job = toolkit.get_action('harvest_job_create')(context, {'source_id': harvest_source['id'], 'run': True}) job_id = harvest_job['id'] assert harvest_job['source_id'] == harvest_source['id'], harvest_job assert harvest_job['status'] == u'Running' assert toolkit.get_action('harvest_job_show')(context, {'id': job_id})['status'] == u'Running' reply = consumer.basic_get(queue='ckan.harvest.gather') queue.gather_callback(consumer, *reply) all_objects = model.Session.query(HarvestObject).all() assert len(all_objects) == 3 assert all_objects[0].state == 'WAITING' assert all_objects[1].state == 'WAITING' assert all_objects[2].state == 'WAITING' assert len(model.Session.query(HarvestObject).all()) == 3 assert len(model.Session.query(HarvestObjectExtra).all()) == 1 (harvest_source, job_id) = (harvest_source, job_id) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) count = model.Session.query(model.Package).filter(model.Package.type == 'dataset').count() assert count == 3 all_objects = model.Session.query(HarvestObject).filter_by(current=True).all() assert len(all_objects) == 3 assert all_objects[0].state == 'COMPLETE' assert all_objects[0].report_status == 'added' assert all_objects[1].state == 'COMPLETE' assert all_objects[1].report_status == 'added' assert all_objects[2].state == 'COMPLETE' assert all_objects[2].report_status == 'added' toolkit.get_action('harvest_jobs_run')(context, {'source_id': harvest_source['id']}) harvest_job = toolkit.get_action('harvest_job_show')(context, {'id': job_id}) assert harvest_job['status'] == u'Finished' assert harvest_job['stats'] == {'added': 3, 'updated': 0, 'not modified': 0, 'errored': 0, 'deleted': 0} harvest_source_dict = toolkit.get_action('harvest_source_show')(context, {'id': harvest_source['id']}) assert harvest_source_dict['status']['last_job']['stats'] == {'added': 3, 'updated': 0, 'not modified': 0, 'errored': 0, 'deleted': 0} assert harvest_source_dict['status']['total_datasets'] == 3 assert harvest_source_dict['status']['job_count'] == 1 harvest_job = toolkit.get_action('harvest_job_create')(context, {'source_id': harvest_source['id'], 'run': True}) job_id = harvest_job['id'] assert toolkit.get_action('harvest_job_show')(context, {'id': job_id})['status'] == u'Running' reply = 
consumer.basic_get(queue='ckan.harvest.gather') queue.gather_callback(consumer, *reply) all_objects = model.Session.query(HarvestObject).all() assert len(all_objects) == 6 reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) reply = consumer_fetch.basic_get(queue='ckan.harvest.fetch') queue.fetch_callback(consumer_fetch, *reply) count = model.Session.query(model.Package).filter(model.Package.type == 'dataset').count() assert count == 3 all_objects = model.Session.query(HarvestObject).filter_by(report_status='added').all() assert len(all_objects) == 3 all_objects = model.Session.query(HarvestObject).filter_by(report_status='updated').all() assert len(all_objects) == 2 all_objects = model.Session.query(HarvestObject).filter_by(report_status='deleted').all() assert len(all_objects) == 1 toolkit.get_action('harvest_jobs_run')(context, {'source_id': harvest_source['id']}) harvest_job = toolkit.get_action('harvest_job_show')(context, {'id': job_id}) assert harvest_job['stats'] == {'added': 0, 'updated': 2, 'not modified': 0, 'errored': 0, 'deleted': 1} harvest_source_dict = toolkit.get_action('harvest_source_show')(context, {'id': harvest_source['id']}) assert harvest_source_dict['status']['last_job']['stats'] == {'added': 0, 'updated': 2, 'not modified': 0, 'errored': 0, 'deleted': 1} assert harvest_source_dict['status']['total_datasets'] == 2 assert harvest_source_dict['status']['job_count'] == 2
ckanext-harvest
positive
def to_etree(self, element, mapping_=None, nsmap_=None):
    if self.category == MixedContainer.CategoryText:
        if self.value.strip():
            if len(element) > 0:
                if element[-1].tail is None:
                    element[-1].tail = self.value
                else:
                    element[-1].tail += self.value
            elif element.text is None:
                element.text = self.value
            else:
                element.text += self.value
    elif self.category == MixedContainer.CategorySimple:
        subelement = etree_.SubElement(element, '%s' % self.name)
        <DeepExtract>
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif self.content_type == MixedContainer.TypeInteger or self.content_type == MixedContainer.TypeBoolean:
            text = '%d' % self.value
        elif self.content_type == MixedContainer.TypeFloat or self.content_type == MixedContainer.TypeDecimal:
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        subelement.text = text
        </DeepExtract>
    else:
        self.value.to_etree(element)
def to_etree(self, element, mapping_=None, nsmap_=None):
    if self.category == MixedContainer.CategoryText:
        if self.value.strip():
            if len(element) > 0:
                if element[-1].tail is None:
                    element[-1].tail = self.value
                else:
                    element[-1].tail += self.value
            elif element.text is None:
                element.text = self.value
            else:
                element.text += self.value
    elif self.category == MixedContainer.CategorySimple:
        subelement = etree_.SubElement(element, '%s' % self.name)
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif self.content_type == MixedContainer.TypeInteger or self.content_type == MixedContainer.TypeBoolean:
            text = '%d' % self.value
        elif self.content_type == MixedContainer.TypeFloat or self.content_type == MixedContainer.TypeDecimal:
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        subelement.text = text
    else:
        self.value.to_etree(element)
autopkg
positive
def __init__(self): FieldId.__init__(self) self.Images = {} <DeepExtract> self.IdList.append(wx.ID_ADD) self.ADD = wx.ID_ADD </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('append', list): 'append'.append(new_id) self.APPEND = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance(member_list, list): member_list.append(new_id) self.BIN = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('browse', list): 'browse'.append(new_id) self.BROWSE = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('build', list): 'build'.append(new_id) self.BUILD = new_id </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_CANCEL) self.CANCEL = wx.ID_CANCEL </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_CLEAR) self.CLEAR = wx.ID_CLEAR </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_CLOSE) self.CLOSE = wx.ID_CLOSE </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_OK) self.CONFIRM = wx.ID_OK </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_EXIT) self.EXIT = wx.ID_EXIT </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('full', list): 'full'.append(new_id) self.FULL = new_id </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_HELP) self.HELP = wx.ID_HELP </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('hide', list): 'hide'.append(new_id) self.HIDE = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('import', list): 'import'.append(new_id) self.IMPORT = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('mode', list): 'mode'.append(new_id) self.MODE = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('next', list): 'next'.append(new_id) self.NEXT = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('prev', list): 'prev'.append(new_id) self.PREV = new_id </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_PREVIEW) self.PREVIEW = wx.ID_PREVIEW </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_REFRESH) self.REFRESH = wx.ID_REFRESH </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_REMOVE) self.REMOVE = wx.ID_REMOVE </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('rename', list): 'rename'.append(new_id) self.RENAME = new_id </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_SAVE) self.SAVE = wx.ID_SAVE </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance('short', list): 'short'.append(new_id) self.SHORT = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance(member_list, list): member_list.append(new_id) self.SRC = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance(member_list, list): member_list.append(new_id) self.STAGE = new_id </DeepExtract> <DeepExtract> new_id = wx.NewId() if isinstance(member_list, list): member_list.append(new_id) self.TARGET = new_id </DeepExtract> <DeepExtract> self.IdList.append(wx.ID_PREVIEW_ZOOM) self.ZOOM = wx.ID_PREVIEW_ZOOM </DeepExtract>
def __init__(self): FieldId.__init__(self) self.Images = {} self.IdList.append(wx.ID_ADD) self.ADD = wx.ID_ADD new_id = wx.NewId() if isinstance('append', list): 'append'.append(new_id) self.APPEND = new_id new_id = wx.NewId() if isinstance(member_list, list): member_list.append(new_id) self.BIN = new_id new_id = wx.NewId() if isinstance('browse', list): 'browse'.append(new_id) self.BROWSE = new_id new_id = wx.NewId() if isinstance('build', list): 'build'.append(new_id) self.BUILD = new_id self.IdList.append(wx.ID_CANCEL) self.CANCEL = wx.ID_CANCEL self.IdList.append(wx.ID_CLEAR) self.CLEAR = wx.ID_CLEAR self.IdList.append(wx.ID_CLOSE) self.CLOSE = wx.ID_CLOSE self.IdList.append(wx.ID_OK) self.CONFIRM = wx.ID_OK self.IdList.append(wx.ID_EXIT) self.EXIT = wx.ID_EXIT new_id = wx.NewId() if isinstance('full', list): 'full'.append(new_id) self.FULL = new_id self.IdList.append(wx.ID_HELP) self.HELP = wx.ID_HELP new_id = wx.NewId() if isinstance('hide', list): 'hide'.append(new_id) self.HIDE = new_id new_id = wx.NewId() if isinstance('import', list): 'import'.append(new_id) self.IMPORT = new_id new_id = wx.NewId() if isinstance('mode', list): 'mode'.append(new_id) self.MODE = new_id new_id = wx.NewId() if isinstance('next', list): 'next'.append(new_id) self.NEXT = new_id new_id = wx.NewId() if isinstance('prev', list): 'prev'.append(new_id) self.PREV = new_id self.IdList.append(wx.ID_PREVIEW) self.PREVIEW = wx.ID_PREVIEW self.IdList.append(wx.ID_REFRESH) self.REFRESH = wx.ID_REFRESH self.IdList.append(wx.ID_REMOVE) self.REMOVE = wx.ID_REMOVE new_id = wx.NewId() if isinstance('rename', list): 'rename'.append(new_id) self.RENAME = new_id self.IdList.append(wx.ID_SAVE) self.SAVE = wx.ID_SAVE new_id = wx.NewId() if isinstance('short', list): 'short'.append(new_id) self.SHORT = new_id new_id = wx.NewId() if isinstance(member_list, list): member_list.append(new_id) self.SRC = new_id new_id = wx.NewId() if isinstance(member_list, list): member_list.append(new_id) self.STAGE = new_id new_id = wx.NewId() if isinstance(member_list, list): member_list.append(new_id) self.TARGET = new_id self.IdList.append(wx.ID_PREVIEW_ZOOM) self.ZOOM = wx.ID_PREVIEW_ZOOM </DeepExtract>
debreate
positive
def load_compressed_tree(s): def compress_tree(tree): assert not isinstance(tree, str) if len(tree) == 1: if isinstance(tree[0], nltk.tree.Tree): return compress_tree(tree[0]) else: return tree else: for (i, t) in enumerate(tree): if isinstance(t, nltk.tree.Tree): <DeepExtract> assert not isinstance(t, str) if len(t) == 1: if isinstance(t[0], nltk.tree.Tree): t[i] = compress_tree(t[0]) else: t[i] = t else: for (i, t) in enumerate(t): if isinstance(t, nltk.tree.Tree): t[i] = compress_tree(t) else: t[i] = t t[i] = t </DeepExtract> else: tree[i] = t return tree return compress_tree(nltk.tree.Tree.fromstring(s))
def load_compressed_tree(s): def compress_tree(tree): assert not isinstance(tree, str) if len(tree) == 1: if isinstance(tree[0], nltk.tree.Tree): return compress_tree(tree[0]) else: return tree else: for (i, t) in enumerate(tree): if isinstance(t, nltk.tree.Tree): assert not isinstance(t, str) if len(t) == 1: if isinstance(t[0], nltk.tree.Tree): t[i] = compress_tree(t[0]) else: t[i] = t else: for (i, t) in enumerate(t): if isinstance(t, nltk.tree.Tree): t[i] = compress_tree(t) else: t[i] = t t[i] = t else: tree[i] = t return tree return compress_tree(nltk.tree.Tree.fromstring(s))
bi-att-flow
positive
def test_request_with_server_push(self): with h3_client_and_server() as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers(stream_id=stream_id, headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/')], end_stream=True) <DeepExtract> quic_receiver = h3_server._quic if hasattr(quic_client, 'stream_queue'): quic_receiver._events.extend(quic_client.stream_queue) quic_client.stream_queue.clear() else: transfer(quic_client, quic_receiver) http_events = [] event = quic_receiver.next_event() while event is not None: http_events.extend(h3_server.handle_event(event)) event = quic_receiver.next_event() events = http_events </DeepExtract> self.assertEqual(events, [HeadersReceived(headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/')], stream_id=stream_id, stream_ended=True)]) push_stream_id_css = h3_server.send_push_promise(stream_id=stream_id, headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/app.css')]) self.assertEqual(push_stream_id_css, 15) push_stream_id_js = h3_server.send_push_promise(stream_id=stream_id, headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/app.js')]) self.assertEqual(push_stream_id_js, 19) h3_server.send_headers(stream_id=stream_id, headers=[(b':status', b'200'), (b'content-type', b'text/html; charset=utf-8')], end_stream=False) h3_server.send_data(stream_id=stream_id, data=b'<html><body>hello</body></html>', end_stream=True) h3_server.send_headers(stream_id=push_stream_id_css, headers=[(b':status', b'200'), (b'content-type', b'text/css')], end_stream=False) h3_server.send_data(stream_id=push_stream_id_css, data=b'body { color: pink }', end_stream=True) h3_server.send_headers(stream_id=push_stream_id_js, headers=[(b':status', b'200'), (b'content-type', b'application/javascript')], end_stream=False) h3_server.send_data(stream_id=push_stream_id_js, data=b"alert('howdee');", end_stream=True) <DeepExtract> quic_receiver = h3_client._quic if hasattr(quic_server, 'stream_queue'): quic_receiver._events.extend(quic_server.stream_queue) quic_server.stream_queue.clear() else: transfer(quic_server, quic_receiver) http_events = [] event = quic_receiver.next_event() while event is not None: http_events.extend(h3_client.handle_event(event)) event = quic_receiver.next_event() events = http_events </DeepExtract> self.assertEqual(events, [PushPromiseReceived(headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/app.css')], push_id=0, stream_id=stream_id), PushPromiseReceived(headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/app.js')], push_id=1, stream_id=stream_id), HeadersReceived(headers=[(b':status', b'200'), (b'content-type', b'text/html; charset=utf-8')], stream_id=stream_id, stream_ended=False), DataReceived(data=b'<html><body>hello</body></html>', stream_id=stream_id, stream_ended=True), HeadersReceived(headers=[(b':status', b'200'), (b'content-type', b'text/css')], push_id=0, stream_id=push_stream_id_css, stream_ended=False), DataReceived(data=b'body { color: pink }', push_id=0, stream_id=push_stream_id_css, stream_ended=True), HeadersReceived(headers=[(b':status', b'200'), (b'content-type', b'application/javascript')], push_id=1, stream_id=push_stream_id_js, 
stream_ended=False), DataReceived(data=b"alert('howdee');", push_id=1, stream_id=push_stream_id_js, stream_ended=True)])
def test_request_with_server_push(self): with h3_client_and_server() as (quic_client, quic_server): h3_client = H3Connection(quic_client) h3_server = H3Connection(quic_server) stream_id = quic_client.get_next_available_stream_id() h3_client.send_headers(stream_id=stream_id, headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/')], end_stream=True) quic_receiver = h3_server._quic if hasattr(quic_client, 'stream_queue'): quic_receiver._events.extend(quic_client.stream_queue) quic_client.stream_queue.clear() else: transfer(quic_client, quic_receiver) http_events = [] event = quic_receiver.next_event() while event is not None: http_events.extend(h3_server.handle_event(event)) event = quic_receiver.next_event() events = http_events self.assertEqual(events, [HeadersReceived(headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/')], stream_id=stream_id, stream_ended=True)]) push_stream_id_css = h3_server.send_push_promise(stream_id=stream_id, headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/app.css')]) self.assertEqual(push_stream_id_css, 15) push_stream_id_js = h3_server.send_push_promise(stream_id=stream_id, headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/app.js')]) self.assertEqual(push_stream_id_js, 19) h3_server.send_headers(stream_id=stream_id, headers=[(b':status', b'200'), (b'content-type', b'text/html; charset=utf-8')], end_stream=False) h3_server.send_data(stream_id=stream_id, data=b'<html><body>hello</body></html>', end_stream=True) h3_server.send_headers(stream_id=push_stream_id_css, headers=[(b':status', b'200'), (b'content-type', b'text/css')], end_stream=False) h3_server.send_data(stream_id=push_stream_id_css, data=b'body { color: pink }', end_stream=True) h3_server.send_headers(stream_id=push_stream_id_js, headers=[(b':status', b'200'), (b'content-type', b'application/javascript')], end_stream=False) h3_server.send_data(stream_id=push_stream_id_js, data=b"alert('howdee');", end_stream=True) quic_receiver = h3_client._quic if hasattr(quic_server, 'stream_queue'): quic_receiver._events.extend(quic_server.stream_queue) quic_server.stream_queue.clear() else: transfer(quic_server, quic_receiver) http_events = [] event = quic_receiver.next_event() while event is not None: http_events.extend(h3_client.handle_event(event)) event = quic_receiver.next_event() events = http_events self.assertEqual(events, [PushPromiseReceived(headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/app.css')], push_id=0, stream_id=stream_id), PushPromiseReceived(headers=[(b':method', b'GET'), (b':scheme', b'https'), (b':authority', b'localhost'), (b':path', b'/app.js')], push_id=1, stream_id=stream_id), HeadersReceived(headers=[(b':status', b'200'), (b'content-type', b'text/html; charset=utf-8')], stream_id=stream_id, stream_ended=False), DataReceived(data=b'<html><body>hello</body></html>', stream_id=stream_id, stream_ended=True), HeadersReceived(headers=[(b':status', b'200'), (b'content-type', b'text/css')], push_id=0, stream_id=push_stream_id_css, stream_ended=False), DataReceived(data=b'body { color: pink }', push_id=0, stream_id=push_stream_id_css, stream_ended=True), HeadersReceived(headers=[(b':status', b'200'), (b'content-type', b'application/javascript')], push_id=1, stream_id=push_stream_id_js, stream_ended=False), DataReceived(data=b"alert('howdee');", push_id=1, 
stream_id=push_stream_id_js, stream_ended=True)])
aioquic
positive
def forward(self, x):
    <DeepExtract>
    kernel_size_effective = self.depthwise.kernel_size[0] + (self.depthwise.kernel_size[0] - 1) * (self.depthwise.dilation[0] - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end))
    x = padded_inputs
    </DeepExtract>
    x = self.depthwise(x)
    x = self.depthwise_bn(x)
    x = self.pointwise(x)
    x = self.pointwise_bn(x)
    return x
def forward(self, x):
    kernel_size_effective = self.depthwise.kernel_size[0] + (self.depthwise.kernel_size[0] - 1) * (self.depthwise.dilation[0] - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end))
    x = padded_inputs
    x = self.depthwise(x)
    x = self.depthwise_bn(x)
    x = self.pointwise(x)
    x = self.pointwise_bn(x)
    return x
bilayer-model
positive
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
    <DeepExtract>
    x = self.backbone(img)
    if self.with_neck:
        x = self.neck(x)
    x = x
    </DeepExtract>
    outs = self.bbox_head(x)
    loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
    losses = self.bbox_head.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
    return losses
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
    x = self.backbone(img)
    if self.with_neck:
        x = self.neck(x)
    x = x
    outs = self.bbox_head(x)
    loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
    losses = self.bbox_head.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
    return losses
DetectoRS
positive
@property
def instances(self) -> Dict[type, TypeClass]:
    if self._instances is None:
        <DeepExtract>
        err = ImplicitInstancesNotFound(self.mod, self.cls, self.name)
        try:
            mod = importlib.import_module(self.mod)
        except ImportError:
            raise err
        else:
            if hasattr(mod, self.cls):
                instances = getattr(mod, self.cls)()
                self._instances = instances
            else:
                raise err
        </DeepExtract>
    return self._instances.instances
@property
def instances(self) -> Dict[type, TypeClass]:
    if self._instances is None:
        err = ImplicitInstancesNotFound(self.mod, self.cls, self.name)
        try:
            mod = importlib.import_module(self.mod)
        except ImportError:
            raise err
        else:
            if hasattr(mod, self.cls):
                instances = getattr(mod, self.cls)()
                self._instances = instances
            else:
                raise err
    return self._instances.instances
amino
positive
def test_real_xhtml_document(self):
    """A real XHTML document should come out more or less the same as it went in."""
    markup = b'<?xml version="1.0" encoding="utf-8"?>\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">\n<html xmlns="http://www.w3.org/1999/xhtml">\n<head><title>Hello.</title></head>\n<body>Goodbye.</body>\n</html>'
    <DeepExtract>
    builder = kwargs.pop('builder', self.default_builder)
    soup = BeautifulSoup(markup, builder=builder, **kwargs)
    </DeepExtract>
    self.assertEqual(soup.encode('utf-8').replace(b'\n', b''), markup.replace(b'\n', b''))
def test_real_xhtml_document(self):
    """A real XHTML document should come out more or less the same as it went in."""
    markup = b'<?xml version="1.0" encoding="utf-8"?>\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">\n<html xmlns="http://www.w3.org/1999/xhtml">\n<head><title>Hello.</title></head>\n<body>Goodbye.</body>\n</html>'
    builder = kwargs.pop('builder', self.default_builder)
    soup = BeautifulSoup(markup, builder=builder, **kwargs)
    self.assertEqual(soup.encode('utf-8').replace(b'\n', b''), markup.replace(b'\n', b''))
coursera-python-for-everybody-specialization
positive
def get_bundle(root_dir, relative_dir, url): url_without_params = url.split('?')[0] file_name = url_without_params.split('/')[-1] file_ext = os.path.splitext(file_name)[1] logger.debug('get_bundle :: Getting %s from %s' % (file_name, url)) bundle_file = tempfile.NamedTemporaryFile(prefix='tmp', suffix=file_ext, dir=root_dir, delete=False) urllib.urlretrieve(url, bundle_file.name) bundle_path = join(root_dir, relative_dir) metadata_path = join(bundle_path, 'metadata') if file_ext == '.zip': logger.info('get_bundle :: Unzipping %s' % bundle_file.name) with ZipFile(bundle_file.file, 'r') as z: z.extractall(bundle_path) <DeepExtract> files_in_path = os.listdir(bundle_path) if len(files_in_path) > 2 and 'metadata' in files_in_path: metadata_folder = None for f in files_in_path: folder = os.path.join(bundle_path, f) if os.path.isdir(folder): if 'metadata' in os.listdir(folder): metadata_folder = folder </DeepExtract> if metadata_folder: logger.info('get_bundle :: Found a submission with an extra folder, unpacking and moving up a directory') temp_folder_name = join(root_dir, '%s%s' % (relative_dir, '_tmp')) shutil.copytree(metadata_folder, temp_folder_name) shutil.rmtree(bundle_path, ignore_errors=True) shutil.move(temp_folder_name, bundle_path) for zip_file in glob(join(bundle_path, '*.zip')): name_without_extension = os.path.splitext(zip_file)[0] with ZipFile(join(bundle_path, zip_file), 'r') as z: z.extractall(join(bundle_path, name_without_extension)) else: os.mkdir(bundle_path) shutil.copyfile(bundle_file.name, metadata_path) os.chmod(bundle_path, 511) metadata = None if os.path.exists(metadata_path): logger.info('get_bundle :: Fetching extra files specified in metadata for {}'.format(metadata_path)) with open(metadata_path) as mf: metadata = yaml.load(mf) if isinstance(metadata, dict): for (k, v) in metadata.items(): if k not in ('description', 'command', 'exitCode', 'elapsedTime', 'stdout', 'stderr', 'submitted-by', 'submitted-at'): if isinstance(v, str): logger.debug('get_bundle :: Fetching recursive bundle %s %s %s' % (bundle_path, k, v)) <DeepExtract> url_without_params = v.split('?')[0] file_name = url_without_params.split('/')[-1] file_ext = os.path.splitext(file_name)[1] logger.debug('get_bundle :: Getting %s from %s' % (file_name, v)) bundle_file = tempfile.NamedTemporaryFile(prefix='tmp', suffix=file_ext, dir=bundle_path, delete=False) urllib.urlretrieve(v, bundle_file.name) bundle_path = join(bundle_path, k) metadata_path = join(bundle_path, 'metadata') if file_ext == '.zip': logger.info('get_bundle :: Unzipping %s' % bundle_file.name) with ZipFile(bundle_file.file, 'r') as z: z.extractall(bundle_path) metadata_folder = _find_only_folder_with_metadata(bundle_path) if metadata_folder: logger.info('get_bundle :: Found a submission with an extra folder, unpacking and moving up a directory') temp_folder_name = join(bundle_path, '%s%s' % (k, '_tmp')) shutil.copytree(metadata_folder, temp_folder_name) shutil.rmtree(bundle_path, ignore_errors=True) shutil.move(temp_folder_name, bundle_path) for zip_file in glob(join(bundle_path, '*.zip')): name_without_extension = os.path.splitext(zip_file)[0] with ZipFile(join(bundle_path, zip_file), 'r') as z: z.extractall(join(bundle_path, name_without_extension)) else: os.mkdir(bundle_path) shutil.copyfile(bundle_file.name, metadata_path) os.chmod(bundle_path, 511) metadata = None if os.path.exists(metadata_path): logger.info('get_bundle :: Fetching extra files specified in metadata for {}'.format(metadata_path)) with open(metadata_path) as mf: 
metadata = yaml.load(mf) if isinstance(metadata, dict): for (k, v) in metadata.items(): if k not in ('description', 'command', 'exitCode', 'elapsedTime', 'stdout', 'stderr', 'submitted-by', 'submitted-at'): if isinstance(v, str): logger.debug('get_bundle :: Fetching recursive bundle %s %s %s' % (bundle_path, k, v)) metadata[k] = get_bundle(bundle_path, k, v) metadata[k] = metadata </DeepExtract> return metadata
def get_bundle(root_dir, relative_dir, url): url_without_params = url.split('?')[0] file_name = url_without_params.split('/')[-1] file_ext = os.path.splitext(file_name)[1] logger.debug('get_bundle :: Getting %s from %s' % (file_name, url)) bundle_file = tempfile.NamedTemporaryFile(prefix='tmp', suffix=file_ext, dir=root_dir, delete=False) urllib.urlretrieve(url, bundle_file.name) bundle_path = join(root_dir, relative_dir) metadata_path = join(bundle_path, 'metadata') if file_ext == '.zip': logger.info('get_bundle :: Unzipping %s' % bundle_file.name) with ZipFile(bundle_file.file, 'r') as z: z.extractall(bundle_path) files_in_path = os.listdir(bundle_path) if len(files_in_path) > 2 and 'metadata' in files_in_path: metadata_folder = None for f in files_in_path: folder = os.path.join(bundle_path, f) if os.path.isdir(folder): if 'metadata' in os.listdir(folder): metadata_folder = folder if metadata_folder: logger.info('get_bundle :: Found a submission with an extra folder, unpacking and moving up a directory') temp_folder_name = join(root_dir, '%s%s' % (relative_dir, '_tmp')) shutil.copytree(metadata_folder, temp_folder_name) shutil.rmtree(bundle_path, ignore_errors=True) shutil.move(temp_folder_name, bundle_path) for zip_file in glob(join(bundle_path, '*.zip')): name_without_extension = os.path.splitext(zip_file)[0] with ZipFile(join(bundle_path, zip_file), 'r') as z: z.extractall(join(bundle_path, name_without_extension)) else: os.mkdir(bundle_path) shutil.copyfile(bundle_file.name, metadata_path) os.chmod(bundle_path, 511) metadata = None if os.path.exists(metadata_path): logger.info('get_bundle :: Fetching extra files specified in metadata for {}'.format(metadata_path)) with open(metadata_path) as mf: metadata = yaml.load(mf) if isinstance(metadata, dict): for (k, v) in metadata.items(): if k not in ('description', 'command', 'exitCode', 'elapsedTime', 'stdout', 'stderr', 'submitted-by', 'submitted-at'): if isinstance(v, str): logger.debug('get_bundle :: Fetching recursive bundle %s %s %s' % (bundle_path, k, v)) url_without_params = v.split('?')[0] file_name = url_without_params.split('/')[-1] file_ext = os.path.splitext(file_name)[1] logger.debug('get_bundle :: Getting %s from %s' % (file_name, v)) bundle_file = tempfile.NamedTemporaryFile(prefix='tmp', suffix=file_ext, dir=bundle_path, delete=False) urllib.urlretrieve(v, bundle_file.name) bundle_path = join(bundle_path, k) metadata_path = join(bundle_path, 'metadata') if file_ext == '.zip': logger.info('get_bundle :: Unzipping %s' % bundle_file.name) with ZipFile(bundle_file.file, 'r') as z: z.extractall(bundle_path) metadata_folder = _find_only_folder_with_metadata(bundle_path) if metadata_folder: logger.info('get_bundle :: Found a submission with an extra folder, unpacking and moving up a directory') temp_folder_name = join(bundle_path, '%s%s' % (k, '_tmp')) shutil.copytree(metadata_folder, temp_folder_name) shutil.rmtree(bundle_path, ignore_errors=True) shutil.move(temp_folder_name, bundle_path) for zip_file in glob(join(bundle_path, '*.zip')): name_without_extension = os.path.splitext(zip_file)[0] with ZipFile(join(bundle_path, zip_file), 'r') as z: z.extractall(join(bundle_path, name_without_extension)) else: os.mkdir(bundle_path) shutil.copyfile(bundle_file.name, metadata_path) os.chmod(bundle_path, 511) metadata = None if os.path.exists(metadata_path): logger.info('get_bundle :: Fetching extra files specified in metadata for {}'.format(metadata_path)) with open(metadata_path) as mf: metadata = yaml.load(mf) if isinstance(metadata, 
dict): for (k, v) in metadata.items(): if k not in ('description', 'command', 'exitCode', 'elapsedTime', 'stdout', 'stderr', 'submitted-by', 'submitted-at'): if isinstance(v, str): logger.debug('get_bundle :: Fetching recursive bundle %s %s %s' % (bundle_path, k, v)) metadata[k] = get_bundle(bundle_path, k, v) metadata[k] = metadata return metadata
ctw-baseline
positive
@unpack_kwargs def build_python_lambda_layer(layer_root: str, bundle_dir: str, project_path: str): """ Layer root is a dir where these files exist: - lambda_layer_config.json - local_requirements.txt - requirements.txt """ with open(Path(layer_root, LAMBDA_LAYER_CONFIG_FILE_NAME), 'r') as file: layer_config = json.load(file) validate_params(layer_root, layer_config, ['name', 'deployment_package']) artifact_name = without_zip_ext(layer_config['deployment_package']) artifact_path = Path(bundle_dir, artifact_name) path_for_requirements = artifact_path / PYTHON_LAMBDA_LAYER_PATH _LOG.info(f'Artifacts path: {artifact_path}') os.makedirs(artifact_path, exist_ok=True) requirements_path = Path(layer_root, REQ_FILE_NAME) if os.path.exists(requirements_path): <DeepExtract> _LOG.info('Going to install 3-rd party dependencies') supported_platforms = config.get('platforms') if config else None python_version = _get_python_version(lambda_config=config) try: required_default_install = True if supported_platforms and python_version: required_default_install = install_requirements_for_platform(requirements_txt=requirements_path, to=path_for_requirements, supported_platforms=supported_platforms, python_version=python_version) if required_default_install: command = f'{sys.executable} -m pip install -r {str(requirements_path)} -t {str(path_for_requirements)}' subprocess.run(command.split(), stderr=subprocess.PIPE, check=True) except subprocess.CalledProcessError as e: message = f'An error: \n"{e.stderr.decode()}"\noccured while installing requirements: "{str(requirements_path)}" for package "{path_for_requirements}"' _LOG.error(message) raise RuntimeError(message) _LOG.info('3-rd party dependencies were installed successfully') </DeepExtract> local_requirements_path = Path(layer_root, LOCAL_REQ_FILE_NAME) if os.path.exists(local_requirements_path): _LOG.info('Going to install local dependencies') <DeepExtract> from syndicate.core import CONFIG with open(local_requirements_path) as f: local_req_list = f.readlines() local_req_list = [path_resolver(r.strip()) for r in local_req_list] _LOG.info(f'Installing local dependencies: {local_req_list}') for lrp in local_req_list: _LOG.info(f'Processing local dependency: {lrp}') copy_tree(str(Path(CONFIG.project_path, project_path, lrp)), str(Path(path_for_requirements, lrp))) _LOG.debug('Dependency was copied successfully') folders = [r for r in lrp.split(DEFAULT_SEP) if r] folders.insert(0, '') i = 0 temp_path = '' while i < len(folders): temp_path += DEFAULT_SEP + folders[i] src_path = Path(CONFIG.project_path, project_path, temp_path) dst_path = Path(path_for_requirements, temp_path) _copy_py_files(str(src_path), str(dst_path)) i += 1 _LOG.debug('Python files from packages were copied successfully') </DeepExtract> _LOG.info('Local dependencies were installed successfully') package_name = zip_ext(layer_config['deployment_package']) _LOG.info(f'Packaging artifacts by {artifact_path} to {package_name}') zip_dir(str(artifact_path), str(Path(bundle_dir, package_name))) _LOG.info(f"Package '{package_name}' was successfully created") <DeepExtract> removed = False while not removed: _LOG.info(f'Trying to remove "{artifact_path}"') try: shutil.rmtree(artifact_path) removed = True except Exception as e: _LOG.warn(f'An error "{e}" occurred while removing artifacts "{artifact_path}"') </DeepExtract> _LOG.info(f'"{artifact_path}" was removed successfully')
@unpack_kwargs def build_python_lambda_layer(layer_root: str, bundle_dir: str, project_path: str): """ Layer root is a dir where these files exist: - lambda_layer_config.json - local_requirements.txt - requirements.txt """ with open(Path(layer_root, LAMBDA_LAYER_CONFIG_FILE_NAME), 'r') as file: layer_config = json.load(file) validate_params(layer_root, layer_config, ['name', 'deployment_package']) artifact_name = without_zip_ext(layer_config['deployment_package']) artifact_path = Path(bundle_dir, artifact_name) path_for_requirements = artifact_path / PYTHON_LAMBDA_LAYER_PATH _LOG.info(f'Artifacts path: {artifact_path}') os.makedirs(artifact_path, exist_ok=True) requirements_path = Path(layer_root, REQ_FILE_NAME) if os.path.exists(requirements_path): _LOG.info('Going to install 3-rd party dependencies') supported_platforms = config.get('platforms') if config else None python_version = _get_python_version(lambda_config=config) try: required_default_install = True if supported_platforms and python_version: required_default_install = install_requirements_for_platform(requirements_txt=requirements_path, to=path_for_requirements, supported_platforms=supported_platforms, python_version=python_version) if required_default_install: command = f'{sys.executable} -m pip install -r {str(requirements_path)} -t {str(path_for_requirements)}' subprocess.run(command.split(), stderr=subprocess.PIPE, check=True) except subprocess.CalledProcessError as e: message = f'An error: \n"{e.stderr.decode()}"\noccured while installing requirements: "{str(requirements_path)}" for package "{path_for_requirements}"' _LOG.error(message) raise RuntimeError(message) _LOG.info('3-rd party dependencies were installed successfully') local_requirements_path = Path(layer_root, LOCAL_REQ_FILE_NAME) if os.path.exists(local_requirements_path): _LOG.info('Going to install local dependencies') from syndicate.core import CONFIG with open(local_requirements_path) as f: local_req_list = f.readlines() local_req_list = [path_resolver(r.strip()) for r in local_req_list] _LOG.info(f'Installing local dependencies: {local_req_list}') for lrp in local_req_list: _LOG.info(f'Processing local dependency: {lrp}') copy_tree(str(Path(CONFIG.project_path, project_path, lrp)), str(Path(path_for_requirements, lrp))) _LOG.debug('Dependency was copied successfully') folders = [r for r in lrp.split(DEFAULT_SEP) if r] folders.insert(0, '') i = 0 temp_path = '' while i < len(folders): temp_path += DEFAULT_SEP + folders[i] src_path = Path(CONFIG.project_path, project_path, temp_path) dst_path = Path(path_for_requirements, temp_path) _copy_py_files(str(src_path), str(dst_path)) i += 1 _LOG.debug('Python files from packages were copied successfully') _LOG.info('Local dependencies were installed successfully') package_name = zip_ext(layer_config['deployment_package']) _LOG.info(f'Packaging artifacts by {artifact_path} to {package_name}') zip_dir(str(artifact_path), str(Path(bundle_dir, package_name))) _LOG.info(f"Package '{package_name}' was successfully created") removed = False while not removed: _LOG.info(f'Trying to remove "{artifact_path}"') try: shutil.rmtree(artifact_path) removed = True except Exception as e: _LOG.warn(f'An error "{e}" occurred while removing artifacts "{artifact_path}"') _LOG.info(f'"{artifact_path}" was removed successfully')
aws-syndicate
positive
def disable(self, idx=0): self.enabled = False <DeepExtract> p.resetBasePositionAndOrientation(self.body_id, [[idx, 20, 0], [0.0, 0.0, 0.0, 1.0]][0], self.transform_orientation([[idx, 20, 0], [0.0, 0.0, 0.0, 1.0]][1])) </DeepExtract> <DeepExtract> self.set_arm_joints(self.home_config) </DeepExtract> <DeepExtract> gripper_joint_positions = p.getJointState(self.body_id, 1)[0] p.setJointMotorControlArray(self.body_id, [6, 3, 8, 5, 10], p.POSITION_CONTROL, [gripper_joint_positions, -gripper_joint_positions, -gripper_joint_positions, gripper_joint_positions, gripper_joint_positions], positionGains=np.ones(5)) </DeepExtract>
def disable(self, idx=0): self.enabled = False p.resetBasePositionAndOrientation(self.body_id, [[idx, 20, 0], [0.0, 0.0, 0.0, 1.0]][0], self.transform_orientation([[idx, 20, 0], [0.0, 0.0, 0.0, 1.0]][1])) self.set_arm_joints(self.home_config) gripper_joint_positions = p.getJointState(self.body_id, 1)[0] p.setJointMotorControlArray(self.body_id, [6, 3, 8, 5, 10], p.POSITION_CONTROL, [gripper_joint_positions, -gripper_joint_positions, -gripper_joint_positions, gripper_joint_positions, gripper_joint_positions], positionGains=np.ones(5))
decentralized-multiarm
positive
def __init__(self, root, transform=None, return_paths=False, loader=default_loader): <DeepExtract> images = [] assert os.path.isdir(root), '%s is not a valid directory' % root for (root, _, fnames) in sorted(os.walk(root)): for fname in fnames: if is_image_file(fname): path = os.path.join(root, fname) images.append(path) imgs = images[:min(max_dataset_size, len(images))] </DeepExtract> if len(imgs) == 0: raise RuntimeError('Found 0 images in: ' + root + '\nSupported image extensions are: ' + ','.join(IMG_EXTENSIONS)) self.root = root self.imgs = imgs self.transform = transform self.return_paths = return_paths self.loader = loader
def __init__(self, root, transform=None, return_paths=False, loader=default_loader): images = [] assert os.path.isdir(root), '%s is not a valid directory' % root for (root, _, fnames) in sorted(os.walk(root)): for fname in fnames: if is_image_file(fname): path = os.path.join(root, fname) images.append(path) imgs = images[:min(max_dataset_size, len(images))] if len(imgs) == 0: raise RuntimeError('Found 0 images in: ' + root + '\nSupported image extensions are: ' + ','.join(IMG_EXTENSIONS)) self.root = root self.imgs = imgs self.transform = transform self.return_paths = return_paths self.loader = loader
disrupting-deepfakes
positive
def set_limitFW(self, value): reg = self.limitFW_ref['write'] if value < self.limitFW_ref['min']: value = self.limitFW_ref['min'] if value > self.limitFW_ref['max']: value = self.limitFW_ref['max'] <DeepExtract> result = self.i2c.write16(reg, value) </DeepExtract> if result & self.debug: print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg])) return result
def set_limitFW(self, value): reg = self.limitFW_ref['write'] if value < self.limitFW_ref['min']: value = self.limitFW_ref['min'] if value > self.limitFW_ref['max']: value = self.limitFW_ref['max'] result = self.i2c.write16(reg, value) if result & self.debug: print('Error writing address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg])) return result
choreograph-git
positive
def get_font_path(self, xml_dict, folder): (uri, path) = (None, None) if self.dcp.schema == 'SMPTE': uri = self.get_subtitle_elem(xml_dict, 'LoadFont').lower() else: <DeepExtract> subtitle_root = {'Interop': 'DCSubtitle', 'SMPTE': 'SubtitleReel'} root = xml_dict.get(subtitle_root[self.dcp.schema]) if root: uri = root.get('LoadFont@URI', '') uri = '' </DeepExtract> if uri: path = os.path.join(folder, uri) return (path, uri)
def get_font_path(self, xml_dict, folder): (uri, path) = (None, None) if self.dcp.schema == 'SMPTE': uri = self.get_subtitle_elem(xml_dict, 'LoadFont').lower() else: subtitle_root = {'Interop': 'DCSubtitle', 'SMPTE': 'SubtitleReel'} root = xml_dict.get(subtitle_root[self.dcp.schema]) if root: uri = root.get('LoadFont@URI', '') uri = '' if uri: path = os.path.join(folder, uri) return (path, uri)
ClairMeta
positive
def set_english(): global my_local_language <DeepExtract> if BV < 2.8: if bpy.context.user_preferences.system.use_international_fonts: my_local_language = 'ho_GE' else: my_local_language = 'en_US' elif BV < 2.83: if bpy.context.preferences.view.use_international_fonts: my_local_language = 'ho_GE' else: my_local_language = 'en_US' else: my_local_language = bpy.context.preferences.view.language </DeepExtract> <DeepExtract> if BV < 2.8: bpy.context.user_preferences.system.use_international_fonts = 'en_US' != 'en_US' elif BV < 2.83: bpy.context.preferences.view.use_international_fonts = 'en_US' != 'en_US' else: bpy.context.preferences.view.language = 'en_US' </DeepExtract>
def set_english(): global my_local_language if BV < 2.8: if bpy.context.user_preferences.system.use_international_fonts: my_local_language = 'ho_GE' else: my_local_language = 'en_US' elif BV < 2.83: if bpy.context.preferences.view.use_international_fonts: my_local_language = 'ho_GE' else: my_local_language = 'en_US' else: my_local_language = bpy.context.preferences.view.language if BV < 2.8: bpy.context.user_preferences.system.use_international_fonts = 'en_US' != 'en_US' elif BV < 2.83: bpy.context.preferences.view.use_international_fonts = 'en_US' != 'en_US' else: bpy.context.preferences.view.language = 'en_US'
DazToBlender
positive
def validate_language(self, lang, field): if not isinstance(lang, str): <DeepExtract> raise ParamError('language %s is not valid in %s' % (lang, field)) </DeepExtract> lang_parts = lang.split('-') for (idx, part) in enumerate(lang_parts): if part and re.match('^[A-Za-z0-9]*$', part): if len(part) > 8: <DeepExtract> raise ParamError('language %s is not valid in %s' % (lang, field)) </DeepExtract> else: <DeepExtract> raise ParamError('language %s is not valid in %s' % (lang, field)) </DeepExtract>
def validate_language(self, lang, field): if not isinstance(lang, str): raise ParamError('language %s is not valid in %s' % (lang, field)) lang_parts = lang.split('-') for (idx, part) in enumerate(lang_parts): if part and re.match('^[A-Za-z0-9]*$', part): if len(part) > 8: raise ParamError('language %s is not valid in %s' % (lang, field)) else: raise ParamError('language %s is not valid in %s' % (lang, field))
ADL_LRS
positive
def import_excel_xls(self, filename): workbook = xlrd.open_workbook(filename) print('%sExcel file: %s%s' % (Fore.WHITE, Fore.YELLOW, filename)) for worksheet in workbook.sheets(): if worksheet.name.startswith('--'): print("%sskipping '%s' worksheet" % (Fore.GREEN, worksheet.name)) continue if config.debug: print("%simporting '%s' rows" % (Fore.CYAN, worksheet.name)) for row_num in trange(0, worksheet.nrows, unit=' row', desc="%simporting '%s' rows%s" % (Fore.CYAN, worksheet.name, Fore.GREEN), disable=bool(config.debug or not sys.stdout.isatty())): if row_num == 0: continue row = [self.convert_cell_xls(worksheet.cell(row_num, cell_num), workbook) for cell_num in range(0, worksheet.ncols)] t_row = TransactionRow(row[:len(TransactionRow.HEADER)], row_num + 1, worksheet.name) try: t_row.parse() except TransactionParserError as e: t_row.failure = e if config.debug or t_row.failure: tqdm.write('%simport: %s' % (Fore.YELLOW, t_row)) if t_row.failure: tqdm.write('%sERROR%s %s' % (Back.RED + Fore.BLACK, Back.RESET + Fore.RED, t_row.failure)) self.t_rows.append(t_row) <DeepExtract> if t_row.failure is not None: self.failure_cnt += 1 elif t_row.t_record is not None: self.success_cnt += 1 </DeepExtract> workbook.release_resources() del workbook
def import_excel_xls(self, filename): workbook = xlrd.open_workbook(filename) print('%sExcel file: %s%s' % (Fore.WHITE, Fore.YELLOW, filename)) for worksheet in workbook.sheets(): if worksheet.name.startswith('--'): print("%sskipping '%s' worksheet" % (Fore.GREEN, worksheet.name)) continue if config.debug: print("%simporting '%s' rows" % (Fore.CYAN, worksheet.name)) for row_num in trange(0, worksheet.nrows, unit=' row', desc="%simporting '%s' rows%s" % (Fore.CYAN, worksheet.name, Fore.GREEN), disable=bool(config.debug or not sys.stdout.isatty())): if row_num == 0: continue row = [self.convert_cell_xls(worksheet.cell(row_num, cell_num), workbook) for cell_num in range(0, worksheet.ncols)] t_row = TransactionRow(row[:len(TransactionRow.HEADER)], row_num + 1, worksheet.name) try: t_row.parse() except TransactionParserError as e: t_row.failure = e if config.debug or t_row.failure: tqdm.write('%simport: %s' % (Fore.YELLOW, t_row)) if t_row.failure: tqdm.write('%sERROR%s %s' % (Back.RED + Fore.BLACK, Back.RESET + Fore.RED, t_row.failure)) self.t_rows.append(t_row) if t_row.failure is not None: self.failure_cnt += 1 elif t_row.t_record is not None: self.success_cnt += 1 workbook.release_resources() del workbook
BittyTax
positive
def loader(self, name): if name == self.filename: fname = name else: <DeepExtract> if not lookup: raise depr(0, 12, 'Empty template lookup path.', 'Configure a template lookup path.') if os.path.isabs(self.lookup): raise depr(0, 12, 'Use of absolute path for template name.', 'Refer to templates with names or paths relative to the lookup path.') for spath in lookup: spath = os.path.abspath(spath) + os.sep fname = os.path.abspath(os.path.join(spath, self.lookup)) if not fname.startswith(spath): continue if os.path.isfile(fname): fname = fname for ext in name.extensions: if os.path.isfile('%s.%s' % (fname, ext)): fname = '%s.%s' % (fname, ext) </DeepExtract> if not fname: return with open(fname, 'rb') as f: return (f.read().decode(self.encoding), fname, lambda : False)
def loader(self, name): if name == self.filename: fname = name else: if not lookup: raise depr(0, 12, 'Empty template lookup path.', 'Configure a template lookup path.') if os.path.isabs(self.lookup): raise depr(0, 12, 'Use of absolute path for template name.', 'Refer to templates with names or paths relative to the lookup path.') for spath in lookup: spath = os.path.abspath(spath) + os.sep fname = os.path.abspath(os.path.join(spath, self.lookup)) if not fname.startswith(spath): continue if os.path.isfile(fname): fname = fname for ext in name.extensions: if os.path.isfile('%s.%s' % (fname, ext)): fname = '%s.%s' % (fname, ext) if not fname: return with open(fname, 'rb') as f: return (f.read().decode(self.encoding), fname, lambda : False)
DrRepair
positive
def acquire(self, blocking=False, timeout=30, polling_rate=0.4): if self.lock_acquired: return if not os.path.isfile(self.db_file): <DeepExtract> db = get_db(self.db_file) db.executescript('DROP TABLE IF EXISTS locks; CREATE TABLE locks (name TEXT NOT NULL);') db.close() </DeepExtract> cur_timeout = 0 while True and (not self.lock_acquired): <DeepExtract> db = sqlite3.connect(str(self.db_file), detect_types=sqlite3.PARSE_DECLTYPES) db.row_factory = sqlite3.Row db = db </DeepExtract> try: db.isolation_level = 'EXCLUSIVE' db.execute('BEGIN EXCLUSIVE') lock_entry = db.execute('SELECT * FROM locks WHERE name = ?', (self.lock_name,)).fetchone() if lock_entry is None: db.execute('INSERT INTO locks (name) VALUES (?)', (self.lock_name,)) self.lock_acquired = True logging.debug(_log_msg(f'Acquired lock {self.lock_name}', self.project_id)) db.commit() except sqlite3.OperationalError as e: logging.error(_log_msg(f'Encountering operational error {e}', self.project_id)) db.close() if self.lock_acquired or not blocking: break cur_timeout += polling_rate sleep(polling_rate)
def acquire(self, blocking=False, timeout=30, polling_rate=0.4): if self.lock_acquired: return if not os.path.isfile(self.db_file): db = get_db(self.db_file) db.executescript('DROP TABLE IF EXISTS locks; CREATE TABLE locks (name TEXT NOT NULL);') db.close() cur_timeout = 0 while True and (not self.lock_acquired): db = sqlite3.connect(str(self.db_file), detect_types=sqlite3.PARSE_DECLTYPES) db.row_factory = sqlite3.Row db = db try: db.isolation_level = 'EXCLUSIVE' db.execute('BEGIN EXCLUSIVE') lock_entry = db.execute('SELECT * FROM locks WHERE name = ?', (self.lock_name,)).fetchone() if lock_entry is None: db.execute('INSERT INTO locks (name) VALUES (?)', (self.lock_name,)) self.lock_acquired = True logging.debug(_log_msg(f'Acquired lock {self.lock_name}', self.project_id)) db.commit() except sqlite3.OperationalError as e: logging.error(_log_msg(f'Encountering operational error {e}', self.project_id)) db.close() if self.lock_acquired or not blocking: break cur_timeout += polling_rate sleep(polling_rate)
asreview
positive
def _generate_anchors(base_size, scales, aspect_ratios): """Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, base_size - 1, base_size - 1) window. """ anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1 <DeepExtract> (w, h, x_ctr, y_ctr) = _whctrs(anchor) size = w * h size_ratios = size / aspect_ratios ws = np.round(np.sqrt(size_ratios)) hs = np.round(ws * aspect_ratios) anchors = _mkanchors(ws, hs, x_ctr, y_ctr) anchors = anchors </DeepExtract> anchors = np.vstack([_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]) return torch.from_numpy(anchors)
def _generate_anchors(base_size, scales, aspect_ratios): """Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, base_size - 1, base_size - 1) window. """ anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1 (w, h, x_ctr, y_ctr) = _whctrs(anchor) size = w * h size_ratios = size / aspect_ratios ws = np.round(np.sqrt(size_ratios)) hs = np.round(ws * aspect_ratios) anchors = _mkanchors(ws, hs, x_ctr, y_ctr) anchors = anchors anchors = np.vstack([_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]) return torch.from_numpy(anchors)
DetNAS
positive
def get(self, id_: str, hda_ldda: HdaLdda='hda') -> wrappers.Dataset: """ Retrieve the dataset corresponding to the given id. :type hda_ldda: str :param hda_ldda: Whether to show a history dataset ('hda' - the default) or library dataset ('ldda') :rtype: :class:`~.wrappers.HistoryDatasetAssociation` or :class:`~.wrappers.LibraryDatasetDatasetAssociation` :return: the history or library dataset corresponding to ``id_`` """ res = self.gi.datasets.show_dataset(id_, hda_ldda=hda_ldda) <DeepExtract> if res is None: raise RuntimeError(f"{'show_dataset'}: no reply") elif isinstance(res, dict): ds_dict = res try: ds_dict = res[0] except (TypeError, IndexError): raise RuntimeError(f"{'show_dataset'}: unexpected reply: {res!r}") </DeepExtract> if hda_ldda == 'hda': hist = self.obj_gi.histories.get(ds_dict['history_id']) return wrappers.HistoryDatasetAssociation(ds_dict, hist, gi=self.obj_gi) elif hda_ldda == 'ldda': lib = self.obj_gi.libraries.get(ds_dict['parent_library_id']) return wrappers.LibraryDatasetDatasetAssociation(ds_dict, lib, gi=self.obj_gi) else: raise ValueError(f'Unsupported value for hda_ldda: {hda_ldda}')
def get(self, id_: str, hda_ldda: HdaLdda='hda') -> wrappers.Dataset: """ Retrieve the dataset corresponding to the given id. :type hda_ldda: str :param hda_ldda: Whether to show a history dataset ('hda' - the default) or library dataset ('ldda') :rtype: :class:`~.wrappers.HistoryDatasetAssociation` or :class:`~.wrappers.LibraryDatasetDatasetAssociation` :return: the history or library dataset corresponding to ``id_`` """ res = self.gi.datasets.show_dataset(id_, hda_ldda=hda_ldda) if res is None: raise RuntimeError(f"{'show_dataset'}: no reply") elif isinstance(res, dict): ds_dict = res try: ds_dict = res[0] except (TypeError, IndexError): raise RuntimeError(f"{'show_dataset'}: unexpected reply: {res!r}") if hda_ldda == 'hda': hist = self.obj_gi.histories.get(ds_dict['history_id']) return wrappers.HistoryDatasetAssociation(ds_dict, hist, gi=self.obj_gi) elif hda_ldda == 'ldda': lib = self.obj_gi.libraries.get(ds_dict['parent_library_id']) return wrappers.LibraryDatasetDatasetAssociation(ds_dict, lib, gi=self.obj_gi) else: raise ValueError(f'Unsupported value for hda_ldda: {hda_ldda}')
bioblend
positive
def __init__(self, dim=4): assert dim == 4 centers = numpy.array([[0.1, 0.1, 0.1, 0.1], [0.3, 0.8, 0.5, 0.2], [0, 0.7, 0.4, 0.9], [0.7, 0.1, 0.2, 0.8], [0.4, 0.3, 0.6, 0.6], [0.2, 0.8, 0.2, 0.6], [0.9, 0.2, 0.3, 0.4], [0.9, 0.4, 0.9, 0.8], [0.5, 0.5, 0.5, 0.5], [0, 0.8, 0, 0.2]]) e_mat = 10 * numpy.array([[1, 1, 4, 4], [1, 1, 4, 4], [3, 3, 4, 4], [0.5, 0.5, 2, 2], [1, 1, 0.5, 0.2], [3, 3, 1, 1], [0.5, 0.5, 4, 2], [1, 1, 2, 3], [2, 2, 3, 4], [1, 1, 0.5, 0.5]]) coefs = numpy.array([5, -4, 5, -5, 4, -2, 10, -8, -2, -5]) def kernel(x): <DeepExtract> if 'inf' == 1: ret_val = numpy.array([[numpy.sum(numpy.abs((xpt - center) * evec)) for (evec, center) in lzip(numpy.sqrt(e_mat), centers)] for xpt in x]) elif 'inf' == 2: ret_val = numpy.array([[numpy.dot((xpt - center) * evec, xpt - center) for (evec, center) in lzip(e_mat, centers)] for xpt in x]) elif 'inf' == 'inf': ret_val = numpy.array([[numpy.max(numpy.abs((xpt - center) * evec)) for (evec, center) in lzip(numpy.sqrt(e_mat), centers)] for xpt in x]) else: raise ValueError('Unrecognized distance type {0}'.format('inf')) rmax = ret_val </DeepExtract> return numpy.exp(-rmax) super(McCourt21, self).__init__(dim, kernel, e_mat, coefs, centers) self.min_loc = [0.9, 0.4, 0.9, 0.8] self.fmin = -7.74993665759 self.fmax = 8.31973328564 self.classifiers = ['nonsmooth']
def __init__(self, dim=4): assert dim == 4 centers = numpy.array([[0.1, 0.1, 0.1, 0.1], [0.3, 0.8, 0.5, 0.2], [0, 0.7, 0.4, 0.9], [0.7, 0.1, 0.2, 0.8], [0.4, 0.3, 0.6, 0.6], [0.2, 0.8, 0.2, 0.6], [0.9, 0.2, 0.3, 0.4], [0.9, 0.4, 0.9, 0.8], [0.5, 0.5, 0.5, 0.5], [0, 0.8, 0, 0.2]]) e_mat = 10 * numpy.array([[1, 1, 4, 4], [1, 1, 4, 4], [3, 3, 4, 4], [0.5, 0.5, 2, 2], [1, 1, 0.5, 0.2], [3, 3, 1, 1], [0.5, 0.5, 4, 2], [1, 1, 2, 3], [2, 2, 3, 4], [1, 1, 0.5, 0.5]]) coefs = numpy.array([5, -4, 5, -5, 4, -2, 10, -8, -2, -5]) def kernel(x): if 'inf' == 1: ret_val = numpy.array([[numpy.sum(numpy.abs((xpt - center) * evec)) for (evec, center) in lzip(numpy.sqrt(e_mat), centers)] for xpt in x]) elif 'inf' == 2: ret_val = numpy.array([[numpy.dot((xpt - center) * evec, xpt - center) for (evec, center) in lzip(e_mat, centers)] for xpt in x]) elif 'inf' == 'inf': ret_val = numpy.array([[numpy.max(numpy.abs((xpt - center) * evec)) for (evec, center) in lzip(numpy.sqrt(e_mat), centers)] for xpt in x]) else: raise ValueError('Unrecognized distance type {0}'.format('inf')) rmax = ret_val return numpy.exp(-rmax) super(McCourt21, self).__init__(dim, kernel, e_mat, coefs, centers) self.min_loc = [0.9, 0.4, 0.9, 0.8] self.fmin = -7.74993665759 self.fmax = 8.31973328564 self.classifiers = ['nonsmooth']
evalset
positive
def test_custom_status_tokyo_seattle(): context_builder = ContextBuilder('test_custom_status_seattle') <DeepExtract> context_builder.add_task_scheduled_event(name='Hello', id_=0) context_builder.add_orchestrator_completed_event() context_builder.add_orchestrator_started_event() context_builder.add_task_completed_event(id_=0, result='"Hello Tokyo!"') </DeepExtract> <DeepExtract> context_builder.add_task_scheduled_event(name='Hello', id_=1) context_builder.add_orchestrator_completed_event() context_builder.add_orchestrator_started_event() context_builder.add_task_completed_event(id_=1, result='"Hello Seattle!"') </DeepExtract> result = get_orchestration_state_result(context_builder, generator_function) <DeepExtract> expected_state = OrchestratorState(is_done=False, actions=[], output=output, replay_schema=replay_schema.value) </DeepExtract> <DeepExtract> action = CallActivityAction(function_name='Hello', input_='Tokyo') expected_state.actions.append([action]) </DeepExtract> <DeepExtract> action = CallActivityAction(function_name='Hello', input_='Seattle') expected_state.actions.append([action]) </DeepExtract> <DeepExtract> action = CallActivityAction(function_name='Hello', input_='London') expected_state.actions.append([action]) </DeepExtract> <DeepExtract> expected_state._custom_status = 'Seattle ->' </DeepExtract> expected = expected_state.to_json() assert_valid_schema(result) assert_orchestration_state_equals(expected, result)
def test_custom_status_tokyo_seattle(): context_builder = ContextBuilder('test_custom_status_seattle') context_builder.add_task_scheduled_event(name='Hello', id_=0) context_builder.add_orchestrator_completed_event() context_builder.add_orchestrator_started_event() context_builder.add_task_completed_event(id_=0, result='"Hello Tokyo!"') context_builder.add_task_scheduled_event(name='Hello', id_=1) context_builder.add_orchestrator_completed_event() context_builder.add_orchestrator_started_event() context_builder.add_task_completed_event(id_=1, result='"Hello Seattle!"') result = get_orchestration_state_result(context_builder, generator_function) expected_state = OrchestratorState(is_done=False, actions=[], output=output, replay_schema=replay_schema.value) action = CallActivityAction(function_name='Hello', input_='Tokyo') expected_state.actions.append([action]) action = CallActivityAction(function_name='Hello', input_='Seattle') expected_state.actions.append([action]) action = CallActivityAction(function_name='Hello', input_='London') expected_state.actions.append([action]) expected_state._custom_status = 'Seattle ->' expected = expected_state.to_json() assert_valid_schema(result) assert_orchestration_state_equals(expected, result)
azure-functions-durable-python
positive
def __call__(self, libname): """Given the name of a library, load it.""" <DeepExtract> if os.path.isabs(libname): yield libname else: for dir_i in self.other_dirs: for fmt in self.name_formats: yield os.path.join(dir_i, fmt % libname) try: this_file = __file__ except NameError: this_file = None if this_file is not None: for fmt in self.name_formats: yield os.path.abspath(os.path.join(os.path.dirname(__file__), fmt % libname)) for fmt in self.name_formats: path = ctypes.util.find_library(fmt % libname) if path: yield path for path in self.getplatformpaths(libname): yield path for fmt in self.name_formats: yield os.path.abspath(os.path.join(os.path.curdir, fmt % libname)) </DeepExtract> for path in paths: try: return self.Lookup(path) except Exception: pass raise ImportError('Could not load %s.' % libname)
def __call__(self, libname): """Given the name of a library, load it.""" if os.path.isabs(libname): yield libname else: for dir_i in self.other_dirs: for fmt in self.name_formats: yield os.path.join(dir_i, fmt % libname) try: this_file = __file__ except NameError: this_file = None if this_file is not None: for fmt in self.name_formats: yield os.path.abspath(os.path.join(os.path.dirname(__file__), fmt % libname)) for fmt in self.name_formats: path = ctypes.util.find_library(fmt % libname) if path: yield path for path in self.getplatformpaths(libname): yield path for fmt in self.name_formats: yield os.path.abspath(os.path.join(os.path.curdir, fmt % libname)) for path in paths: try: return self.Lookup(path) except Exception: pass raise ImportError('Could not load %s.' % libname)
ctypesgen
positive
def create_object_from_email_message(message, ticket_id, payload, files, logger): (ticket, previous_followup, new) = (None, None, False) now = timezone.now() queue = payload['queue'] sender_email = payload['sender_email'] to_list = getaddresses(message.get_all('To', [])) cc_list = getaddresses(message.get_all('Cc', [])) message_id = message.get('Message-Id') in_reply_to = message.get('In-Reply-To') if message_id: message_id = message_id.strip() if in_reply_to: in_reply_to = in_reply_to.strip() if in_reply_to is not None: try: queryset = FollowUp.objects.filter(message_id=in_reply_to).order_by('-date') if queryset.count() > 0: previous_followup = queryset.first() ticket = previous_followup.ticket except FollowUp.DoesNotExist: pass if previous_followup is None and ticket_id is not None: try: ticket = Ticket.objects.get(id=ticket_id) except Ticket.DoesNotExist: ticket = None else: new = False if ticket.merged_to: logger.info('Ticket has been merged to %s' % ticket.merged_to.ticket) ticket = ticket.merged_to if ticket is None: if not settings.QUEUE_EMAIL_BOX_UPDATE_ONLY: ticket = Ticket.objects.create(title=payload['subject'], queue=queue, submitter_email=sender_email, created=now, description=payload['body'], priority=payload['priority']) ticket.save() logger.debug('Created new ticket %s-%s' % (ticket.queue.slug, ticket.id)) new = True elif ticket.status == Ticket.CLOSED_STATUS: ticket.status = Ticket.REOPENED_STATUS ticket.save() f = FollowUp(ticket=ticket, title=_('E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}), date=now, public=True, comment=payload.get('full_body', payload['body']) or '', message_id=message_id) if ticket.status == Ticket.REOPENED_STATUS: f.new_status = Ticket.REOPENED_STATUS f.title = _('Ticket Re-Opened by E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}) f.save() logger.debug('Created new FollowUp for Ticket') logger.info('[%s-%s] %s' % (ticket.queue.slug, ticket.id, ticket.title)) try: attached = process_attachments(f, files) except ValidationError as e: logger.error(str(e)) else: for att_file in attached: logger.info("Attachment '%s' (with size %s) successfully added to ticket from email.", att_file[0], att_file[1].size) context = safe_template_context(ticket) new_ticket_ccs = [] new_ticket_ccs.append(create_ticket_cc(ticket, to_list + cc_list)) <DeepExtract> any_if_this = [False if not message.get('Auto-Submitted') else message.get('Auto-Submitted').lower() != 'no', True if message.get('X-Auto-Response-Suppress') in ('DR', 'AutoReply', 'All') else False, message.get('List-Id'), message.get('List-Unsubscribe')] autoreply = any(any_if_this) </DeepExtract> if autoreply: logger.info('Message seems to be auto-reply, not sending any emails back to the sender') else: <DeepExtract> extra_headers = {'In-Reply-To': message_id, 'Auto-Submitted': 'auto-replied', 'X-Auto-Response-Suppress': 'All', 'Precedence': 'auto_reply'} if new: ticket.send({'submitter': ('newticket_submitter', context), 'new_ticket_cc': ('newticket_cc', context), 'ticket_cc': ('newticket_cc', context)}, fail_silently=True, extra_headers=extra_headers) else: context.update(comment=f.comment) ticket.send({'submitter': ('updated_submitter', context), 'assigned_to': ('updated_owner', context)}, fail_silently=True, extra_headers=extra_headers) if queue.enable_notifications_on_email_events: ticket.send({'ticket_cc': ('updated_cc', context)}, fail_silently=True, extra_headers=extra_headers) </DeepExtract> return ticket
def create_object_from_email_message(message, ticket_id, payload, files, logger): (ticket, previous_followup, new) = (None, None, False) now = timezone.now() queue = payload['queue'] sender_email = payload['sender_email'] to_list = getaddresses(message.get_all('To', [])) cc_list = getaddresses(message.get_all('Cc', [])) message_id = message.get('Message-Id') in_reply_to = message.get('In-Reply-To') if message_id: message_id = message_id.strip() if in_reply_to: in_reply_to = in_reply_to.strip() if in_reply_to is not None: try: queryset = FollowUp.objects.filter(message_id=in_reply_to).order_by('-date') if queryset.count() > 0: previous_followup = queryset.first() ticket = previous_followup.ticket except FollowUp.DoesNotExist: pass if previous_followup is None and ticket_id is not None: try: ticket = Ticket.objects.get(id=ticket_id) except Ticket.DoesNotExist: ticket = None else: new = False if ticket.merged_to: logger.info('Ticket has been merged to %s' % ticket.merged_to.ticket) ticket = ticket.merged_to if ticket is None: if not settings.QUEUE_EMAIL_BOX_UPDATE_ONLY: ticket = Ticket.objects.create(title=payload['subject'], queue=queue, submitter_email=sender_email, created=now, description=payload['body'], priority=payload['priority']) ticket.save() logger.debug('Created new ticket %s-%s' % (ticket.queue.slug, ticket.id)) new = True elif ticket.status == Ticket.CLOSED_STATUS: ticket.status = Ticket.REOPENED_STATUS ticket.save() f = FollowUp(ticket=ticket, title=_('E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}), date=now, public=True, comment=payload.get('full_body', payload['body']) or '', message_id=message_id) if ticket.status == Ticket.REOPENED_STATUS: f.new_status = Ticket.REOPENED_STATUS f.title = _('Ticket Re-Opened by E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}) f.save() logger.debug('Created new FollowUp for Ticket') logger.info('[%s-%s] %s' % (ticket.queue.slug, ticket.id, ticket.title)) try: attached = process_attachments(f, files) except ValidationError as e: logger.error(str(e)) else: for att_file in attached: logger.info("Attachment '%s' (with size %s) successfully added to ticket from email.", att_file[0], att_file[1].size) context = safe_template_context(ticket) new_ticket_ccs = [] new_ticket_ccs.append(create_ticket_cc(ticket, to_list + cc_list)) any_if_this = [False if not message.get('Auto-Submitted') else message.get('Auto-Submitted').lower() != 'no', True if message.get('X-Auto-Response-Suppress') in ('DR', 'AutoReply', 'All') else False, message.get('List-Id'), message.get('List-Unsubscribe')] autoreply = any(any_if_this) if autoreply: logger.info('Message seems to be auto-reply, not sending any emails back to the sender') else: extra_headers = {'In-Reply-To': message_id, 'Auto-Submitted': 'auto-replied', 'X-Auto-Response-Suppress': 'All', 'Precedence': 'auto_reply'} if new: ticket.send({'submitter': ('newticket_submitter', context), 'new_ticket_cc': ('newticket_cc', context), 'ticket_cc': ('newticket_cc', context)}, fail_silently=True, extra_headers=extra_headers) else: context.update(comment=f.comment) ticket.send({'submitter': ('updated_submitter', context), 'assigned_to': ('updated_owner', context)}, fail_silently=True, extra_headers=extra_headers) if queue.enable_notifications_on_email_events: ticket.send({'ticket_cc': ('updated_cc', context)}, fail_silently=True, extra_headers=extra_headers) return ticket
django-helpdesk
positive
def build(self): source = prev_input = self.input_nodes[0] anchor_points = collections.deque([source], maxlen=3) for _ in range(self.num_layers): vnode = VariableNode() <DeepExtract> vnode.add_op(Identity()) activations = [None, tf.nn.swish, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid] for units in range(16, 97, 16): for activation in activations: vnode.add_op(Dense(units=units, activation=activation)) </DeepExtract> self.connect(prev_input, vnode) cell_output = vnode cmerge = ConstantNode() cmerge.set_op(AddByProjecting(self, [cell_output], activation='relu')) for anchor in anchor_points: skipco = VariableNode() skipco.add_op(Zero()) skipco.add_op(Connect(self, anchor)) self.connect(skipco, cmerge) prev_input = cmerge anchor_points.append(prev_input) if self.dropout >= 0.0: dropout_node = ConstantNode(op=Dropout(rate=self.dropout)) self.connect(prev_input, dropout_node) prev_input = dropout_node output_node = ConstantNode(Dense(self.output_shape[0], activation=None if self.regression else 'softmax')) self.connect(prev_input, output_node) return self
def build(self): source = prev_input = self.input_nodes[0] anchor_points = collections.deque([source], maxlen=3) for _ in range(self.num_layers): vnode = VariableNode() vnode.add_op(Identity()) activations = [None, tf.nn.swish, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid] for units in range(16, 97, 16): for activation in activations: vnode.add_op(Dense(units=units, activation=activation)) self.connect(prev_input, vnode) cell_output = vnode cmerge = ConstantNode() cmerge.set_op(AddByProjecting(self, [cell_output], activation='relu')) for anchor in anchor_points: skipco = VariableNode() skipco.add_op(Zero()) skipco.add_op(Connect(self, anchor)) self.connect(skipco, cmerge) prev_input = cmerge anchor_points.append(prev_input) if self.dropout >= 0.0: dropout_node = ConstantNode(op=Dropout(rate=self.dropout)) self.connect(prev_input, dropout_node) prev_input = dropout_node output_node = ConstantNode(Dense(self.output_shape[0], activation=None if self.regression else 'softmax')) self.connect(prev_input, output_node) return self
deephyper
positive
def add_mask_rcnn_blobs(blobs, sampled_boxes, roidb, im_scale, batch_idx): """Add Mask R-CNN specific blobs to the input blob dictionary.""" M = cfg.MRCNN.RESOLUTION polys_gt_inds = np.where((roidb['gt_classes'] > 0) & (roidb['is_crowd'] == 0))[0] polys_gt = [roidb['segms'][i] for i in polys_gt_inds] boxes_from_polys = segm_utils.polys_to_boxes(polys_gt) fg_inds = np.where(blobs['labels_int32'] > 0)[0] roi_has_mask = blobs['labels_int32'].copy() roi_has_mask[roi_has_mask > 0] = 1 if fg_inds.shape[0] > 0: mask_class_labels = blobs['labels_int32'][fg_inds] masks = blob_utils.zeros((fg_inds.shape[0], M ** 2), int32=True) rois_fg = sampled_boxes[fg_inds] overlaps_bbfg_bbpolys = box_utils.bbox_overlaps(rois_fg.astype(np.float32, copy=False), boxes_from_polys.astype(np.float32, copy=False)) fg_polys_inds = np.argmax(overlaps_bbfg_bbpolys, axis=1) for i in range(rois_fg.shape[0]): fg_polys_ind = fg_polys_inds[i] poly_gt = polys_gt[fg_polys_ind] roi_fg = rois_fg[i] mask = segm_utils.polys_to_mask_wrt_box(poly_gt, roi_fg, M) mask = np.array(mask > 0, dtype=np.int32) masks[i, :] = np.reshape(mask, M ** 2) else: bg_inds = np.where(blobs['labels_int32'] == 0)[0] rois_fg = sampled_boxes[bg_inds[0]].reshape((1, -1)) masks = -blob_utils.ones((1, M ** 2), int32=True) mask_class_labels = blob_utils.zeros((1,)) roi_has_mask[0] = 1 if cfg.MRCNN.CLS_SPECIFIC_MASK: <DeepExtract> assert masks.shape[0] == mask_class_labels.shape[0] M = cfg.MRCNN.RESOLUTION mask_targets = -blob_utils.ones((masks.shape[0], cfg.MODEL.NUM_CLASSES * M ** 2), int32=True) for i in range(masks.shape[0]): cls = int(mask_class_labels[i]) start = M ** 2 * cls end = start + M ** 2 if cls > 0: mask_targets[i, start:end] = masks[i, :] masks = mask_targets </DeepExtract> rois_fg *= im_scale repeated_batch_idx = batch_idx * blob_utils.ones((rois_fg.shape[0], 1)) rois_fg = np.hstack((repeated_batch_idx, rois_fg)) blobs['mask_rois'] = rois_fg blobs['roi_has_mask_int32'] = roi_has_mask blobs['masks_int32'] = masks
def add_mask_rcnn_blobs(blobs, sampled_boxes, roidb, im_scale, batch_idx): """Add Mask R-CNN specific blobs to the input blob dictionary.""" M = cfg.MRCNN.RESOLUTION polys_gt_inds = np.where((roidb['gt_classes'] > 0) & (roidb['is_crowd'] == 0))[0] polys_gt = [roidb['segms'][i] for i in polys_gt_inds] boxes_from_polys = segm_utils.polys_to_boxes(polys_gt) fg_inds = np.where(blobs['labels_int32'] > 0)[0] roi_has_mask = blobs['labels_int32'].copy() roi_has_mask[roi_has_mask > 0] = 1 if fg_inds.shape[0] > 0: mask_class_labels = blobs['labels_int32'][fg_inds] masks = blob_utils.zeros((fg_inds.shape[0], M ** 2), int32=True) rois_fg = sampled_boxes[fg_inds] overlaps_bbfg_bbpolys = box_utils.bbox_overlaps(rois_fg.astype(np.float32, copy=False), boxes_from_polys.astype(np.float32, copy=False)) fg_polys_inds = np.argmax(overlaps_bbfg_bbpolys, axis=1) for i in range(rois_fg.shape[0]): fg_polys_ind = fg_polys_inds[i] poly_gt = polys_gt[fg_polys_ind] roi_fg = rois_fg[i] mask = segm_utils.polys_to_mask_wrt_box(poly_gt, roi_fg, M) mask = np.array(mask > 0, dtype=np.int32) masks[i, :] = np.reshape(mask, M ** 2) else: bg_inds = np.where(blobs['labels_int32'] == 0)[0] rois_fg = sampled_boxes[bg_inds[0]].reshape((1, -1)) masks = -blob_utils.ones((1, M ** 2), int32=True) mask_class_labels = blob_utils.zeros((1,)) roi_has_mask[0] = 1 if cfg.MRCNN.CLS_SPECIFIC_MASK: assert masks.shape[0] == mask_class_labels.shape[0] M = cfg.MRCNN.RESOLUTION mask_targets = -blob_utils.ones((masks.shape[0], cfg.MODEL.NUM_CLASSES * M ** 2), int32=True) for i in range(masks.shape[0]): cls = int(mask_class_labels[i]) start = M ** 2 * cls end = start + M ** 2 if cls > 0: mask_targets[i, start:end] = masks[i, :] masks = mask_targets rois_fg *= im_scale repeated_batch_idx = batch_idx * blob_utils.ones((rois_fg.shape[0], 1)) rois_fg = np.hstack((repeated_batch_idx, rois_fg)) blobs['mask_rois'] = rois_fg blobs['roi_has_mask_int32'] = roi_has_mask blobs['masks_int32'] = masks
Detectron-Cascade-RCNN
positive
def removeall(self, v): i = 0 while i < len(self._keys): k = self._keys[i] if v in self.getall(k): <DeepExtract> defaultdict.__getitem__(self, k).remove(v) self._key_value.remove((k, v)) if not dict.__getitem__(self, k): del self[k] self._keys.remove(k) </DeepExtract> self._keys.remove(k) i += 1
def removeall(self, v): i = 0 while i < len(self._keys): k = self._keys[i] if v in self.getall(k): defaultdict.__getitem__(self, k).remove(v) self._key_value.remove((k, v)) if not dict.__getitem__(self, k): del self[k] self._keys.remove(k) self._keys.remove(k) i += 1
camr
positive
def test_flat(self): for dtype in [np.int, np.float]: data = ma.arange(12, dtype=dtype) data[::2] = ma.masked <DeepExtract> array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data)) agg = self.biggus_operator(array, axis=0) self.assertEqual(agg.dtype, data) </DeepExtract> data.mask = ma.nomask data[1::2] = ma.masked <DeepExtract> array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data)) agg = self.biggus_operator(array, axis=0) self.assertEqual(agg.dtype, data) </DeepExtract>
def test_flat(self): for dtype in [np.int, np.float]: data = ma.arange(12, dtype=dtype) data[::2] = ma.masked array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data)) agg = self.biggus_operator(array, axis=0) self.assertEqual(agg.dtype, data) data.mask = ma.nomask data[1::2] = ma.masked array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data)) agg = self.biggus_operator(array, axis=0) self.assertEqual(agg.dtype, data)
biggus
positive
def entry(self, index): (intro_format, intro_length) = self.version and ('>Q', 16) or ('>I', 8) row_length = intro_length + 1 + self.length_size_of_traf_num + 1 + self.length_size_of_trun_num + 1 + self.length_size_of_sample_num row_start = self.offset + 24 + row_length * index <DeepExtract> offset = 0 while offset < len(self.fmap[row_start:row_start + row_length]): ret = struct.unpack_from(intro_format, self.fmap[row_start:row_start + row_length], offset) offset += struct.calcsize(intro_format) intro_format = (yield ret) or intro_format </DeepExtract> time = next(p)[0] moof_offset = next(p)[0] traf = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_traf_num])[-1] trun = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_trun_num])[-1] sample = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_sample_num])[-1] return (time, moof_offset, traf, trun, sample)
def entry(self, index): (intro_format, intro_length) = self.version and ('>Q', 16) or ('>I', 8) row_length = intro_length + 1 + self.length_size_of_traf_num + 1 + self.length_size_of_trun_num + 1 + self.length_size_of_sample_num row_start = self.offset + 24 + row_length * index offset = 0 while offset < len(self.fmap[row_start:row_start + row_length]): ret = struct.unpack_from(intro_format, self.fmap[row_start:row_start + row_length], offset) offset += struct.calcsize(intro_format) intro_format = (yield ret) or intro_format time = next(p)[0] moof_offset = next(p)[0] traf = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_traf_num])[-1] trun = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_trun_num])[-1] sample = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_sample_num])[-1] return (time, moof_offset, traf, trun, sample)
dash-live-source-simulator
positive
def forward(self, w, r, attn_mask=None, mems=None): (qlen, rlen, bsz) = (w.size(0), r.size(0), w.size(1)) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) (w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) (w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) rw_head_q = w_head_q + self.r_w_bias AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) rr_head_q = w_head_q + self.r_r_bias BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) <DeepExtract> zero_pad_shape = (BD.size(0), 1) + BD.size()[2:] zero_pad = torch.zeros(zero_pad_shape, device=BD.device, dtype=BD.dtype) x_padded = torch.cat([zero_pad, BD], dim=1) x_padded_shape = (BD.size(1) + 1, BD.size(0)) + BD.size()[2:] x_padded = x_padded.view(*x_padded_shape) BD = x_padded[1:].view_as(BD) if zero_triu: ones = torch.ones((BD.size(0), BD.size(1))) BD = BD * torch.tril(ones, BD.size(1) - BD.size(0))[:, :, None, None] BD = BD </DeepExtract> attn_score = AC + BD attn_score.mul_(self.scale) if attn_mask is not None and attn_mask.any().item(): if attn_mask.dim() == 2: attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e+30).type_as(attn_score) elif attn_mask.dim() == 3: attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e+30).type_as(attn_score) attn_prob = F.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v)) attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: output = w + attn_out else: output = self.layer_norm(w + attn_out) return output
def forward(self, w, r, attn_mask=None, mems=None): (qlen, rlen, bsz) = (w.size(0), r.size(0), w.size(1)) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) (w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) (w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) rw_head_q = w_head_q + self.r_w_bias AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) rr_head_q = w_head_q + self.r_r_bias BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) zero_pad_shape = (BD.size(0), 1) + BD.size()[2:] zero_pad = torch.zeros(zero_pad_shape, device=BD.device, dtype=BD.dtype) x_padded = torch.cat([zero_pad, BD], dim=1) x_padded_shape = (BD.size(1) + 1, BD.size(0)) + BD.size()[2:] x_padded = x_padded.view(*x_padded_shape) BD = x_padded[1:].view_as(BD) if zero_triu: ones = torch.ones((BD.size(0), BD.size(1))) BD = BD * torch.tril(ones, BD.size(1) - BD.size(0))[:, :, None, None] BD = BD attn_score = AC + BD attn_score.mul_(self.scale) if attn_mask is not None and attn_mask.any().item(): if attn_mask.dim() == 2: attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e+30).type_as(attn_score) elif attn_mask.dim() == 3: attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e+30).type_as(attn_score) attn_prob = F.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v)) attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: output = w + attn_out else: output = self.layer_norm(w + attn_out) return output
EssayKiller_V2
positive
def notify_permissions(self): """ Sends clients the list of shared terminals if they have been granted access to any shared terminal. .. note:: Normally this only gets called from `~TerminalApplication.permissions` after something changed. """ self.term_log.debug('notify_permissions()') cls = ApplicationWebSocket users = cls._list_connected_users() shared_terms = self.ws.persist['terminal']['shared'] def send_message(user): <DeepExtract> out_dict = {} if not user: user = self.current_user shared_terms = self.ws.persist['terminal'].get('shared', {}) for (share_id, share_dict) in shared_terms.items(): owner = False auth_or_anon = False explicit_user = False for read_perm in share_dict['read']: if read_perm in ['AUTHENTICATED', 'ANONYMOUS']: auth_or_anon = True if user['upn'] in share_dict['read']: explicit_user = True if share_dict['user']['upn'] == user['upn']: owner = True if owner or auth_or_anon or explicit_user: password = share_dict.get('password', False) if password == None: password = False elif password and (not owner): password = True broadcast = share_dict.get('broadcast', False) out_dict[share_id] = {'owner': share_dict['user']['upn'], 'term': share_dict['term'], 'title': share_dict['term_obj']['title'], 'read': share_dict['read'], 'write': share_dict['write'], 'viewers': share_dict['viewers'], 'password_protected': password, 'broadcast': broadcast} out_dict = out_dict </DeepExtract> message = {'terminal:shared_terminals': {'terminals': out_dict}} if user['upn'] == 'ANONYMOUS': cls._deliver(message, session=user['session']) else: cls._deliver(message, upn=user['upn']) for user in users: upn = user.get('upn', None) if not upn: continue for (share_id, share_dict) in shared_terms.items(): try: if share_dict['user'] == user: <DeepExtract> out_dict = self._shared_terminals_dict(user=user) message = {'terminal:shared_terminals': {'terminals': out_dict}} if user['upn'] == 'ANONYMOUS': cls._deliver(message, session=user['session']) else: cls._deliver(message, upn=user['upn']) </DeepExtract> break if 'AUTHENTICATED' in share_dict['read']: <DeepExtract> out_dict = self._shared_terminals_dict(user=user) message = {'terminal:shared_terminals': {'terminals': out_dict}} if user['upn'] == 'ANONYMOUS': cls._deliver(message, session=user['session']) else: cls._deliver(message, upn=user['upn']) </DeepExtract> break if upn in share_dict['read']: <DeepExtract> out_dict = self._shared_terminals_dict(user=user) message = {'terminal:shared_terminals': {'terminals': out_dict}} if user['upn'] == 'ANONYMOUS': cls._deliver(message, session=user['session']) else: cls._deliver(message, upn=user['upn']) </DeepExtract> break except AttributeError: pass
def notify_permissions(self): """ Sends clients the list of shared terminals if they have been granted access to any shared terminal. .. note:: Normally this only gets called from `~TerminalApplication.permissions` after something changed. """ self.term_log.debug('notify_permissions()') cls = ApplicationWebSocket users = cls._list_connected_users() shared_terms = self.ws.persist['terminal']['shared'] def send_message(user): out_dict = {} if not user: user = self.current_user shared_terms = self.ws.persist['terminal'].get('shared', {}) for (share_id, share_dict) in shared_terms.items(): owner = False auth_or_anon = False explicit_user = False for read_perm in share_dict['read']: if read_perm in ['AUTHENTICATED', 'ANONYMOUS']: auth_or_anon = True if user['upn'] in share_dict['read']: explicit_user = True if share_dict['user']['upn'] == user['upn']: owner = True if owner or auth_or_anon or explicit_user: password = share_dict.get('password', False) if password == None: password = False elif password and (not owner): password = True broadcast = share_dict.get('broadcast', False) out_dict[share_id] = {'owner': share_dict['user']['upn'], 'term': share_dict['term'], 'title': share_dict['term_obj']['title'], 'read': share_dict['read'], 'write': share_dict['write'], 'viewers': share_dict['viewers'], 'password_protected': password, 'broadcast': broadcast} out_dict = out_dict message = {'terminal:shared_terminals': {'terminals': out_dict}} if user['upn'] == 'ANONYMOUS': cls._deliver(message, session=user['session']) else: cls._deliver(message, upn=user['upn']) for user in users: upn = user.get('upn', None) if not upn: continue for (share_id, share_dict) in shared_terms.items(): try: if share_dict['user'] == user: out_dict = self._shared_terminals_dict(user=user) message = {'terminal:shared_terminals': {'terminals': out_dict}} if user['upn'] == 'ANONYMOUS': cls._deliver(message, session=user['session']) else: cls._deliver(message, upn=user['upn']) break if 'AUTHENTICATED' in share_dict['read']: out_dict = self._shared_terminals_dict(user=user) message = {'terminal:shared_terminals': {'terminals': out_dict}} if user['upn'] == 'ANONYMOUS': cls._deliver(message, session=user['session']) else: cls._deliver(message, upn=user['upn']) break if upn in share_dict['read']: out_dict = self._shared_terminals_dict(user=user) message = {'terminal:shared_terminals': {'terminals': out_dict}} if user['upn'] == 'ANONYMOUS': cls._deliver(message, session=user['session']) else: cls._deliver(message, upn=user['upn']) break except AttributeError: pass
django-gateone
positive
def update_balance_by_exchange(exchange_id, cache=get_cache()): <DeepExtract> res = (STATUS.FAILURE, None) key = get_key_by_exchange(exchange_id) method_by_exchange = {EXCHANGE.BITTREX: get_balance_bittrex, EXCHANGE.KRAKEN: get_balance_kraken, EXCHANGE.POLONIEX: get_balance_poloniex, EXCHANGE.BINANCE: get_balance_binance, EXCHANGE.HUOBI: get_balance_huobi} if exchange_id in method_by_exchange: res = method_by_exchange[exchange_id](key) (_, balance) = res log_to_file(balance, 'balance.log') else: msg = 'get_balance_by_exchange - Unknown exchange_id! {idx}'.format(idx=exchange_id) print_to_console(msg, LOG_ALL_ERRORS) (status_code, balance) = res </DeepExtract> exchange_name = get_exchange_name_by_id(exchange_id) if status_code == STATUS.SUCCESS and balance is not None: cache.update_balance(exchange_name, balance) log_to_file('Update balance at cache', 'balance.log') log_to_file(balance, 'balance.log') msg = "Can't update balance for exchange_id = {exch1} {exch_name}".format(exch1=exchange_id, exch_name=exchange_name) log_to_file(msg, 'cache.log') log_to_file(msg, 'balance.log') return (status_code, balance)
def update_balance_by_exchange(exchange_id, cache=get_cache()): res = (STATUS.FAILURE, None) key = get_key_by_exchange(exchange_id) method_by_exchange = {EXCHANGE.BITTREX: get_balance_bittrex, EXCHANGE.KRAKEN: get_balance_kraken, EXCHANGE.POLONIEX: get_balance_poloniex, EXCHANGE.BINANCE: get_balance_binance, EXCHANGE.HUOBI: get_balance_huobi} if exchange_id in method_by_exchange: res = method_by_exchange[exchange_id](key) (_, balance) = res log_to_file(balance, 'balance.log') else: msg = 'get_balance_by_exchange - Unknown exchange_id! {idx}'.format(idx=exchange_id) print_to_console(msg, LOG_ALL_ERRORS) (status_code, balance) = res exchange_name = get_exchange_name_by_id(exchange_id) if status_code == STATUS.SUCCESS and balance is not None: cache.update_balance(exchange_name, balance) log_to_file('Update balance at cache', 'balance.log') log_to_file(balance, 'balance.log') msg = "Can't update balance for exchange_id = {exch1} {exch_name}".format(exch1=exchange_id, exch_name=exchange_name) log_to_file(msg, 'cache.log') log_to_file(msg, 'balance.log') return (status_code, balance)
crypto_crawler
positive
def _delete_chunks(self, evt): <DeepExtract> class DeleteChunksDialog(wx.Dialog): def __init__(self, *args, **kwds): kwds['style'] = kwds.get('style', 0) | wx.DEFAULT_DIALOG_STYLE wx.Dialog.__init__(self, *args, **kwds) self.SetTitle('Do you want to load the original chunk state?') sizer_1 = wx.BoxSizer(wx.VERTICAL) label_1 = wx.StaticText(self, wx.ID_ANY, 'Do you want to load the original chunk state?\n\nClicking "Yes" will allow you to undo this operation but the operation will take a while to process.\n\nClicking "No" will mean this operation cannot be undone.\n\nChanges will not be made to the world until you save so closing before saving will not actually delete the chunks.', style=wx.ALIGN_CENTER_HORIZONTAL) label_1.Wrap(500) sizer_1.Add(label_1, 0, wx.ALL, 5) sizer_2 = wx.StdDialogButtonSizer() sizer_1.Add(sizer_2, 0, wx.ALIGN_RIGHT | wx.ALL, 4) self.button_YES = wx.Button(self, wx.ID_YES, '') self.button_YES.SetDefault() sizer_2.AddButton(self.button_YES) self.button_NO = wx.Button(self, wx.ID_NO, '') self.button_NO.Bind(wx.EVT_BUTTON, self._on_no) sizer_2.AddButton(self.button_NO) self.button_CANCEL = wx.Button(self, wx.ID_CANCEL, '') sizer_2.AddButton(self.button_CANCEL) sizer_2.Realize() self.SetSizer(sizer_1) sizer_1.Fit(self) self.SetAffirmativeId(self.button_YES.GetId()) self.SetEscapeId(self.button_CANCEL.GetId()) self.Layout() def _on_no(self, evt): self.EndModal(wx.ID_NO) d = DeleteChunksDialog(self.canvas) response = d.ShowModal() if response == wx.ID_YES: load_original = True elif response == wx.ID_NO: load_original = False load_original = None </DeepExtract> if load_original is not None: self.canvas.run_operation(lambda : delete_chunk(self.canvas.world, self.canvas.dimension, self.canvas.selection.selection_group, load_original))
def _delete_chunks(self, evt): class DeleteChunksDialog(wx.Dialog): def __init__(self, *args, **kwds): kwds['style'] = kwds.get('style', 0) | wx.DEFAULT_DIALOG_STYLE wx.Dialog.__init__(self, *args, **kwds) self.SetTitle('Do you want to load the original chunk state?') sizer_1 = wx.BoxSizer(wx.VERTICAL) label_1 = wx.StaticText(self, wx.ID_ANY, 'Do you want to load the original chunk state?\n\nClicking "Yes" will allow you to undo this operation but the operation will take a while to process.\n\nClicking "No" will mean this operation cannot be undone.\n\nChanges will not be made to the world until you save so closing before saving will not actually delete the chunks.', style=wx.ALIGN_CENTER_HORIZONTAL) label_1.Wrap(500) sizer_1.Add(label_1, 0, wx.ALL, 5) sizer_2 = wx.StdDialogButtonSizer() sizer_1.Add(sizer_2, 0, wx.ALIGN_RIGHT | wx.ALL, 4) self.button_YES = wx.Button(self, wx.ID_YES, '') self.button_YES.SetDefault() sizer_2.AddButton(self.button_YES) self.button_NO = wx.Button(self, wx.ID_NO, '') self.button_NO.Bind(wx.EVT_BUTTON, self._on_no) sizer_2.AddButton(self.button_NO) self.button_CANCEL = wx.Button(self, wx.ID_CANCEL, '') sizer_2.AddButton(self.button_CANCEL) sizer_2.Realize() self.SetSizer(sizer_1) sizer_1.Fit(self) self.SetAffirmativeId(self.button_YES.GetId()) self.SetEscapeId(self.button_CANCEL.GetId()) self.Layout() def _on_no(self, evt): self.EndModal(wx.ID_NO) d = DeleteChunksDialog(self.canvas) response = d.ShowModal() if response == wx.ID_YES: load_original = True elif response == wx.ID_NO: load_original = False load_original = None if load_original is not None: self.canvas.run_operation(lambda : delete_chunk(self.canvas.world, self.canvas.dimension, self.canvas.selection.selection_group, load_original))
Amulet-Map-Editor
positive
def RawValue(typecode_or_type, *args): """ Returns a ctypes object allocated from shared memory """ type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) <DeepExtract> size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) obj = rebuild_ctype(type_, wrapper, None) </DeepExtract> ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj
def RawValue(typecode_or_type, *args): """ Returns a ctypes object allocated from shared memory """ type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) obj = rebuild_ctype(type_, wrapper, None) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj
billiard
positive
def __init__(self, triple_store, relation_vocab, entity_vocab, max_num_actions): """ :param triple_store: :param relation_vocab: :param entity_vocab: :param max_num_actions: :return: self.array_store: shape = (len(entity_vocab), max_num_actions, 2) """ self.ePAD = entity_vocab['PAD'] self.rPAD = relation_vocab['PAD'] self.triple_store = triple_store self.relation_vocab = relation_vocab self.entity_vocab = entity_vocab self.store = defaultdict(list) self.array_store = np.ones((len(entity_vocab), max_num_actions, 2), dtype=np.dtype('int32')) self.array_store[:, :, 0] *= self.ePAD self.array_store[:, :, 1] *= self.rPAD self.masked_array_store = None self.rev_relation_vocab = dict([(v, k) for (k, v) in relation_vocab.items()]) self.rev_entity_vocab = dict([(v, k) for (k, v) in entity_vocab.items()]) <DeepExtract> with open(self.triple_store) as triple_file_raw: triple_file = csv.reader(triple_file_raw, delimiter='\t') for line in triple_file: e1 = self.entity_vocab[line[0]] r = self.relation_vocab[line[1]] e2 = self.entity_vocab[line[2]] self.store[e1].append((r, e2)) for e1 in self.store: num_actions = 1 self.array_store[e1, 0, 1] = self.relation_vocab['NO_OP'] self.array_store[e1, 0, 0] = e1 for (r, e2) in self.store[e1]: if num_actions == self.array_store.shape[1]: break self.array_store[e1, num_actions, 0] = e2 self.array_store[e1, num_actions, 1] = r num_actions += 1 del self.store self.store = None </DeepExtract> print('KG constructed')
def __init__(self, triple_store, relation_vocab, entity_vocab, max_num_actions): """ :param triple_store: :param relation_vocab: :param entity_vocab: :param max_num_actions: :return: self.array_store: shape = (len(entity_vocab), max_num_actions, 2) """ self.ePAD = entity_vocab['PAD'] self.rPAD = relation_vocab['PAD'] self.triple_store = triple_store self.relation_vocab = relation_vocab self.entity_vocab = entity_vocab self.store = defaultdict(list) self.array_store = np.ones((len(entity_vocab), max_num_actions, 2), dtype=np.dtype('int32')) self.array_store[:, :, 0] *= self.ePAD self.array_store[:, :, 1] *= self.rPAD self.masked_array_store = None self.rev_relation_vocab = dict([(v, k) for (k, v) in relation_vocab.items()]) self.rev_entity_vocab = dict([(v, k) for (k, v) in entity_vocab.items()]) with open(self.triple_store) as triple_file_raw: triple_file = csv.reader(triple_file_raw, delimiter='\t') for line in triple_file: e1 = self.entity_vocab[line[0]] r = self.relation_vocab[line[1]] e2 = self.entity_vocab[line[2]] self.store[e1].append((r, e2)) for e1 in self.store: num_actions = 1 self.array_store[e1, 0, 1] = self.relation_vocab['NO_OP'] self.array_store[e1, 0, 0] = e1 for (r, e2) in self.store[e1]: if num_actions == self.array_store.shape[1]: break self.array_store[e1, num_actions, 0] = e2 self.array_store[e1, num_actions, 1] = r num_actions += 1 del self.store self.store = None print('KG constructed')
CPL
positive
def test_adb_binary(ADB, mocker): output = mock.Mock(stdout=b'lala', returncode=0) mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output) <DeepExtract> 'hello'.patch('andriller.adb_conn.ADBConn.kill') 'hello'.patch('andriller.adb_conn.ADBConn._opt_use_capture', return_value=True) with mock.patch('andriller.adb_conn.ADBConn._get_adb_bin', return_value=fake_adb.name): with mock.patch('andriller.adb_conn.ADBConn._adb_has_exec', return_value=True): adb = adb_conn.ADBConn() adb_cmd = adb.adb.__func__ setattr(adb, 'adb', lambda *args, **kwargs: adb_cmd(adb, *args, **kwargs)) res = adb </DeepExtract> assert res == b'lala' mock_run.assert_called_with([fake_adb.name, 'hello'], capture_output=True, shell=False, startupinfo=None)
def test_adb_binary(ADB, mocker): output = mock.Mock(stdout=b'lala', returncode=0) mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output) 'hello'.patch('andriller.adb_conn.ADBConn.kill') 'hello'.patch('andriller.adb_conn.ADBConn._opt_use_capture', return_value=True) with mock.patch('andriller.adb_conn.ADBConn._get_adb_bin', return_value=fake_adb.name): with mock.patch('andriller.adb_conn.ADBConn._adb_has_exec', return_value=True): adb = adb_conn.ADBConn() adb_cmd = adb.adb.__func__ setattr(adb, 'adb', lambda *args, **kwargs: adb_cmd(adb, *args, **kwargs)) res = adb assert res == b'lala' mock_run.assert_called_with([fake_adb.name, 'hello'], capture_output=True, shell=False, startupinfo=None)
andriller
positive
def start_join(self, ratio=0.8, wait=True): """Start Athena queries for the joining Args: ratio (float): Split ratio for training and evaluation data set wait (bool): Whether the call should wait until the joining completes. """ logger.info(f'Splitting data into train/evaluation set with ratio of {ratio}') (obs_start_time, obs_end_time) = self.join_job_record.get_obs_start_end_time() <DeepExtract> if obs_start_time is not None: start_time_str = obs_start_time.strftime('%Y-%m-%d-%H') if obs_end_time is not None: end_time_str = obs_end_time.strftime('%Y-%m-%d-%H') if obs_start_time is None or obs_end_time is None: query_string_prefix = f'\n WITH joined_table AS\n (SELECT {self.obs_table_non_partitioned}.event_id AS event_id,\n {self.obs_table_non_partitioned}.action AS action,\n {self.obs_table_non_partitioned}.action_prob AS action_prob,\n {self.obs_table_non_partitioned}.model_id AS model_id,\n {self.obs_table_non_partitioned}.observation AS observation,\n {self.obs_table_non_partitioned}.sample_prob AS sample_prob,\n {self.rewards_table}.reward AS reward\n FROM {self.obs_table_non_partitioned}\n JOIN {self.rewards_table}\n ON {self.rewards_table}.event_id={self.obs_table_non_partitioned}.event_id)' else: query_string_prefix = f"\n WITH joined_table AS\n ( WITH obs_table AS\n (SELECT *\n FROM {self.obs_table_partitioned}\n WHERE dt<='{end_time_str}' AND dt>='{start_time_str}'\n )\n SELECT obs_table.event_id AS event_id,\n obs_table.action AS action,\n obs_table.action_prob AS action_prob,\n obs_table.model_id AS model_id,\n obs_table.observation AS observation,\n obs_table.sample_prob AS sample_prob,\n {self.rewards_table}.reward AS reward\n FROM obs_table\n JOIN {self.rewards_table}\n ON {self.rewards_table}.event_id=obs_table.event_id\n )" if True: query_sample_string = f'SELECT * FROM joined_table WHERE joined_table.sample_prob <= {ratio}' else: query_sample_string = f'SELECT * FROM joined_table WHERE joined_table.sample_prob > {ratio}' query_string = f'\n {query_string_prefix}\n {query_sample_string}' join_query_for_train_data = query_string </DeepExtract> <DeepExtract> if obs_start_time is not None: start_time_str = obs_start_time.strftime('%Y-%m-%d-%H') if obs_end_time is not None: end_time_str = obs_end_time.strftime('%Y-%m-%d-%H') if obs_start_time is None or obs_end_time is None: query_string_prefix = f'\n WITH joined_table AS\n (SELECT {self.obs_table_non_partitioned}.event_id AS event_id,\n {self.obs_table_non_partitioned}.action AS action,\n {self.obs_table_non_partitioned}.action_prob AS action_prob,\n {self.obs_table_non_partitioned}.model_id AS model_id,\n {self.obs_table_non_partitioned}.observation AS observation,\n {self.obs_table_non_partitioned}.sample_prob AS sample_prob,\n {self.rewards_table}.reward AS reward\n FROM {self.obs_table_non_partitioned}\n JOIN {self.rewards_table}\n ON {self.rewards_table}.event_id={self.obs_table_non_partitioned}.event_id)' else: query_string_prefix = f"\n WITH joined_table AS\n ( WITH obs_table AS\n (SELECT *\n FROM {self.obs_table_partitioned}\n WHERE dt<='{end_time_str}' AND dt>='{start_time_str}'\n )\n SELECT obs_table.event_id AS event_id,\n obs_table.action AS action,\n obs_table.action_prob AS action_prob,\n obs_table.model_id AS model_id,\n obs_table.observation AS observation,\n obs_table.sample_prob AS sample_prob,\n {self.rewards_table}.reward AS reward\n FROM obs_table\n JOIN {self.rewards_table}\n ON {self.rewards_table}.event_id=obs_table.event_id\n )" if False: query_sample_string = f'SELECT * FROM joined_table WHERE joined_table.sample_prob <= {ratio}' else: query_sample_string = f'SELECT * FROM joined_table WHERE joined_table.sample_prob > {ratio}' query_string = f'\n {query_string_prefix}\n {query_sample_string}' join_query_for_eval_data = query_string </DeepExtract> s3_output_path = f's3://{self.query_s3_output_bucket}/{self.experiment_id}/joined_data/{self.join_job_id}' logger.info(f'Joined data will be stored under {s3_output_path}') <DeepExtract> try: response = self.athena_client.start_query_execution(QueryString=join_query_for_train_data, ResultConfiguration={'OutputLocation': f'{s3_output_path}/train'}) query_id = response['QueryExecutionId'] except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] raise RuntimeError('Failed to submit athena query with error {}: {}'.format(error_code, message)) join_query_id_for_train = query_id </DeepExtract> <DeepExtract> try: response = self.athena_client.start_query_execution(QueryString=join_query_for_eval_data, ResultConfiguration={'OutputLocation': f'{s3_output_path}/eval'}) query_id = response['QueryExecutionId'] except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] raise RuntimeError('Failed to submit athena query with error {}: {}'.format(error_code, message)) join_query_id_for_eval = query_id </DeepExtract> self.join_db_client.update_join_job_current_state(self.experiment_id, self.join_job_id, 'PENDING') self.join_db_client.update_join_job_output_joined_train_data_s3_path(self.experiment_id, self.join_job_id, f'{s3_output_path}/train') self.join_db_client.update_join_job_output_joined_eval_data_s3_path(self.experiment_id, self.join_job_id, f'{s3_output_path}/eval') self.join_db_client.update_join_job_join_query_ids(self.experiment_id, self.join_job_id, [join_query_id_for_train, join_query_id_for_eval]) if wait: <DeepExtract> status = 'QUEUED' while status == 'RUNNING' or status == 'QUEUED': try: response = self.athena_client.get_query_execution(QueryExecutionId=join_query_id_for_train) status = response['QueryExecution']['Status']['State'] logger.debug(f'Waiting query to finish...') time.sleep(5) except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] raise RuntimeError('Failed to retrieve athena query status with error {}: {}'.format(error_code, message)) if status == 'FAILED': raise RuntimeError(f"Query failed with reason: {response['QueryExecution']['Status']['StateChangeReason']}") elif status == 'CANCELLED': logger.warning('Query was cancelled...') elif status == 'SUCCEEDED': logger.debug('Query finished successfully') </DeepExtract> <DeepExtract> status = 'QUEUED' while status == 'RUNNING' or status == 'QUEUED': try: response = self.athena_client.get_query_execution(QueryExecutionId=join_query_id_for_eval) status = response['QueryExecution']['Status']['State'] logger.debug(f'Waiting query to finish...') time.sleep(5) except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] raise RuntimeError('Failed to retrieve athena query status with error {}: {}'.format(error_code, message)) if status == 'FAILED': raise RuntimeError(f"Query failed with reason: {response['QueryExecution']['Status']['StateChangeReason']}") elif status == 'CANCELLED': logger.warning('Query was cancelled...') elif status == 'SUCCEEDED': logger.debug('Query finished successfully') </DeepExtract>
def start_join(self, ratio=0.8, wait=True): """Start Athena queries for the joining Args: ratio (float): Split ratio for training and evaluation data set wait (bool): Whether the call should wait until the joining completes. """ logger.info(f'Splitting data into train/evaluation set with ratio of {ratio}') (obs_start_time, obs_end_time) = self.join_job_record.get_obs_start_end_time() if obs_start_time is not None: start_time_str = obs_start_time.strftime('%Y-%m-%d-%H') if obs_end_time is not None: end_time_str = obs_end_time.strftime('%Y-%m-%d-%H') if obs_start_time is None or obs_end_time is None: query_string_prefix = f'\n WITH joined_table AS\n (SELECT {self.obs_table_non_partitioned}.event_id AS event_id,\n {self.obs_table_non_partitioned}.action AS action,\n {self.obs_table_non_partitioned}.action_prob AS action_prob,\n {self.obs_table_non_partitioned}.model_id AS model_id,\n {self.obs_table_non_partitioned}.observation AS observation,\n {self.obs_table_non_partitioned}.sample_prob AS sample_prob,\n {self.rewards_table}.reward AS reward\n FROM {self.obs_table_non_partitioned}\n JOIN {self.rewards_table}\n ON {self.rewards_table}.event_id={self.obs_table_non_partitioned}.event_id)' else: query_string_prefix = f"\n WITH joined_table AS\n ( WITH obs_table AS\n (SELECT *\n FROM {self.obs_table_partitioned}\n WHERE dt<='{end_time_str}' AND dt>='{start_time_str}'\n )\n SELECT obs_table.event_id AS event_id,\n obs_table.action AS action,\n obs_table.action_prob AS action_prob,\n obs_table.model_id AS model_id,\n obs_table.observation AS observation,\n obs_table.sample_prob AS sample_prob,\n {self.rewards_table}.reward AS reward\n FROM obs_table\n JOIN {self.rewards_table}\n ON {self.rewards_table}.event_id=obs_table.event_id\n )" if True: query_sample_string = f'SELECT * FROM joined_table WHERE joined_table.sample_prob <= {ratio}' else: query_sample_string = f'SELECT * FROM joined_table WHERE joined_table.sample_prob > {ratio}' query_string = f'\n {query_string_prefix}\n {query_sample_string}' join_query_for_train_data = query_string if obs_start_time is not None: start_time_str = obs_start_time.strftime('%Y-%m-%d-%H') if obs_end_time is not None: end_time_str = obs_end_time.strftime('%Y-%m-%d-%H') if obs_start_time is None or obs_end_time is None: query_string_prefix = f'\n WITH joined_table AS\n (SELECT {self.obs_table_non_partitioned}.event_id AS event_id,\n {self.obs_table_non_partitioned}.action AS action,\n {self.obs_table_non_partitioned}.action_prob AS action_prob,\n {self.obs_table_non_partitioned}.model_id AS model_id,\n {self.obs_table_non_partitioned}.observation AS observation,\n {self.obs_table_non_partitioned}.sample_prob AS sample_prob,\n {self.rewards_table}.reward AS reward\n FROM {self.obs_table_non_partitioned}\n JOIN {self.rewards_table}\n ON {self.rewards_table}.event_id={self.obs_table_non_partitioned}.event_id)' else: query_string_prefix = f"\n WITH joined_table AS\n ( WITH obs_table AS\n (SELECT *\n FROM {self.obs_table_partitioned}\n WHERE dt<='{end_time_str}' AND dt>='{start_time_str}'\n )\n SELECT obs_table.event_id AS event_id,\n obs_table.action AS action,\n obs_table.action_prob AS action_prob,\n obs_table.model_id AS model_id,\n obs_table.observation AS observation,\n obs_table.sample_prob AS sample_prob,\n {self.rewards_table}.reward AS reward\n FROM obs_table\n JOIN {self.rewards_table}\n ON {self.rewards_table}.event_id=obs_table.event_id\n )" if False: query_sample_string = f'SELECT * FROM joined_table WHERE joined_table.sample_prob <= {ratio}' else: query_sample_string = f'SELECT * FROM joined_table WHERE joined_table.sample_prob > {ratio}' query_string = f'\n {query_string_prefix}\n {query_sample_string}' join_query_for_eval_data = query_string s3_output_path = f's3://{self.query_s3_output_bucket}/{self.experiment_id}/joined_data/{self.join_job_id}' logger.info(f'Joined data will be stored under {s3_output_path}') try: response = self.athena_client.start_query_execution(QueryString=join_query_for_train_data, ResultConfiguration={'OutputLocation': f'{s3_output_path}/train'}) query_id = response['QueryExecutionId'] except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] raise RuntimeError('Failed to submit athena query with error {}: {}'.format(error_code, message)) join_query_id_for_train = query_id try: response = self.athena_client.start_query_execution(QueryString=join_query_for_eval_data, ResultConfiguration={'OutputLocation': f'{s3_output_path}/eval'}) query_id = response['QueryExecutionId'] except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] raise RuntimeError('Failed to submit athena query with error {}: {}'.format(error_code, message)) join_query_id_for_eval = query_id self.join_db_client.update_join_job_current_state(self.experiment_id, self.join_job_id, 'PENDING') self.join_db_client.update_join_job_output_joined_train_data_s3_path(self.experiment_id, self.join_job_id, f'{s3_output_path}/train') self.join_db_client.update_join_job_output_joined_eval_data_s3_path(self.experiment_id, self.join_job_id, f'{s3_output_path}/eval') self.join_db_client.update_join_job_join_query_ids(self.experiment_id, self.join_job_id, [join_query_id_for_train, join_query_id_for_eval]) if wait: status = 'QUEUED' while status == 'RUNNING' or status == 'QUEUED': try: response = self.athena_client.get_query_execution(QueryExecutionId=join_query_id_for_train) status = response['QueryExecution']['Status']['State'] logger.debug(f'Waiting query to finish...') time.sleep(5) except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] raise RuntimeError('Failed to retrieve athena query status with error {}: {}'.format(error_code, message)) if status == 'FAILED': raise RuntimeError(f"Query failed with reason: {response['QueryExecution']['Status']['StateChangeReason']}") elif status == 'CANCELLED': logger.warning('Query was cancelled...') elif status == 'SUCCEEDED': logger.debug('Query finished successfully') status = 'QUEUED' while status == 'RUNNING' or status == 'QUEUED': try: response = self.athena_client.get_query_execution(QueryExecutionId=join_query_id_for_eval) status = response['QueryExecution']['Status']['State'] logger.debug(f'Waiting query to finish...') time.sleep(5) except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] raise RuntimeError('Failed to retrieve athena query status with error {}: {}'.format(error_code, message)) if status == 'FAILED': raise RuntimeError(f"Query failed with reason: {response['QueryExecution']['Status']['StateChangeReason']}") elif status == 'CANCELLED': logger.warning('Query was cancelled...') elif status == 'SUCCEEDED': logger.debug('Query finished successfully')
deepracer-local
positive
def sleep(self): """ Sleep between retry attempts using an exponential backoff. By default, the backoff factor is 0 and this method will return immediately. """ <DeepExtract> if self._observed_errors <= 1: backoff = 0 backoff_value = self.backoff_factor * 2 ** (self._observed_errors - 1) backoff = min(self.BACKOFF_MAX, backoff_value) </DeepExtract> if backoff <= 0: return time.sleep(backoff)
def sleep(self): """ Sleep between retry attempts using an exponential backoff. By default, the backoff factor is 0 and this method will return immediately. """ if self._observed_errors <= 1: backoff = 0 backoff_value = self.backoff_factor * 2 ** (self._observed_errors - 1) backoff = min(self.BACKOFF_MAX, backoff_value) if backoff <= 0: return time.sleep(backoff)
crunchy-xml-decoder
positive
def read_expr(src): token = src.remove_front() if token is None: raise SyntaxError('Incomplete expression') elif is_literal(token): return read_call_expr(src, Literal(token)) elif is_name(token): return read_call_expr(src, Name(token)) elif token == 'lambda': <DeepExtract> if src.current() in (':', ')'): params = [] else: s = [read_param(src)] while src.current() == ',': src.remove_front() s.append(read_param(src)) params = s </DeepExtract> src.expect(':') <DeepExtract> token = src.remove_front() if token is None: raise SyntaxError('Incomplete expression') elif is_literal(token): body = read_call_expr(src, Literal(token)) elif is_name(token): body = read_call_expr(src, Name(token)) elif token == 'lambda': params = read_comma_separated(src, read_param) src.expect(':') body = read_expr(src) body = LambdaExpr(params, body) elif token == '(': inner_expr = read_expr(src) src.expect(')') body = read_call_expr(src, inner_expr) else: raise SyntaxError("'{}' is not the start of an expression".format(token)) </DeepExtract> return LambdaExpr(params, body) elif token == '(': <DeepExtract> token = src.remove_front() if token is None: raise SyntaxError('Incomplete expression') elif is_literal(token): inner_expr = read_call_expr(src, Literal(token)) elif is_name(token): inner_expr = read_call_expr(src, Name(token)) elif token == 'lambda': params = read_comma_separated(src, read_param) src.expect(':') body = read_expr(src) inner_expr = LambdaExpr(params, body) elif token == '(': inner_expr = read_expr(src) src.expect(')') inner_expr = read_call_expr(src, inner_expr) else: raise SyntaxError("'{}' is not the start of an expression".format(token)) </DeepExtract> src.expect(')') return read_call_expr(src, inner_expr) else: raise SyntaxError("'{}' is not the start of an expression".format(token))
def read_expr(src): token = src.remove_front() if token is None: raise SyntaxError('Incomplete expression') elif is_literal(token): return read_call_expr(src, Literal(token)) elif is_name(token): return read_call_expr(src, Name(token)) elif token == 'lambda': if src.current() in (':', ')'): params = [] else: s = [read_param(src)] while src.current() == ',': src.remove_front() s.append(read_param(src)) params = s src.expect(':') token = src.remove_front() if token is None: raise SyntaxError('Incomplete expression') elif is_literal(token): body = read_call_expr(src, Literal(token)) elif is_name(token): body = read_call_expr(src, Name(token)) elif token == 'lambda': params = read_comma_separated(src, read_param) src.expect(':') body = read_expr(src) body = LambdaExpr(params, body) elif token == '(': inner_expr = read_expr(src) src.expect(')') body = read_call_expr(src, inner_expr) else: raise SyntaxError("'{}' is not the start of an expression".format(token)) return LambdaExpr(params, body) elif token == '(': token = src.remove_front() if token is None: raise SyntaxError('Incomplete expression') elif is_literal(token): inner_expr = read_call_expr(src, Literal(token)) elif is_name(token): inner_expr = read_call_expr(src, Name(token)) elif token == 'lambda': params = read_comma_separated(src, read_param) src.expect(':') body = read_expr(src) inner_expr = LambdaExpr(params, body) elif token == '(': inner_expr = read_expr(src) src.expect(')') inner_expr = read_call_expr(src, inner_expr) else: raise SyntaxError("'{}' is not the start of an expression".format(token)) src.expect(')') return read_call_expr(src, inner_expr) else: raise SyntaxError("'{}' is not the start of an expression".format(token))
cs61a-2018-spring
positive
@property def summary(self): """ A summary for the estimated causal effect after calling :meth:`fit`. """ col_names = ['coef', 'std err', 't', 'P>|t|'] if np.isnan(self.coef).all(): df_summary = pd.DataFrame(columns=col_names) else: summary_stats = np.transpose(np.vstack([self.coef, self.se, self.t_stat, self.pval])) df_summary = pd.DataFrame(summary_stats, columns=col_names, index=self.quantiles) <DeepExtract> if not isinstance(joint, bool): raise TypeError(f'joint must be True or False. Got {str(joint)}.') if not isinstance(level, float): raise TypeError(f'The confidence level must be of float type. Object of type {str(type(level))} was passed.') if (level <= 0) | (level >= 1): raise ValueError(f'The confidence level must be in (0,1). {str(level)} was passed.') a = 1 - level ab = np.array([a / 2, 1.0 - a / 2]) if joint: if np.isnan(self.boot_coef).all(): raise ValueError('Apply fit() & bootstrap() before confint(joint=True).') sim = np.amax(np.abs(self.boot_t_stat), 0) hatc = np.quantile(sim, 1 - a) ci = np.vstack((self.coef - self.se * hatc, self.coef + self.se * hatc)).T else: if np.isnan(self.coef).all(): raise ValueError('Apply fit() before confint().') fac = norm.ppf(ab) ci = np.vstack((self.coef + self.se * fac[0], self.coef + self.se * fac[1])).T df_ci = pd.DataFrame(ci, columns=['{:.1f} %'.format(i * 100) for i in ab], index=self._quantiles) ci = df_ci </DeepExtract> df_summary = df_summary.join(ci) return df_summary
@property def summary(self): """ A summary for the estimated causal effect after calling :meth:`fit`. """ col_names = ['coef', 'std err', 't', 'P>|t|'] if np.isnan(self.coef).all(): df_summary = pd.DataFrame(columns=col_names) else: summary_stats = np.transpose(np.vstack([self.coef, self.se, self.t_stat, self.pval])) df_summary = pd.DataFrame(summary_stats, columns=col_names, index=self.quantiles) if not isinstance(joint, bool): raise TypeError(f'joint must be True or False. Got {str(joint)}.') if not isinstance(level, float): raise TypeError(f'The confidence level must be of float type. Object of type {str(type(level))} was passed.') if (level <= 0) | (level >= 1): raise ValueError(f'The confidence level must be in (0,1). {str(level)} was passed.') a = 1 - level ab = np.array([a / 2, 1.0 - a / 2]) if joint: if np.isnan(self.boot_coef).all(): raise ValueError('Apply fit() & bootstrap() before confint(joint=True).') sim = np.amax(np.abs(self.boot_t_stat), 0) hatc = np.quantile(sim, 1 - a) ci = np.vstack((self.coef - self.se * hatc, self.coef + self.se * hatc)).T else: if np.isnan(self.coef).all(): raise ValueError('Apply fit() before confint().') fac = norm.ppf(ab) ci = np.vstack((self.coef + self.se * fac[0], self.coef + self.se * fac[1])).T df_ci = pd.DataFrame(ci, columns=['{:.1f} %'.format(i * 100) for i in ab], index=self._quantiles) ci = df_ci df_summary = df_summary.join(ci) return df_summary
doubleml-for-py
positive
def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): return NotImplemented <DeepExtract> other = other(it) </DeepExtract> return self - other | other - self
def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): return NotImplemented other = other(it) return self - other | other - self
BruteSploit
positive
def wrapper(misc, pseudotime, n_TFs=20): <DeepExtract> self.del_attrs() gene_lists = self.hdf5_info['gene_list'] p_list = [] ps_sums = [] ps_sum_randoms = [] if verbose: loop = tqdm(gene_lists) else: loop = gene_lists for gene in loop: self.load_hdf5(gene=gene, misc=misc, specify_attributes=['inner_product_df']) if 'score_randomized' not in self.inner_product_df.columns: raise ValueError('please update inner_product_df first') (p, ps_sum, ps_sum_random) = self.get_positive_PS_p_value(pseudotime=pseudotime, return_ps_sum=True, plot=False) p_list.append(p) ps_sum_randoms.append(ps_sum_random) ps_sums.append(ps_sum) self.del_attrs() p_corrected = np.clip(np.array(p_list) * len(gene_lists), 0, 1) result = pd.DataFrame({'gene': gene_lists, 'ps_sum': ps_sums, 'ps_sum_random': ps_sum_randoms, 'p': p_list, 'p_adj': p_corrected}) result = result.sort_values('ps_sum', ascending=False).reset_index(drop=True) df = result </DeepExtract> print(f'Top {n_TFs} in {misc}') display(HTML(df.iloc[:min(n_TFs, df.shape[0])].to_html()))
def wrapper(misc, pseudotime, n_TFs=20): self.del_attrs() gene_lists = self.hdf5_info['gene_list'] p_list = [] ps_sums = [] ps_sum_randoms = [] if verbose: loop = tqdm(gene_lists) else: loop = gene_lists for gene in loop: self.load_hdf5(gene=gene, misc=misc, specify_attributes=['inner_product_df']) if 'score_randomized' not in self.inner_product_df.columns: raise ValueError('please update inner_product_df first') (p, ps_sum, ps_sum_random) = self.get_positive_PS_p_value(pseudotime=pseudotime, return_ps_sum=True, plot=False) p_list.append(p) ps_sum_randoms.append(ps_sum_random) ps_sums.append(ps_sum) self.del_attrs() p_corrected = np.clip(np.array(p_list) * len(gene_lists), 0, 1) result = pd.DataFrame({'gene': gene_lists, 'ps_sum': ps_sums, 'ps_sum_random': ps_sum_randoms, 'p': p_list, 'p_adj': p_corrected}) result = result.sort_values('ps_sum', ascending=False).reset_index(drop=True) df = result print(f'Top {n_TFs} in {misc}') display(HTML(df.iloc[:min(n_TFs, df.shape[0])].to_html()))
CellOracle
positive
def create_message_stream(self, definition: MessageStreamDefinition) -> None: """ Create a message stream with a given definition. :param definition: :class:`~.data.MessageStreamDefinition` definition object. :return: Nothing is returned if the request succeeds. :raises: :exc:`~.exceptions.StreamManagerException` and subtypes based on the precise error. :raises: :exc:`asyncio.TimeoutError` if the request times out. :raises: :exc:`ConnectionError` if the client is unable to reconnect to the server. """ <DeepExtract> if self.__closed: raise StreamManagerException('Client is closed. Create a new client first.') </DeepExtract> return UtilInternal.sync(self._create_message_stream(definition), loop=self.__loop)
def create_message_stream(self, definition: MessageStreamDefinition) -> None: """ Create a message stream with a given definition. :param definition: :class:`~.data.MessageStreamDefinition` definition object. :return: Nothing is returned if the request succeeds. :raises: :exc:`~.exceptions.StreamManagerException` and subtypes based on the precise error. :raises: :exc:`asyncio.TimeoutError` if the request times out. :raises: :exc:`ConnectionError` if the client is unable to reconnect to the server. """ if self.__closed: raise StreamManagerException('Client is closed. Create a new client first.') return UtilInternal.sync(self._create_message_stream(definition), loop=self.__loop)
aws-greengrass-core-sdk-python
positive
def get_output(self, train=False): <DeepExtract> if hasattr(self, 'previous'): if self.layer_cache is not None and self.cache_enabled: previous_layer_id = '%s_%s' % (id(self.previous), train) if previous_layer_id in self.layer_cache: X = self.layer_cache[previous_layer_id] previous_output = self.previous.get_output(train=train) if self.layer_cache is not None and self.cache_enabled: previous_layer_id = '%s_%s' % (id(self.previous), train) self.layer_cache[previous_layer_id] = previous_output X = previous_output elif hasattr(self, 'input'): X = self.input else: self.input = K.placeholder(shape=self.input_shape) X = self.input </DeepExtract> if self.p > 0.0: if train: X = K.dropout(X, level=self.p) return X
def get_output(self, train=False): if hasattr(self, 'previous'): if self.layer_cache is not None and self.cache_enabled: previous_layer_id = '%s_%s' % (id(self.previous), train) if previous_layer_id in self.layer_cache: X = self.layer_cache[previous_layer_id] previous_output = self.previous.get_output(train=train) if self.layer_cache is not None and self.cache_enabled: previous_layer_id = '%s_%s' % (id(self.previous), train) self.layer_cache[previous_layer_id] = previous_output X = previous_output elif hasattr(self, 'input'): X = self.input else: self.input = K.placeholder(shape=self.input_shape) X = self.input if self.p > 0.0: if train: X = K.dropout(X, level=self.p) return X
encoder_decoder
positive
def _validate_all_field_names(name, val): try: for x in val: <DeepExtract> try: assert x in self._field_names or x is None except AssertionError: raise Exception('Invalid field name: %s!' % x) </DeepExtract> except AssertionError: raise Exception('fields must be a sequence of field names!')
def _validate_all_field_names(name, val): try: for x in val: try: assert x in self._field_names or x is None except AssertionError: raise Exception('Invalid field name: %s!' % x) except AssertionError: raise Exception('fields must be a sequence of field names!')
C--Compiler
positive