column  dtype          values
before  stringlengths  0 – 955k characters
after   stringlengths  0 – 877k characters
repo    stringlengths  1 – 74 characters
type    stringclasses  1 distinct value ("positive")
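This header looks like a dataset-viewer column summary for the rows that follow: each row pairs a "before" snippet (code with <DeepExtract> markers) against an "after" snippet with the markers stripped, plus the source repo and a type label. As a hedged sketch only, assuming the rows were exported to a local JSON Lines file (the "rows.jsonl" path is a placeholder, not taken from this page), they could be scanned with the Hugging Face datasets library like this:

from datasets import load_dataset

# Placeholder source file; the real dataset name/path is not given on this page.
ds = load_dataset("json", data_files="rows.jsonl", split="train")
for row in ds:
    # "before": code with <DeepExtract> blocks; "after": the same code inlined;
    # "repo": source repository; "type": class label (only "positive" appears here).
    print(row["repo"], row["type"], len(row["before"]), len(row["after"]))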
def apply_odoo_requirements_file(self): """Try and read Odoo's 'requirements.txt' and apply it. This file appeared in the course of Odoo 8 lifetime. If not available, a warning is issued, that's all. Entries from the requirements file are applied if there is not already an entry in the versions section for the same project. A more interesting behaviour would be to apply then if they don't contradict an existing entry in the versions section, but that's far more complicated. """ req_fname = 'requirements.txt' req_path = join(self.odoo_dir, req_fname) if not os.path.exists(req_path): logger.warn('%r not found in this version of Odoo, although the configuration said to use it. Proceeding anyway.', req_fname) return for k in list(sys.modules.keys()): if k.split('.', 1)[0] == 'pip': del sys.modules[k] versions = Installer._versions <DeepExtract> devdir_contents = (f.rsplit('.', 1) for f in os.listdir(self.b_options['develop-eggs-directory'])) develops = [s[0] for s in devdir_contents if s[1] == 'egg-link'] </DeepExtract> if pip_version() < (8, 1, 2): <DeepExtract> from pip.req import parse_requirements if pip_version() < (1, 5): parsed = parse_requirements(req_path) else: fake_session = object() parsed = parse_requirements(req_path, session=fake_session) for inst_req in parsed: req = inst_req.req logger.debug("Considering requirement from Odoo's file %s", req) project_name = req.project_name if project_name not in self.requirements: self.requirements.append(str(project_name)) if inst_req.markers: logger.warn('Requirement %s has a marker %s but the evaluation of markers is not supported in this old version of pip. Please upgrade to pip 8.2 or higher. ', project_name, inst_req.markers) if project_name in versions: logger.debug("Requirement from Odoo's file %s superseded by buildout versions configuration as %r", req, versions[project_name]) continue if project_name in develops: logger.debug("Requirement from Odoo's file %s superseded by a direct develop directive", req) continue if not req.specs: continue supported = True if len(req.specs) > 1: supported = False spec = req.specs[0] if spec[0] != '==' or '*' in spec[1]: supported = False if not supported: raise UserError("Version requirement %s from Odoo's requirement file is too complicated to be taken automatically into account. Please override it in your [%s] configuration section. Future support of this format is pending the release of buildout 3.0.0 which relies on pip rather than setuptools.easy_install." 
% (req, self.b_options.get('versions', 'versions'))) logger.debug("Applying requirement %s from Odoo's file", req) versions[project_name] = spec[1] </DeepExtract> else: <DeepExtract> fake_session = object() if pip_version() < (10, 0, 0): from pip.req import parse_requirements else: from pip._internal.req import parse_requirements for inst_req in parse_requirements(req_path, session=fake_session): if pip_version() < (20, 0, 0): req = inst_req.req project_name = req.name.lower() marker = inst_req.markers else: req = pkg_resources.Requirement.parse(inst_req.requirement) project_name = req.project_name.lower() marker = req.marker if marker and (not marker.evaluate()): logger.debug('Skipping requirement %s with marker %s', project_name, marker) continue specs = req.specifier logger.debug("Considering requirement from Odoo's file %s", req) if project_name not in self.requirements: self.requirements.append(str(project_name)) if project_name in versions: logger.debug("Requirement from Odoo's file %s superseded by buildout versions configuration as %r", req, versions[project_name]) continue if project_name in develops: logger.debug("Requirement from Odoo's file %s superseded by a direct develop directive", req) continue specs = req.specifier if not specs: continue supported = True if len(specs) > 1: supported = False spec = next(specs.__iter__()) if spec.operator != '==' or '*' in spec.version: supported = False if not supported: raise UserError("Version requirement %s from Odoo's requirement file is too complicated to be taken automatically into account. Please override it in your [%s] configuration section. Future support of this format is pending the release of buildout 3.0.0 which relies on pip rather than setuptools.easy_install." % (req, self.b_options.get('versions', 'versions'))) logger.debug("Applying requirement %s from Odoo's file", req) versions[project_name] = spec.version </DeepExtract> <DeepExtract> if reqs is None: reqs = self.requirements serial = '\n'.join(reqs) if 'eggs' not in self.options: self.options['eggs'] = serial else: self.options['eggs'] += '\n' + serial </DeepExtract>
def apply_odoo_requirements_file(self): """Try and read Odoo's 'requirements.txt' and apply it. This file appeared in the course of Odoo 8 lifetime. If not available, a warning is issued, that's all. Entries from the requirements file are applied if there is not already an entry in the versions section for the same project. A more interesting behaviour would be to apply then if they don't contradict an existing entry in the versions section, but that's far more complicated. """ req_fname = 'requirements.txt' req_path = join(self.odoo_dir, req_fname) if not os.path.exists(req_path): logger.warn('%r not found in this version of Odoo, although the configuration said to use it. Proceeding anyway.', req_fname) return for k in list(sys.modules.keys()): if k.split('.', 1)[0] == 'pip': del sys.modules[k] versions = Installer._versions devdir_contents = (f.rsplit('.', 1) for f in os.listdir(self.b_options['develop-eggs-directory'])) develops = [s[0] for s in devdir_contents if s[1] == 'egg-link'] if pip_version() < (8, 1, 2): from pip.req import parse_requirements if pip_version() < (1, 5): parsed = parse_requirements(req_path) else: fake_session = object() parsed = parse_requirements(req_path, session=fake_session) for inst_req in parsed: req = inst_req.req logger.debug("Considering requirement from Odoo's file %s", req) project_name = req.project_name if project_name not in self.requirements: self.requirements.append(str(project_name)) if inst_req.markers: logger.warn('Requirement %s has a marker %s but the evaluation of markers is not supported in this old version of pip. Please upgrade to pip 8.2 or higher. ', project_name, inst_req.markers) if project_name in versions: logger.debug("Requirement from Odoo's file %s superseded by buildout versions configuration as %r", req, versions[project_name]) continue if project_name in develops: logger.debug("Requirement from Odoo's file %s superseded by a direct develop directive", req) continue if not req.specs: continue supported = True if len(req.specs) > 1: supported = False spec = req.specs[0] if spec[0] != '==' or '*' in spec[1]: supported = False if not supported: raise UserError("Version requirement %s from Odoo's requirement file is too complicated to be taken automatically into account. Please override it in your [%s] configuration section. Future support of this format is pending the release of buildout 3.0.0 which relies on pip rather than setuptools.easy_install." 
% (req, self.b_options.get('versions', 'versions'))) logger.debug("Applying requirement %s from Odoo's file", req) versions[project_name] = spec[1] else: fake_session = object() if pip_version() < (10, 0, 0): from pip.req import parse_requirements else: from pip._internal.req import parse_requirements for inst_req in parse_requirements(req_path, session=fake_session): if pip_version() < (20, 0, 0): req = inst_req.req project_name = req.name.lower() marker = inst_req.markers else: req = pkg_resources.Requirement.parse(inst_req.requirement) project_name = req.project_name.lower() marker = req.marker if marker and (not marker.evaluate()): logger.debug('Skipping requirement %s with marker %s', project_name, marker) continue specs = req.specifier logger.debug("Considering requirement from Odoo's file %s", req) if project_name not in self.requirements: self.requirements.append(str(project_name)) if project_name in versions: logger.debug("Requirement from Odoo's file %s superseded by buildout versions configuration as %r", req, versions[project_name]) continue if project_name in develops: logger.debug("Requirement from Odoo's file %s superseded by a direct develop directive", req) continue specs = req.specifier if not specs: continue supported = True if len(specs) > 1: supported = False spec = next(specs.__iter__()) if spec.operator != '==' or '*' in spec.version: supported = False if not supported: raise UserError("Version requirement %s from Odoo's requirement file is too complicated to be taken automatically into account. Please override it in your [%s] configuration section. Future support of this format is pending the release of buildout 3.0.0 which relies on pip rather than setuptools.easy_install." % (req, self.b_options.get('versions', 'versions'))) logger.debug("Applying requirement %s from Odoo's file", req) versions[project_name] = spec.version if reqs is None: reqs = self.requirements serial = '\n'.join(reqs) if 'eggs' not in self.options: self.options['eggs'] = serial else: self.options['eggs'] += '\n' + serial </DeepExtract>
anybox.recipe.odoo
positive
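A minimal, hypothetical illustration of the pattern encoded by the "before"/"after" columns in rows like the one above: the "before" cell keeps an inlined helper body wrapped in <DeepExtract> markers, and the "after" cell keeps the identical body with the markers removed. The function and values below are invented for illustration and are not drawn from any listed repo.

# before-style snippet (markers kept as comments so the file stays runnable):
def order_total_before(prices):
    # <DeepExtract>
    subtotal = sum(prices)
    # </DeepExtract>
    return subtotal * 1.2

# after-style snippet (identical body, markers dropped):
def order_total_after(prices):
    subtotal = sum(prices)
    return subtotal * 1.2

assert order_total_before([10, 20]) == order_total_after([10, 20])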
def test_bank_spending_add_view_edit_out_of_fiscal_year(self): """ A `GET` to the `add_bank_entry` view with a `journal_type` of `CD` and a `entry_id` will return a 404 Error if the entry is before the current Fiscal Year. """ <DeepExtract> response = self.client.post(reverse('entries.views.add_bank_entry', kwargs={'journal_type': 'CD'}), data={'entry-account': self.bank_account.id, 'entry-date': '2013-03-12', 'entry-ach_payment': True, 'entry-payee': 'test payee', 'entry-amount': 20, 'entry-memo': 'test memo', 'transaction-TOTAL_FORMS': 20, 'transaction-INITIAL_FORMS': 0, 'transaction-MAX_NUM_FORMS': '', 'transaction-0-id': '', 'transaction-0-bankspend_entry': '', 'transaction-0-detail': 'test detail', 'transaction-0-amount': 20, 'transaction-0-account': self.expense_account.id, 'subbtn': 'Submit'}) entry = BankSpendingEntry.objects.all()[0] self.assertRedirects(response, reverse('entries.views.show_bank_entry', kwargs={'journal_type': 'CD', 'entry_id': entry.id})) self.assertEqual(BankSpendingEntry.objects.count(), 1) self.assertEqual(Account.objects.get(bank=True).balance, 20) self.assertEqual(Account.objects.get(bank=False).balance, -20) </DeepExtract> FiscalYear.objects.create(year=2015, end_month=12, period=12) entry = BankSpendingEntry.objects.all()[0] response = self.client.get(reverse('entries.views.add_bank_entry', kwargs={'journal_type': 'CD', 'entry_id': entry.id})) self.assertEqual(response.status_code, 404)
def test_bank_spending_add_view_edit_out_of_fiscal_year(self): """ A `GET` to the `add_bank_entry` view with a `journal_type` of `CD` and a `entry_id` will return a 404 Error if the entry is before the current Fiscal Year. """ response = self.client.post(reverse('entries.views.add_bank_entry', kwargs={'journal_type': 'CD'}), data={'entry-account': self.bank_account.id, 'entry-date': '2013-03-12', 'entry-ach_payment': True, 'entry-payee': 'test payee', 'entry-amount': 20, 'entry-memo': 'test memo', 'transaction-TOTAL_FORMS': 20, 'transaction-INITIAL_FORMS': 0, 'transaction-MAX_NUM_FORMS': '', 'transaction-0-id': '', 'transaction-0-bankspend_entry': '', 'transaction-0-detail': 'test detail', 'transaction-0-amount': 20, 'transaction-0-account': self.expense_account.id, 'subbtn': 'Submit'}) entry = BankSpendingEntry.objects.all()[0] self.assertRedirects(response, reverse('entries.views.show_bank_entry', kwargs={'journal_type': 'CD', 'entry_id': entry.id})) self.assertEqual(BankSpendingEntry.objects.count(), 1) self.assertEqual(Account.objects.get(bank=True).balance, 20) self.assertEqual(Account.objects.get(bank=False).balance, -20) FiscalYear.objects.create(year=2015, end_month=12, period=12) entry = BankSpendingEntry.objects.all()[0] response = self.client.get(reverse('entries.views.add_bank_entry', kwargs={'journal_type': 'CD', 'entry_id': entry.id})) self.assertEqual(response.status_code, 404)
AcornAccounting
positive
def result_generator(): watermark_value = None if self._watermark_column: watermark_value = self._redis_state_service.get(self._name) or self._watermark_init if self._object_type: <DeepExtract> description = self._client.describe(self._object_type) fields = [f['name'] for f in description['fields']] field_projection = ', '.join(fields) query = 'SELECT {fields} FROM {object_type}'.format(fields=field_projection, object_type=self._object_type) if self._watermark_column: query = '{base} WHERE {watermark_column} > {{{watermark_column}}}'.format(base=query, watermark_column=self._watermark_column) self._query = query </DeepExtract> if self._watermark_column: self._query = self._query.format(**{self._watermark_column: watermark_value}) records = self._client.query(self._query) for rec in records: <DeepExtract> row = rec._flatten(rec._delete_metadata_from_record(record)) </DeepExtract> yield row if self._watermark_column: self._redis_state_service.set(self._name, row[self._watermark_column])
def result_generator(): watermark_value = None if self._watermark_column: watermark_value = self._redis_state_service.get(self._name) or self._watermark_init if self._object_type: description = self._client.describe(self._object_type) fields = [f['name'] for f in description['fields']] field_projection = ', '.join(fields) query = 'SELECT {fields} FROM {object_type}'.format(fields=field_projection, object_type=self._object_type) if self._watermark_column: query = '{base} WHERE {watermark_column} > {{{watermark_column}}}'.format(base=query, watermark_column=self._watermark_column) self._query = query if self._watermark_column: self._query = self._query.format(**{self._watermark_column: watermark_value}) records = self._client.query(self._query) for rec in records: row = rec._flatten(rec._delete_metadata_from_record(record)) yield row if self._watermark_column: self._redis_state_service.set(self._name, row[self._watermark_column])
artefactory-connectors-kit
positive
def run_sklearn_study(opt_class, opt_kwargs, model_name, dataset, scorer, n_calls, n_suggestions, data_root=None, callback=None): """Run a study for a single optimizer on a single `sklearn` model/data set combination. This routine is meant for benchmarking when tuning `sklearn` models, as opposed to the more general :func:`.run_study`. Parameters ---------- opt_class : :class:`.abstract_optimizer.AbstractOptimizer` Type of wrapper optimizer must be subclass of :class:`.abstract_optimizer.AbstractOptimizer`. opt_kwargs : kwargs `kwargs` to use when instantiating the wrapper class. model_name : str Which sklearn model we are attempting to tune, must be an element of `constants.MODEL_NAMES`. dataset : str Which data set the model is being tuned to, which must be either a) an element of `constants.DATA_LOADER_NAMES`, or b) the name of a csv file in the `data_root` folder for a custom data set. scorer : str Which metric to use when evaluating the model. This must be an element of `sklearn_funcs.SCORERS_CLF` for classification models, or `sklearn_funcs.SCORERS_REG` for regression models. n_calls : int How many iterations of minimization to run. n_suggestions : int How many parallel evaluation we run each iteration. Must be ``>= 1``. data_root : str Absolute path to folder containing custom data sets. This may be ``None`` if no custom data sets are used.`` callback : callable Optional callback taking the current best function evaluation, and the number of iterations finished. Takes array of shape `(n_obj,)`. Returns ------- function_evals : :class:`numpy:numpy.ndarray` of shape (n_calls, n_suggestions, n_obj) Value of objective for each evaluation. timing_evals : (:class:`numpy:numpy.ndarray`, :class:`numpy:numpy.ndarray`, :class:`numpy:numpy.ndarray`) Tuple of 3 timing results: ``(suggest_time, eval_time, observe_time)`` with shapes ``(n_calls,)``, ``(n_calls, n_suggestions)``, and ``(n_calls,)``. These are the time to make each suggestion, the time for each evaluation of the objective function, and the time to make an observe call. suggest_log : list(list(dict(str, object))) Log of the suggestions corresponding to the `function_evals`. """ <DeepExtract> if model_name.endswith('-surr'): model_name = chomp(model_name, '-surr') prob = SklearnSurrogate(model_name, dataset, scorer, path=data_root) else: prob = SklearnModel(model_name, dataset, scorer, data_root=data_root) function_instance = prob </DeepExtract> api_config = function_instance.get_api_config() optimizer_instance = opt_class(api_config, **opt_kwargs) assert function_instance.objective_names == OBJECTIVE_NAMES assert OBJECTIVE_NAMES[0] == cc.VISIBLE_TO_OPT n_obj = len(OBJECTIVE_NAMES) <DeepExtract> assert n_suggestions >= 1, 'batch size must be at least 1' assert n_obj >= 1, 'Must be at least one objective' space_for_validate = JointSpace(function_instance.get_api_config()) if callback is not None: callback(np.full((n_obj,), np.inf, dtype=float), 0) suggest_time = np.zeros(n_calls) observe_time = np.zeros(n_calls) eval_time = np.zeros((n_calls, n_suggestions)) function_evals = np.zeros((n_calls, n_suggestions, n_obj)) suggest_log = [None] * n_calls for ii in range(n_calls): tt = time() try: next_points = optimizer_instance.suggest(n_suggestions) except Exception as e: logger.warning('Failure in optimizer suggest. 
Falling back to random search.') logger.exception(e, exc_info=True) print(json.dumps({'optimizer_suggest_exception': {ITER: ii}})) api_config = function_instance.get_api_config() next_points = rs.suggest_dict([], [], api_config, n_suggestions=n_suggestions) suggest_time[ii] = time() - tt logger.info('suggestion time taken %f iter %d next_points %s' % (suggest_time[ii], ii, str(next_points))) assert len(next_points) == n_suggestions, 'invalid number of suggestions provided by the optimizer' try: space_for_validate.validate(next_points) except Exception: raise ValueError('Optimizer suggestion is out of range.') for (jj, next_point) in enumerate(next_points): tt = time() try: f_current_eval = function_instance.evaluate(next_point) except Exception as e: logger.warning('Failure in function eval. Setting to inf.') logger.exception(e, exc_info=True) f_current_eval = np.full((n_obj,), np.inf, dtype=float) eval_time[ii, jj] = time() - tt assert np.shape(f_current_eval) == (n_obj,) suggest_log[ii] = next_points function_evals[ii, jj, :] = f_current_eval logger.info('function_evaluation time %f value %f suggestion %s' % (eval_time[ii, jj], f_current_eval[0], str(next_point))) eval_list = function_evals[ii, :, 0].tolist() if callback is not None: (idx_ii, idx_jj) = argmin_2d(function_evals[:ii + 1, :, 0]) callback(function_evals[idx_ii, idx_jj, :], ii + 1) tt = time() try: optimizer_instance.observe(next_points, eval_list) except Exception as e: logger.warning('Failure in optimizer observe. Ignoring these observations.') logger.exception(e, exc_info=True) print(json.dumps({'optimizer_observe_exception': {ITER: ii}})) observe_time[ii] = time() - tt logger.info('observation time %f, current best %f at iter %d' % (observe_time[ii], np.min(function_evals[:ii + 1, :, 0]), ii)) (function_evals, timing, suggest_log) = (function_evals, (suggest_time, eval_time, observe_time), suggest_log) </DeepExtract> return (function_evals, timing, suggest_log)
def run_sklearn_study(opt_class, opt_kwargs, model_name, dataset, scorer, n_calls, n_suggestions, data_root=None, callback=None): """Run a study for a single optimizer on a single `sklearn` model/data set combination. This routine is meant for benchmarking when tuning `sklearn` models, as opposed to the more general :func:`.run_study`. Parameters ---------- opt_class : :class:`.abstract_optimizer.AbstractOptimizer` Type of wrapper optimizer must be subclass of :class:`.abstract_optimizer.AbstractOptimizer`. opt_kwargs : kwargs `kwargs` to use when instantiating the wrapper class. model_name : str Which sklearn model we are attempting to tune, must be an element of `constants.MODEL_NAMES`. dataset : str Which data set the model is being tuned to, which must be either a) an element of `constants.DATA_LOADER_NAMES`, or b) the name of a csv file in the `data_root` folder for a custom data set. scorer : str Which metric to use when evaluating the model. This must be an element of `sklearn_funcs.SCORERS_CLF` for classification models, or `sklearn_funcs.SCORERS_REG` for regression models. n_calls : int How many iterations of minimization to run. n_suggestions : int How many parallel evaluation we run each iteration. Must be ``>= 1``. data_root : str Absolute path to folder containing custom data sets. This may be ``None`` if no custom data sets are used.`` callback : callable Optional callback taking the current best function evaluation, and the number of iterations finished. Takes array of shape `(n_obj,)`. Returns ------- function_evals : :class:`numpy:numpy.ndarray` of shape (n_calls, n_suggestions, n_obj) Value of objective for each evaluation. timing_evals : (:class:`numpy:numpy.ndarray`, :class:`numpy:numpy.ndarray`, :class:`numpy:numpy.ndarray`) Tuple of 3 timing results: ``(suggest_time, eval_time, observe_time)`` with shapes ``(n_calls,)``, ``(n_calls, n_suggestions)``, and ``(n_calls,)``. These are the time to make each suggestion, the time for each evaluation of the objective function, and the time to make an observe call. suggest_log : list(list(dict(str, object))) Log of the suggestions corresponding to the `function_evals`. """ if model_name.endswith('-surr'): model_name = chomp(model_name, '-surr') prob = SklearnSurrogate(model_name, dataset, scorer, path=data_root) else: prob = SklearnModel(model_name, dataset, scorer, data_root=data_root) function_instance = prob api_config = function_instance.get_api_config() optimizer_instance = opt_class(api_config, **opt_kwargs) assert function_instance.objective_names == OBJECTIVE_NAMES assert OBJECTIVE_NAMES[0] == cc.VISIBLE_TO_OPT n_obj = len(OBJECTIVE_NAMES) assert n_suggestions >= 1, 'batch size must be at least 1' assert n_obj >= 1, 'Must be at least one objective' space_for_validate = JointSpace(function_instance.get_api_config()) if callback is not None: callback(np.full((n_obj,), np.inf, dtype=float), 0) suggest_time = np.zeros(n_calls) observe_time = np.zeros(n_calls) eval_time = np.zeros((n_calls, n_suggestions)) function_evals = np.zeros((n_calls, n_suggestions, n_obj)) suggest_log = [None] * n_calls for ii in range(n_calls): tt = time() try: next_points = optimizer_instance.suggest(n_suggestions) except Exception as e: logger.warning('Failure in optimizer suggest. 
Falling back to random search.') logger.exception(e, exc_info=True) print(json.dumps({'optimizer_suggest_exception': {ITER: ii}})) api_config = function_instance.get_api_config() next_points = rs.suggest_dict([], [], api_config, n_suggestions=n_suggestions) suggest_time[ii] = time() - tt logger.info('suggestion time taken %f iter %d next_points %s' % (suggest_time[ii], ii, str(next_points))) assert len(next_points) == n_suggestions, 'invalid number of suggestions provided by the optimizer' try: space_for_validate.validate(next_points) except Exception: raise ValueError('Optimizer suggestion is out of range.') for (jj, next_point) in enumerate(next_points): tt = time() try: f_current_eval = function_instance.evaluate(next_point) except Exception as e: logger.warning('Failure in function eval. Setting to inf.') logger.exception(e, exc_info=True) f_current_eval = np.full((n_obj,), np.inf, dtype=float) eval_time[ii, jj] = time() - tt assert np.shape(f_current_eval) == (n_obj,) suggest_log[ii] = next_points function_evals[ii, jj, :] = f_current_eval logger.info('function_evaluation time %f value %f suggestion %s' % (eval_time[ii, jj], f_current_eval[0], str(next_point))) eval_list = function_evals[ii, :, 0].tolist() if callback is not None: (idx_ii, idx_jj) = argmin_2d(function_evals[:ii + 1, :, 0]) callback(function_evals[idx_ii, idx_jj, :], ii + 1) tt = time() try: optimizer_instance.observe(next_points, eval_list) except Exception as e: logger.warning('Failure in optimizer observe. Ignoring these observations.') logger.exception(e, exc_info=True) print(json.dumps({'optimizer_observe_exception': {ITER: ii}})) observe_time[ii] = time() - tt logger.info('observation time %f, current best %f at iter %d' % (observe_time[ii], np.min(function_evals[:ii + 1, :, 0]), ii)) (function_evals, timing, suggest_log) = (function_evals, (suggest_time, eval_time, observe_time), suggest_log) return (function_evals, timing, suggest_log)
bayesmark
positive
def build_hparams(): """Build tf.Hparams for training Amoeba Net.""" hparams = model_lib.imagenet_hparams() hparams.add_hparam('reduction_size', FLAGS.reduction_size) (operations, hiddenstate_indices, used_hiddenstates) = model_specs.get_normal_cell(FLAGS.cell_name) hparams.add_hparam('normal_cell_operations', operations) hparams.add_hparam('normal_cell_hiddenstate_indices', hiddenstate_indices) hparams.add_hparam('normal_cell_used_hiddenstates', used_hiddenstates) (operations, hiddenstate_indices, used_hiddenstates) = model_specs.get_reduction_cell(FLAGS.cell_name) hparams.add_hparam('reduction_cell_operations', operations) hparams.add_hparam('reduction_cell_hiddenstate_indices', hiddenstate_indices) hparams.add_hparam('reduction_cell_used_hiddenstates', used_hiddenstates) hparams.add_hparam('stem_reduction_size', FLAGS.stem_reduction_size) hparams.set_hparam('data_format', 'NHWC') <DeepExtract> override_flag_names = ['aux_scaling', 'train_batch_size', 'batch_norm_decay', 'batch_norm_epsilon', 'dense_dropout_keep_prob', 'drop_connect_keep_prob', 'drop_connect_version', 'eval_batch_size', 'gradient_clipping_by_global_norm', 'lr', 'lr_decay_method', 'lr_decay_value', 'lr_num_epochs_per_decay', 'moving_average_decay', 'image_size', 'num_cells', 'num_epochs', 'num_epochs_per_eval', 'optimizer', 'enable_hostcall', 'use_aux_head', 'use_bp16', 'use_tpu', 'lr_warmup_epochs', 'weight_decay', 'num_shards', 'distributed_group_size'] for flag_name in override_flag_names: flag_value = getattr(FLAGS, flag_name, 'INVALID') if flag_value == 'INVALID': tf.logging.fatal('Unknown flag %s.' % str(flag_name)) if flag_value is not None: _set_or_add_hparam(hparams, flag_name, flag_value) </DeepExtract> return hparams
def build_hparams(): """Build tf.Hparams for training Amoeba Net.""" hparams = model_lib.imagenet_hparams() hparams.add_hparam('reduction_size', FLAGS.reduction_size) (operations, hiddenstate_indices, used_hiddenstates) = model_specs.get_normal_cell(FLAGS.cell_name) hparams.add_hparam('normal_cell_operations', operations) hparams.add_hparam('normal_cell_hiddenstate_indices', hiddenstate_indices) hparams.add_hparam('normal_cell_used_hiddenstates', used_hiddenstates) (operations, hiddenstate_indices, used_hiddenstates) = model_specs.get_reduction_cell(FLAGS.cell_name) hparams.add_hparam('reduction_cell_operations', operations) hparams.add_hparam('reduction_cell_hiddenstate_indices', hiddenstate_indices) hparams.add_hparam('reduction_cell_used_hiddenstates', used_hiddenstates) hparams.add_hparam('stem_reduction_size', FLAGS.stem_reduction_size) hparams.set_hparam('data_format', 'NHWC') override_flag_names = ['aux_scaling', 'train_batch_size', 'batch_norm_decay', 'batch_norm_epsilon', 'dense_dropout_keep_prob', 'drop_connect_keep_prob', 'drop_connect_version', 'eval_batch_size', 'gradient_clipping_by_global_norm', 'lr', 'lr_decay_method', 'lr_decay_value', 'lr_num_epochs_per_decay', 'moving_average_decay', 'image_size', 'num_cells', 'num_epochs', 'num_epochs_per_eval', 'optimizer', 'enable_hostcall', 'use_aux_head', 'use_bp16', 'use_tpu', 'lr_warmup_epochs', 'weight_decay', 'num_shards', 'distributed_group_size'] for flag_name in override_flag_names: flag_value = getattr(FLAGS, flag_name, 'INVALID') if flag_value == 'INVALID': tf.logging.fatal('Unknown flag %s.' % str(flag_name)) if flag_value is not None: _set_or_add_hparam(hparams, flag_name, flag_value) return hparams
class-balanced-loss
positive
def _render_gl(self, scene): CLEAR_DEPTH = 1.0 draw_target = Glf.DrawTarget(self.width, self.height) draw_target.Bind() draw_target.AddAttachment('color', bgl.GL_RGBA, bgl.GL_FLOAT, bgl.GL_RGBA) renderer = UsdImagingGL.Engine() if not self._sync_render_settings(renderer, scene): renderer = None return <DeepExtract> if scene.hdusd.final.nodetree_camera != '' and scene.hdusd.final.data_source: usd_camera = UsdAppUtils.GetCameraAtPath(self.stage, scene.hdusd.final.nodetree_camera) else: usd_camera = UsdAppUtils.GetCameraAtPath(self.stage, Tf.MakeValidIdentifier(scene.camera.data.name)) gf_camera = usd_camera.GetCamera(scene.frame_current) renderer.SetCameraState(gf_camera.frustum.ComputeViewMatrix(), gf_camera.frustum.ComputeProjectionMatrix()) </DeepExtract> renderer.SetRenderViewport((0, 0, self.width, self.height)) renderer.SetRendererAov('color') root = self.stage.GetPseudoRoot() params = UsdImagingGL.RenderParams() params.renderResolution = (self.width, self.height) params.frame = Usd.TimeCode(scene.frame_current) if scene.hdusd.final.data_source: world_data = world.WorldData.init_from_stage(self.stage) else: world_data = world.WorldData.init_from_world(scene.world) params.clearColor = world_data.clear_color try: renderer.Render(root, params) except Exception as e: log.error(e) <DeepExtract> result = self.render_engine.begin_result(0, 0, self.width, self.height, layer=self.render_layer_name) render_passes = result.layers[0].passes images = [] for p in render_passes: image = {'Combined': gl.get_framebuffer_data(self.width, self.height)}.get(p.name) if image is None: image = np.zeros((self.width, self.height, p.channels), dtype=np.float32) if p.channels != image.shape[2]: image = image[:, :, 0:p.channels] images.append(image.flatten()) render_passes.foreach_set('rect', np.concatenate(images)) self.render_engine.end_result(result) </DeepExtract> draw_target.Unbind() draw_target = None renderer = None
def _render_gl(self, scene): CLEAR_DEPTH = 1.0 draw_target = Glf.DrawTarget(self.width, self.height) draw_target.Bind() draw_target.AddAttachment('color', bgl.GL_RGBA, bgl.GL_FLOAT, bgl.GL_RGBA) renderer = UsdImagingGL.Engine() if not self._sync_render_settings(renderer, scene): renderer = None return if scene.hdusd.final.nodetree_camera != '' and scene.hdusd.final.data_source: usd_camera = UsdAppUtils.GetCameraAtPath(self.stage, scene.hdusd.final.nodetree_camera) else: usd_camera = UsdAppUtils.GetCameraAtPath(self.stage, Tf.MakeValidIdentifier(scene.camera.data.name)) gf_camera = usd_camera.GetCamera(scene.frame_current) renderer.SetCameraState(gf_camera.frustum.ComputeViewMatrix(), gf_camera.frustum.ComputeProjectionMatrix()) renderer.SetRenderViewport((0, 0, self.width, self.height)) renderer.SetRendererAov('color') root = self.stage.GetPseudoRoot() params = UsdImagingGL.RenderParams() params.renderResolution = (self.width, self.height) params.frame = Usd.TimeCode(scene.frame_current) if scene.hdusd.final.data_source: world_data = world.WorldData.init_from_stage(self.stage) else: world_data = world.WorldData.init_from_world(scene.world) params.clearColor = world_data.clear_color try: renderer.Render(root, params) except Exception as e: log.error(e) result = self.render_engine.begin_result(0, 0, self.width, self.height, layer=self.render_layer_name) render_passes = result.layers[0].passes images = [] for p in render_passes: image = {'Combined': gl.get_framebuffer_data(self.width, self.height)}.get(p.name) if image is None: image = np.zeros((self.width, self.height, p.channels), dtype=np.float32) if p.channels != image.shape[2]: image = image[:, :, 0:p.channels] images.append(image.flatten()) render_passes.foreach_set('rect', np.concatenate(images)) self.render_engine.end_result(result) draw_target.Unbind() draw_target = None renderer = None
BlenderUSDHydraAddon
positive
def batch_multiclass_non_max_suppression(boxes, scores, score_thresh, iou_thresh, max_size_per_class, max_total_size=0, clip_window=None, change_coordinate_frame=False, num_valid_boxes=None, masks=None, additional_fields=None, scope=None, parallel_iterations=32): """Multi-class version of non maximum suppression that operates on a batch. This op is similar to `multiclass_non_max_suppression` but operates on a batch of boxes and scores. See documentation for `multiclass_non_max_suppression` for details. Args: boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing detections. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. scores: A [batch_size, num_anchors, num_classes] float32 tensor containing the scores for each of the `num_anchors` detections. score_thresh: scalar threshold for score (low scoring boxes are removed). iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap with previously selected boxes are removed). max_size_per_class: maximum number of retained boxes per class. max_total_size: maximum number of boxes retained over all classes. By default returns all boxes retained after capping boxes per class. clip_window: A float32 tensor of shape [batch_size, 4] where each entry is of the form [y_min, x_min, y_max, x_max] representing the window to clip boxes to before performing non-max suppression. This argument can also be a tensor of shape [4] in which case, the same clip window is applied to all images in the batch. If clip_widow is None, all boxes are used to perform non-max suppression. change_coordinate_frame: Whether to normalize coordinates after clipping relative to clip_window (this can only be set to True if a clip_window is provided) num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape [batch_size] representing the number of valid boxes to be considered for each image in the batch. This parameter allows for ignoring zero paddings. masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width] float32 tensor containing box masks. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. additional_fields: (optional) If not None, a dictionary that maps keys to tensors whose dimensions are [batch_size, num_anchors, ...]. scope: tf scope name. parallel_iterations: (optional) number of batch items to process in parallel. Returns: 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. 'nmsed_masks': (optional) a [batch_size, max_detections, mask_height, mask_width] float32 tensor containing masks for each selected box. This is set to None if input `masks` is None. 'nmsed_additional_fields': (optional) a dictionary of [batch_size, max_detections, ...] float32 tensors corresponding to the tensors specified in the input `additional_fields`. This is not returned if input `additional_fields` is None. 'num_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top num_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. 
Raises: ValueError: if `q` in boxes.shape is not 1 or not equal to number of classes as inferred from scores.shape. """ q = boxes.shape[2].value num_classes = scores.shape[2].value if q != 1 and q != num_classes: raise ValueError('third dimension of boxes must be either 1 or equal to the third dimension of scores') if change_coordinate_frame and clip_window is None: raise ValueError('if change_coordinate_frame is True, then a clip_windowmust be specified.') original_masks = masks original_additional_fields = additional_fields with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'): boxes_shape = boxes.shape batch_size = boxes_shape[0].value num_anchors = boxes_shape[1].value if batch_size is None: batch_size = tf.shape(boxes)[0] if num_anchors is None: num_anchors = tf.shape(boxes)[1] if num_valid_boxes is None: num_valid_boxes = tf.ones([batch_size], dtype=tf.int32) * num_anchors if masks is None: masks_shape = tf.stack([batch_size, num_anchors, 1, 0, 0]) masks = tf.zeros(masks_shape) if clip_window is None: clip_window = tf.stack([tf.reduce_min(boxes[:, :, :, 0]), tf.reduce_min(boxes[:, :, :, 1]), tf.reduce_max(boxes[:, :, :, 2]), tf.reduce_max(boxes[:, :, :, 3])]) if clip_window.shape.ndims == 1: clip_window = tf.tile(tf.expand_dims(clip_window, 0), [batch_size, 1]) if additional_fields is None: additional_fields = {} def _single_image_nms_fn(args): """Runs NMS on a single image and returns padded output. Args: args: A list of tensors consisting of the following: per_image_boxes - A [num_anchors, q, 4] float32 tensor containing detections. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. per_image_scores - A [num_anchors, num_classes] float32 tensor containing the scores for each of the `num_anchors` detections. per_image_masks - A [num_anchors, q, mask_height, mask_width] float32 tensor containing box masks. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. per_image_clip_window - A 1D float32 tensor of the form [ymin, xmin, ymax, xmax] representing the window to clip the boxes to. per_image_additional_fields - (optional) A variable number of float32 tensors each with size [num_anchors, ...]. per_image_num_valid_boxes - A tensor of type `int32`. A 1-D tensor of shape [batch_size] representing the number of valid boxes to be considered for each image in the batch. This parameter allows for ignoring zero paddings. Returns: 'nmsed_boxes': A [max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [max_detections] float32 tensor containing the class for boxes. 'nmsed_masks': (optional) a [max_detections, mask_height, mask_width] float32 tensor containing masks for each selected box. This is set to None if input `masks` is None. 'nmsed_additional_fields': (optional) A variable number of float32 tensors each with size [max_detections, ...] corresponding to the input `per_image_additional_fields`. 'num_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top num_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. 
""" per_image_boxes = args[0] per_image_scores = args[1] per_image_masks = args[2] per_image_clip_window = args[3] per_image_additional_fields = {key: value for (key, value) in zip(additional_fields, args[4:-1])} per_image_num_valid_boxes = args[-1] per_image_boxes = tf.reshape(tf.slice(per_image_boxes, 3 * [0], tf.stack([per_image_num_valid_boxes, -1, -1])), [-1, q, 4]) per_image_scores = tf.reshape(tf.slice(per_image_scores, [0, 0], tf.stack([per_image_num_valid_boxes, -1])), [-1, num_classes]) per_image_masks = tf.reshape(tf.slice(per_image_masks, 4 * [0], tf.stack([per_image_num_valid_boxes, -1, -1, -1])), [-1, q, per_image_masks.shape[2].value, per_image_masks.shape[3].value]) if per_image_additional_fields is not None: for (key, tensor) in per_image_additional_fields.items(): additional_field_shape = tensor.get_shape() additional_field_dim = len(additional_field_shape) per_image_additional_fields[key] = tf.reshape(tf.slice(per_image_additional_fields[key], additional_field_dim * [0], tf.stack([per_image_num_valid_boxes] + (additional_field_dim - 1) * [-1])), [-1] + [dim.value for dim in additional_field_shape[1:]]) <DeepExtract> if not 0 <= iou_thresh <= 1.0: raise ValueError('iou_thresh must be between 0 and 1') if per_image_scores.shape.ndims != 2: raise ValueError('scores field must be of rank 2') if per_image_scores.shape[1].value is None: raise ValueError('scores must have statically defined second dimension') if per_image_boxes.shape.ndims != 3: raise ValueError('boxes must be of rank 3.') if not (per_image_boxes.shape[1].value == per_image_scores.shape[1].value or per_image_boxes.shape[1].value == 1): raise ValueError('second dimension of boxes must be either 1 or equal to the second dimension of scores') if per_image_boxes.shape[2].value != 4: raise ValueError('last dimension of boxes must be of size 4.') if change_coordinate_frame and per_image_clip_window is None: raise ValueError('if change_coordinate_frame is True, then a clip_windowmust be specified.') with tf.name_scope(scope, 'MultiClassNonMaxSuppression'): num_boxes = tf.shape(per_image_boxes)[0] num_scores = tf.shape(per_image_scores)[0] num_classes = per_image_scores.get_shape()[1] length_assert = tf.Assert(tf.equal(num_boxes, num_scores), ['Incorrect scores field length: actual vs expected.', num_scores, num_boxes]) selected_boxes_list = [] per_class_boxes_list = tf.unstack(per_image_boxes, axis=1) if per_image_masks is not None: per_class_masks_list = tf.unstack(per_image_masks, axis=1) if boundaries is not None: per_class_boundaries_list = tf.unstack(boundaries, axis=1) boxes_ids = range(num_classes) if len(per_class_boxes_list) > 1 else [0] * num_classes.value for (class_idx, boxes_idx) in zip(range(num_classes), boxes_ids): per_class_boxes = per_class_boxes_list[boxes_idx] boxlist_and_class_scores = box_list.BoxList(per_class_boxes) with tf.control_dependencies([length_assert]): class_scores = tf.reshape(tf.slice(per_image_scores, [0, class_idx], tf.stack([num_scores, 1])), [-1]) boxlist_and_class_scores.add_field(fields.BoxListFields.scores, class_scores) if per_image_masks is not None: per_class_masks = per_class_masks_list[boxes_idx] boxlist_and_class_scores.add_field(fields.BoxListFields.masks, per_class_masks) if boundaries is not None: per_class_boundaries = per_class_boundaries_list[boxes_idx] boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries, per_class_boundaries) if per_image_additional_fields is not None: for (key, tensor) in per_image_additional_fields.items(): 
boxlist_and_class_scores.add_field(key, tensor) boxlist_filtered = box_list_ops.filter_greater_than(boxlist_and_class_scores, score_thresh) if per_image_clip_window is not None: boxlist_filtered = box_list_ops.clip_to_window(boxlist_filtered, per_image_clip_window) if change_coordinate_frame: boxlist_filtered = box_list_ops.change_coordinate_frame(boxlist_filtered, per_image_clip_window) max_selection_size = tf.minimum(max_size_per_class, boxlist_filtered.num_boxes()) selected_indices = tf.image.non_max_suppression(boxlist_filtered.get(), boxlist_filtered.get_field(fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh) nms_result = box_list_ops.gather(boxlist_filtered, selected_indices) nms_result.add_field(fields.BoxListFields.classes, tf.zeros_like(nms_result.get_field(fields.BoxListFields.scores)) + class_idx) selected_boxes_list.append(nms_result) selected_boxes = box_list_ops.concatenate(selected_boxes_list) sorted_boxes = box_list_ops.sort_by_field(selected_boxes, fields.BoxListFields.scores) if max_total_size: max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes()) sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size)) nmsed_boxlist = sorted_boxes </DeepExtract> padded_boxlist = box_list_ops.pad_or_clip_box_list(nmsed_boxlist, max_total_size) num_detections = nmsed_boxlist.num_boxes() nmsed_boxes = padded_boxlist.get() nmsed_scores = padded_boxlist.get_field(fields.BoxListFields.scores) nmsed_classes = padded_boxlist.get_field(fields.BoxListFields.classes) nmsed_masks = padded_boxlist.get_field(fields.BoxListFields.masks) nmsed_additional_fields = [padded_boxlist.get_field(key) for key in per_image_additional_fields] return [nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks] + nmsed_additional_fields + [num_detections] num_additional_fields = 0 if additional_fields is not None: num_additional_fields = len(additional_fields) num_nmsed_outputs = 4 + num_additional_fields batch_outputs = shape_utils.static_or_dynamic_map_fn(_single_image_nms_fn, elems=[boxes, scores, masks, clip_window] + list(additional_fields.values()) + [num_valid_boxes], dtype=num_nmsed_outputs * [tf.float32] + [tf.int32], parallel_iterations=parallel_iterations) batch_nmsed_boxes = batch_outputs[0] batch_nmsed_scores = batch_outputs[1] batch_nmsed_classes = batch_outputs[2] batch_nmsed_masks = batch_outputs[3] batch_nmsed_additional_fields = {key: value for (key, value) in zip(additional_fields, batch_outputs[4:-1])} batch_num_detections = batch_outputs[-1] if original_masks is None: batch_nmsed_masks = None if original_additional_fields is None: batch_nmsed_additional_fields = None return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, batch_nmsed_masks, batch_nmsed_additional_fields, batch_num_detections)
def batch_multiclass_non_max_suppression(boxes, scores, score_thresh, iou_thresh, max_size_per_class, max_total_size=0, clip_window=None, change_coordinate_frame=False, num_valid_boxes=None, masks=None, additional_fields=None, scope=None, parallel_iterations=32): """Multi-class version of non maximum suppression that operates on a batch. This op is similar to `multiclass_non_max_suppression` but operates on a batch of boxes and scores. See documentation for `multiclass_non_max_suppression` for details. Args: boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing detections. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. scores: A [batch_size, num_anchors, num_classes] float32 tensor containing the scores for each of the `num_anchors` detections. score_thresh: scalar threshold for score (low scoring boxes are removed). iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap with previously selected boxes are removed). max_size_per_class: maximum number of retained boxes per class. max_total_size: maximum number of boxes retained over all classes. By default returns all boxes retained after capping boxes per class. clip_window: A float32 tensor of shape [batch_size, 4] where each entry is of the form [y_min, x_min, y_max, x_max] representing the window to clip boxes to before performing non-max suppression. This argument can also be a tensor of shape [4] in which case, the same clip window is applied to all images in the batch. If clip_widow is None, all boxes are used to perform non-max suppression. change_coordinate_frame: Whether to normalize coordinates after clipping relative to clip_window (this can only be set to True if a clip_window is provided) num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape [batch_size] representing the number of valid boxes to be considered for each image in the batch. This parameter allows for ignoring zero paddings. masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width] float32 tensor containing box masks. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. additional_fields: (optional) If not None, a dictionary that maps keys to tensors whose dimensions are [batch_size, num_anchors, ...]. scope: tf scope name. parallel_iterations: (optional) number of batch items to process in parallel. Returns: 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. 'nmsed_masks': (optional) a [batch_size, max_detections, mask_height, mask_width] float32 tensor containing masks for each selected box. This is set to None if input `masks` is None. 'nmsed_additional_fields': (optional) a dictionary of [batch_size, max_detections, ...] float32 tensors corresponding to the tensors specified in the input `additional_fields`. This is not returned if input `additional_fields` is None. 'num_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top num_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. 
Raises: ValueError: if `q` in boxes.shape is not 1 or not equal to number of classes as inferred from scores.shape. """ q = boxes.shape[2].value num_classes = scores.shape[2].value if q != 1 and q != num_classes: raise ValueError('third dimension of boxes must be either 1 or equal to the third dimension of scores') if change_coordinate_frame and clip_window is None: raise ValueError('if change_coordinate_frame is True, then a clip_windowmust be specified.') original_masks = masks original_additional_fields = additional_fields with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'): boxes_shape = boxes.shape batch_size = boxes_shape[0].value num_anchors = boxes_shape[1].value if batch_size is None: batch_size = tf.shape(boxes)[0] if num_anchors is None: num_anchors = tf.shape(boxes)[1] if num_valid_boxes is None: num_valid_boxes = tf.ones([batch_size], dtype=tf.int32) * num_anchors if masks is None: masks_shape = tf.stack([batch_size, num_anchors, 1, 0, 0]) masks = tf.zeros(masks_shape) if clip_window is None: clip_window = tf.stack([tf.reduce_min(boxes[:, :, :, 0]), tf.reduce_min(boxes[:, :, :, 1]), tf.reduce_max(boxes[:, :, :, 2]), tf.reduce_max(boxes[:, :, :, 3])]) if clip_window.shape.ndims == 1: clip_window = tf.tile(tf.expand_dims(clip_window, 0), [batch_size, 1]) if additional_fields is None: additional_fields = {} def _single_image_nms_fn(args): """Runs NMS on a single image and returns padded output. Args: args: A list of tensors consisting of the following: per_image_boxes - A [num_anchors, q, 4] float32 tensor containing detections. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. per_image_scores - A [num_anchors, num_classes] float32 tensor containing the scores for each of the `num_anchors` detections. per_image_masks - A [num_anchors, q, mask_height, mask_width] float32 tensor containing box masks. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. per_image_clip_window - A 1D float32 tensor of the form [ymin, xmin, ymax, xmax] representing the window to clip the boxes to. per_image_additional_fields - (optional) A variable number of float32 tensors each with size [num_anchors, ...]. per_image_num_valid_boxes - A tensor of type `int32`. A 1-D tensor of shape [batch_size] representing the number of valid boxes to be considered for each image in the batch. This parameter allows for ignoring zero paddings. Returns: 'nmsed_boxes': A [max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [max_detections] float32 tensor containing the class for boxes. 'nmsed_masks': (optional) a [max_detections, mask_height, mask_width] float32 tensor containing masks for each selected box. This is set to None if input `masks` is None. 'nmsed_additional_fields': (optional) A variable number of float32 tensors each with size [max_detections, ...] corresponding to the input `per_image_additional_fields`. 'num_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top num_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. 
""" per_image_boxes = args[0] per_image_scores = args[1] per_image_masks = args[2] per_image_clip_window = args[3] per_image_additional_fields = {key: value for (key, value) in zip(additional_fields, args[4:-1])} per_image_num_valid_boxes = args[-1] per_image_boxes = tf.reshape(tf.slice(per_image_boxes, 3 * [0], tf.stack([per_image_num_valid_boxes, -1, -1])), [-1, q, 4]) per_image_scores = tf.reshape(tf.slice(per_image_scores, [0, 0], tf.stack([per_image_num_valid_boxes, -1])), [-1, num_classes]) per_image_masks = tf.reshape(tf.slice(per_image_masks, 4 * [0], tf.stack([per_image_num_valid_boxes, -1, -1, -1])), [-1, q, per_image_masks.shape[2].value, per_image_masks.shape[3].value]) if per_image_additional_fields is not None: for (key, tensor) in per_image_additional_fields.items(): additional_field_shape = tensor.get_shape() additional_field_dim = len(additional_field_shape) per_image_additional_fields[key] = tf.reshape(tf.slice(per_image_additional_fields[key], additional_field_dim * [0], tf.stack([per_image_num_valid_boxes] + (additional_field_dim - 1) * [-1])), [-1] + [dim.value for dim in additional_field_shape[1:]]) if not 0 <= iou_thresh <= 1.0: raise ValueError('iou_thresh must be between 0 and 1') if per_image_scores.shape.ndims != 2: raise ValueError('scores field must be of rank 2') if per_image_scores.shape[1].value is None: raise ValueError('scores must have statically defined second dimension') if per_image_boxes.shape.ndims != 3: raise ValueError('boxes must be of rank 3.') if not (per_image_boxes.shape[1].value == per_image_scores.shape[1].value or per_image_boxes.shape[1].value == 1): raise ValueError('second dimension of boxes must be either 1 or equal to the second dimension of scores') if per_image_boxes.shape[2].value != 4: raise ValueError('last dimension of boxes must be of size 4.') if change_coordinate_frame and per_image_clip_window is None: raise ValueError('if change_coordinate_frame is True, then a clip_windowmust be specified.') with tf.name_scope(scope, 'MultiClassNonMaxSuppression'): num_boxes = tf.shape(per_image_boxes)[0] num_scores = tf.shape(per_image_scores)[0] num_classes = per_image_scores.get_shape()[1] length_assert = tf.Assert(tf.equal(num_boxes, num_scores), ['Incorrect scores field length: actual vs expected.', num_scores, num_boxes]) selected_boxes_list = [] per_class_boxes_list = tf.unstack(per_image_boxes, axis=1) if per_image_masks is not None: per_class_masks_list = tf.unstack(per_image_masks, axis=1) if boundaries is not None: per_class_boundaries_list = tf.unstack(boundaries, axis=1) boxes_ids = range(num_classes) if len(per_class_boxes_list) > 1 else [0] * num_classes.value for (class_idx, boxes_idx) in zip(range(num_classes), boxes_ids): per_class_boxes = per_class_boxes_list[boxes_idx] boxlist_and_class_scores = box_list.BoxList(per_class_boxes) with tf.control_dependencies([length_assert]): class_scores = tf.reshape(tf.slice(per_image_scores, [0, class_idx], tf.stack([num_scores, 1])), [-1]) boxlist_and_class_scores.add_field(fields.BoxListFields.scores, class_scores) if per_image_masks is not None: per_class_masks = per_class_masks_list[boxes_idx] boxlist_and_class_scores.add_field(fields.BoxListFields.masks, per_class_masks) if boundaries is not None: per_class_boundaries = per_class_boundaries_list[boxes_idx] boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries, per_class_boundaries) if per_image_additional_fields is not None: for (key, tensor) in per_image_additional_fields.items(): 
boxlist_and_class_scores.add_field(key, tensor) boxlist_filtered = box_list_ops.filter_greater_than(boxlist_and_class_scores, score_thresh) if per_image_clip_window is not None: boxlist_filtered = box_list_ops.clip_to_window(boxlist_filtered, per_image_clip_window) if change_coordinate_frame: boxlist_filtered = box_list_ops.change_coordinate_frame(boxlist_filtered, per_image_clip_window) max_selection_size = tf.minimum(max_size_per_class, boxlist_filtered.num_boxes()) selected_indices = tf.image.non_max_suppression(boxlist_filtered.get(), boxlist_filtered.get_field(fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh) nms_result = box_list_ops.gather(boxlist_filtered, selected_indices) nms_result.add_field(fields.BoxListFields.classes, tf.zeros_like(nms_result.get_field(fields.BoxListFields.scores)) + class_idx) selected_boxes_list.append(nms_result) selected_boxes = box_list_ops.concatenate(selected_boxes_list) sorted_boxes = box_list_ops.sort_by_field(selected_boxes, fields.BoxListFields.scores) if max_total_size: max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes()) sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size)) nmsed_boxlist = sorted_boxes padded_boxlist = box_list_ops.pad_or_clip_box_list(nmsed_boxlist, max_total_size) num_detections = nmsed_boxlist.num_boxes() nmsed_boxes = padded_boxlist.get() nmsed_scores = padded_boxlist.get_field(fields.BoxListFields.scores) nmsed_classes = padded_boxlist.get_field(fields.BoxListFields.classes) nmsed_masks = padded_boxlist.get_field(fields.BoxListFields.masks) nmsed_additional_fields = [padded_boxlist.get_field(key) for key in per_image_additional_fields] return [nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks] + nmsed_additional_fields + [num_detections] num_additional_fields = 0 if additional_fields is not None: num_additional_fields = len(additional_fields) num_nmsed_outputs = 4 + num_additional_fields batch_outputs = shape_utils.static_or_dynamic_map_fn(_single_image_nms_fn, elems=[boxes, scores, masks, clip_window] + list(additional_fields.values()) + [num_valid_boxes], dtype=num_nmsed_outputs * [tf.float32] + [tf.int32], parallel_iterations=parallel_iterations) batch_nmsed_boxes = batch_outputs[0] batch_nmsed_scores = batch_outputs[1] batch_nmsed_classes = batch_outputs[2] batch_nmsed_masks = batch_outputs[3] batch_nmsed_additional_fields = {key: value for (key, value) in zip(additional_fields, batch_outputs[4:-1])} batch_num_detections = batch_outputs[-1] if original_masks is None: batch_nmsed_masks = None if original_additional_fields is None: batch_nmsed_additional_fields = None return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, batch_nmsed_masks, batch_nmsed_additional_fields, batch_num_detections)
Accident-Detection-on-Indian-Roads
positive
def add_word(self, word): """ Parameters ---------- word : etree.Element etree representation of a <word> element (i.e. a token, which might contain child elements) """ <DeepExtract> id_attrib_key = add_ns('id') if id_attrib_key in word.attrib: word_id = word.attrib[id_attrib_key] try: word_id = word.getparent().attrib[id_attrib_key] except KeyError as e: raise KeyError('Neither the element "{0}" nor its parent "{1}" have an ID'.format(word, word.getparent())) </DeepExtract> if word.getparent().tag in ('node', 'sentence'): <DeepExtract> if 'parent' in word.attrib: parent_id = word.attrib['parent'] else: parent_id = word.getparent().attrib[add_ns('id')] </DeepExtract> else: try: parent = word.iterancestors(tag=('node', 'sentence')).next() <DeepExtract> id_attrib_key = add_ns('id') if id_attrib_key in parent.attrib: parent_id = parent.attrib[id_attrib_key] try: parent_id = parent.getparent().attrib[id_attrib_key] except KeyError as e: raise KeyError('Neither the element "{0}" nor its parent "{1}" have an ID'.format(parent, parent.getparent())) </DeepExtract> except StopIteration as e: parent_id = self.get_element_id(word).split('_')[0] self.tokens.append(word_id) <DeepExtract> word_attribs = {self.ns + ':' + key: val for (key, val) in word.attrib.items() if key != add_ns('id')} </DeepExtract> token_str = word_attribs[self.ns + ':form'] word_attribs.update({self.ns + ':token': token_str, 'label': token_str}) self.add_node(word_id, layers={self.ns, self.ns + ':token'}, attr_dict=word_attribs) self.add_edge(parent_id, word_id, edge_type=dg.EdgeTypes.dominance_relation) <DeepExtract> for child in word.iterchildren(): self.parsers[child.tag](child) </DeepExtract>
def add_word(self, word): """ Parameters ---------- word : etree.Element etree representation of a <word> element (i.e. a token, which might contain child elements) """ id_attrib_key = add_ns('id') if id_attrib_key in word.attrib: word_id = word.attrib[id_attrib_key] try: word_id = word.getparent().attrib[id_attrib_key] except KeyError as e: raise KeyError('Neither the element "{0}" nor its parent "{1}" have an ID'.format(word, word.getparent())) if word.getparent().tag in ('node', 'sentence'): if 'parent' in word.attrib: parent_id = word.attrib['parent'] else: parent_id = word.getparent().attrib[add_ns('id')] else: try: parent = word.iterancestors(tag=('node', 'sentence')).next() id_attrib_key = add_ns('id') if id_attrib_key in parent.attrib: parent_id = parent.attrib[id_attrib_key] try: parent_id = parent.getparent().attrib[id_attrib_key] except KeyError as e: raise KeyError('Neither the element "{0}" nor its parent "{1}" have an ID'.format(parent, parent.getparent())) except StopIteration as e: parent_id = self.get_element_id(word).split('_')[0] self.tokens.append(word_id) word_attribs = {self.ns + ':' + key: val for (key, val) in word.attrib.items() if key != add_ns('id')} token_str = word_attribs[self.ns + ':form'] word_attribs.update({self.ns + ':token': token_str, 'label': token_str}) self.add_node(word_id, layers={self.ns, self.ns + ':token'}, attr_dict=word_attribs) self.add_edge(parent_id, word_id, edge_type=dg.EdgeTypes.dominance_relation) for child in word.iterchildren(): self.parsers[child.tag](child) </DeepExtract>
discoursegraphs
positive
def Parse(expression): <DeepExtract> result = [] token = '' state = STATE_START while expression != '': char = expression[0] expression = expression[1:] if char == "'": if state == STATE_START: state = STATE_STRING elif state == STATE_IDENTIFIER: result.append([STATE_IDENTIFIER, token]) state = STATE_STRING token = '' elif state == STATE_STRING: result.append([STATE_STRING, token]) state = STATE_START token = '' elif char >= '0' and char <= '9' or (char.lower() >= 'a' and char.lower() <= 'z'): if state == STATE_START: token = char state = STATE_IDENTIFIER else: token += char elif char == ' ': if state == STATE_IDENTIFIER: result.append([STATE_IDENTIFIER, token]) token = '' state = STATE_START elif state == STATE_STRING: token += char elif state == STATE_IDENTIFIER: result.append([STATE_IDENTIFIER, token]) token = '' state = STATE_START result.append([STATE_SPECIAL_CHAR, char]) elif state == STATE_STRING: token += char else: result.append([STATE_SPECIAL_CHAR, char]) token = '' if state == STATE_IDENTIFIER: result.append([state, token]) elif state == STATE_STRING: result = [[STATE_ERROR, 'Error: string not closed', token]] tokens = result </DeepExtract> if len(tokens) == 0: print('Parsing error') return None if tokens[0][0] == STATE_ERROR: print(tokens[0][1]) print(tokens[0][2]) print(expression) return None functioncalls = [] while True: <DeepExtract> if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) if tokens[0][0] != STATE_IDENTIFIER: print('Parsing error') (functioncall, tokens) = (None, tokens) function = tokens[0][1] tokens = tokens[1:] if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(': print('Parsing error') (functioncall, tokens) = (None, tokens) tokens = tokens[1:] if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) arguments = [] while True: if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING: print('Parsing error') (functioncall, tokens) = (None, tokens) arguments.append(tokens[0]) tokens = tokens[1:] if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'): print('Parsing error') (functioncall, tokens) = (None, tokens) if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')': tokens = tokens[1:] break tokens = tokens[1:] if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) (functioncall, tokens) = [[function, arguments], tokens] </DeepExtract> if functioncall == None: return None functioncalls.append(functioncall) if len(tokens) == 0: return functioncalls if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+': print('Parsing error') return None tokens = tokens[1:]
def Parse(expression): result = [] token = '' state = STATE_START while expression != '': char = expression[0] expression = expression[1:] if char == "'": if state == STATE_START: state = STATE_STRING elif state == STATE_IDENTIFIER: result.append([STATE_IDENTIFIER, token]) state = STATE_STRING token = '' elif state == STATE_STRING: result.append([STATE_STRING, token]) state = STATE_START token = '' elif char >= '0' and char <= '9' or (char.lower() >= 'a' and char.lower() <= 'z'): if state == STATE_START: token = char state = STATE_IDENTIFIER else: token += char elif char == ' ': if state == STATE_IDENTIFIER: result.append([STATE_IDENTIFIER, token]) token = '' state = STATE_START elif state == STATE_STRING: token += char elif state == STATE_IDENTIFIER: result.append([STATE_IDENTIFIER, token]) token = '' state = STATE_START result.append([STATE_SPECIAL_CHAR, char]) elif state == STATE_STRING: token += char else: result.append([STATE_SPECIAL_CHAR, char]) token = '' if state == STATE_IDENTIFIER: result.append([state, token]) elif state == STATE_STRING: result = [[STATE_ERROR, 'Error: string not closed', token]] tokens = result if len(tokens) == 0: print('Parsing error') return None if tokens[0][0] == STATE_ERROR: print(tokens[0][1]) print(tokens[0][2]) print(expression) return None functioncalls = [] while True: if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) if tokens[0][0] != STATE_IDENTIFIER: print('Parsing error') (functioncall, tokens) = (None, tokens) function = tokens[0][1] tokens = tokens[1:] if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(': print('Parsing error') (functioncall, tokens) = (None, tokens) tokens = tokens[1:] if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) arguments = [] while True: if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING: print('Parsing error') (functioncall, tokens) = (None, tokens) arguments.append(tokens[0]) tokens = tokens[1:] if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'): print('Parsing error') (functioncall, tokens) = (None, tokens) if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')': tokens = tokens[1:] break tokens = tokens[1:] if len(tokens) == 0: print('Parsing error') (functioncall, tokens) = (None, tokens) (functioncall, tokens) = [[function, arguments], tokens] if functioncall == None: return None functioncalls.append(functioncall) if len(tokens) == 0: return functioncalls if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+': print('Parsing error') return None tokens = tokens[1:]
Beta
positive
def _get_min_size_limit(self):
    limit = size.Size(0)
    if self.selected_fs:
        limit = self.selected_fs._min_size
    <DeepExtract>
    device_type = self.selected_type
    if device_type in ('lvmlv', 'lvmthinpool'):
        min_size = self.selected_parent.pe_size
    elif device_type == 'lvm':
        min_size = lvm.LVM_PE_SIZE * 2
    elif device_type in ('lvmthinlv', 'lvm snapshot'):
        min_size = self.selected_parent.vg.pe_size
    elif device_type == 'btrfs volume':
        min_size = BTRFS._min_size
    else:
        min_size = size.Size('1 MiB')
    parent_limit = min_size
    </DeepExtract>
    limit = max(limit, parent_limit)
    return limit or size.Size('1 MiB')
def _get_min_size_limit(self):
    limit = size.Size(0)
    if self.selected_fs:
        limit = self.selected_fs._min_size
    device_type = self.selected_type
    if device_type in ('lvmlv', 'lvmthinpool'):
        min_size = self.selected_parent.pe_size
    elif device_type == 'lvm':
        min_size = lvm.LVM_PE_SIZE * 2
    elif device_type in ('lvmthinlv', 'lvm snapshot'):
        min_size = self.selected_parent.vg.pe_size
    elif device_type == 'btrfs volume':
        min_size = BTRFS._min_size
    else:
        min_size = size.Size('1 MiB')
    parent_limit = min_size
    limit = max(limit, parent_limit)
    return limit or size.Size('1 MiB')
blivet-gui
positive
def step(self, metrics, epoch=None):
    current = metrics
    if epoch is None:
        epoch = self.last_epoch = self.last_epoch + 1
    self.last_epoch = epoch
    if self.is_better(current, self.best):
        self.best = current
        self.num_bad_epochs = 0
    else:
        self.num_bad_epochs += 1
    if self.in_cooldown:
        self.cooldown_counter -= 1
        self.num_bad_epochs = 0
    if self.num_bad_epochs > self.patience:
        self._reduce_lr(epoch)
        <DeepExtract>
        for (i, param_group) in enumerate(self.optimizer.param_groups):
            if param_group['weight_decay'] != 0:
                old_weight_decay = float(param_group['weight_decay'])
                new_weight_decay = max(old_weight_decay * self.factor, self.min_lrs[i])
                if old_weight_decay - new_weight_decay > self.eps:
                    param_group['weight_decay'] = new_weight_decay
                    if self.verbose:
                        log.info(f'Epoch {epoch}: reducing weight decay factor of group {i} to {new_weight_decay:.4e}.')
        </DeepExtract>
        self.cooldown_counter = self.cooldown
        self.num_bad_epochs = 0
def step(self, metrics, epoch=None):
    current = metrics
    if epoch is None:
        epoch = self.last_epoch = self.last_epoch + 1
    self.last_epoch = epoch
    if self.is_better(current, self.best):
        self.best = current
        self.num_bad_epochs = 0
    else:
        self.num_bad_epochs += 1
    if self.in_cooldown:
        self.cooldown_counter -= 1
        self.num_bad_epochs = 0
    if self.num_bad_epochs > self.patience:
        self._reduce_lr(epoch)
        for (i, param_group) in enumerate(self.optimizer.param_groups):
            if param_group['weight_decay'] != 0:
                old_weight_decay = float(param_group['weight_decay'])
                new_weight_decay = max(old_weight_decay * self.factor, self.min_lrs[i])
                if old_weight_decay - new_weight_decay > self.eps:
                    param_group['weight_decay'] = new_weight_decay
                    if self.verbose:
                        log.info(f'Epoch {epoch}: reducing weight decay factor of group {i} to {new_weight_decay:.4e}.')
        self.cooldown_counter = self.cooldown
        self.num_bad_epochs = 0
ACE
positive
def associate_to_template(self, template): """ Method to associate all resources to the template :param troposphere.Template template: :return: """ template.add_resource(self.lb) <DeepExtract> self.output_properties = {LB_DNS_NAME: (f'{self.logical_name}{LB_DNS_NAME.return_value}', self.cfn_resource, GetAtt, LB_DNS_NAME.return_value), LB_DNS_ZONE_ID: (f'{self.logical_name}{LB_DNS_ZONE_ID.return_value}', self.cfn_resource, GetAtt, LB_DNS_ZONE_ID.return_value), LB_NAME: (f'{self.logical_name}{LB_NAME.return_value}', self.cfn_resource, GetAtt, LB_NAME.return_value), LB_FULL_NAME: (f'{self.logical_name}{LB_FULL_NAME.return_value}', self.cfn_resource, GetAtt, LB_FULL_NAME.return_value)} </DeepExtract> if self.lb_sg and isinstance(self.lb_sg, SecurityGroup): self.output_properties.update({LB_SG_ID: (f'{self.logical_name}{LB_SG_ID.return_value}', self.lb_sg, GetAtt, LB_SG_ID.return_value, None)}) template.add_resource(self.lb_sg) for eip in self.lb_eips: template.add_resource(eip) self.generate_outputs()
def associate_to_template(self, template): """ Method to associate all resources to the template :param troposphere.Template template: :return: """ template.add_resource(self.lb) self.output_properties = {LB_DNS_NAME: (f'{self.logical_name}{LB_DNS_NAME.return_value}', self.cfn_resource, GetAtt, LB_DNS_NAME.return_value), LB_DNS_ZONE_ID: (f'{self.logical_name}{LB_DNS_ZONE_ID.return_value}', self.cfn_resource, GetAtt, LB_DNS_ZONE_ID.return_value), LB_NAME: (f'{self.logical_name}{LB_NAME.return_value}', self.cfn_resource, GetAtt, LB_NAME.return_value), LB_FULL_NAME: (f'{self.logical_name}{LB_FULL_NAME.return_value}', self.cfn_resource, GetAtt, LB_FULL_NAME.return_value)} if self.lb_sg and isinstance(self.lb_sg, SecurityGroup): self.output_properties.update({LB_SG_ID: (f'{self.logical_name}{LB_SG_ID.return_value}', self.lb_sg, GetAtt, LB_SG_ID.return_value, None)}) template.add_resource(self.lb_sg) for eip in self.lb_eips: template.add_resource(eip) self.generate_outputs()
ecs_composex
positive
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
    max_len = self.bptt + max_deviation * std
    i = start
    while True:
        bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
        bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
        <DeepExtract>
        if bptt is None:
            bptt = self.bptt
        seq_len = min(bptt, self.data.size(0) - 1 - i)
        end_idx = i + seq_len
        beg_idx = max(0, i - self.ext_len)
        data = self.data[beg_idx:end_idx]
        target = self.data[i + 1:i + 1 + seq_len]
        data_out = data.transpose(0, 1).contiguous().to(self.device)
        target_out = target.transpose(0, 1).contiguous().to(self.device)
        (data, target, seq_len) = (data_out, target_out, seq_len)
        </DeepExtract>
        i += seq_len
        yield (data, target, seq_len)
        if i >= self.data.size(0) - 2:
            break
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
    max_len = self.bptt + max_deviation * std
    i = start
    while True:
        bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
        bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
        if bptt is None:
            bptt = self.bptt
        seq_len = min(bptt, self.data.size(0) - 1 - i)
        end_idx = i + seq_len
        beg_idx = max(0, i - self.ext_len)
        data = self.data[beg_idx:end_idx]
        target = self.data[i + 1:i + 1 + seq_len]
        data_out = data.transpose(0, 1).contiguous().to(self.device)
        target_out = target.transpose(0, 1).contiguous().to(self.device)
        (data, target, seq_len) = (data_out, target_out, seq_len)
        i += seq_len
        yield (data, target, seq_len)
        if i >= self.data.size(0) - 2:
            break
bert_on_stilts
positive
def read_videos_and_save_tf_records(output_dir, fnames, start_sequence_iter=None, end_sequence_iter=None, sequences_per_file=128): print('started process with PID:', os.getpid()) if not os.path.exists(output_dir): os.makedirs(output_dir) if start_sequence_iter is None: start_sequence_iter = 0 if end_sequence_iter is None: end_sequence_iter = len(fnames) def preprocess_image(image): if image.shape != (240, 320, 3): image = cv2.resize(image, (320, 240), interpolation=cv2.INTER_LINEAR) return tf.compat.as_bytes(cv2.imencode('.jpg', image)[1].tobytes()) print('reading and saving sequences {0} to {1}'.format(start_sequence_iter, end_sequence_iter)) sequences = [] for sequence_iter in range(start_sequence_iter, end_sequence_iter): if not sequences: last_start_sequence_iter = sequence_iter print('reading sequences starting at sequence %d' % sequence_iter) sequences.append(read_video(fnames[sequence_iter])) if len(sequences) == sequences_per_file or sequence_iter == end_sequence_iter - 1: output_fname = 'sequence_{0}_to_{1}.tfrecords'.format(last_start_sequence_iter, sequence_iter) output_fname = os.path.join(output_dir, output_fname) <DeepExtract> print('saving sequences to %s' % output_fname) with tf.python_io.TFRecordWriter(output_fname) as writer: for sequence in sequences: num_frames = len(sequence) (height, width, channels) = sequence[0].shape encoded_sequence = [preprocess_image(image) for image in sequence] features = tf.train.Features(feature={'sequence_length': _int64_feature(num_frames), 'height': _int64_feature(height), 'width': _int64_feature(width), 'channels': _int64_feature(channels), 'images/encoded': _bytes_list_feature(encoded_sequence)}) example = tf.train.Example(features=features) writer.write(example.SerializeToString()) </DeepExtract> sequences[:] = []
def read_videos_and_save_tf_records(output_dir, fnames, start_sequence_iter=None, end_sequence_iter=None, sequences_per_file=128): print('started process with PID:', os.getpid()) if not os.path.exists(output_dir): os.makedirs(output_dir) if start_sequence_iter is None: start_sequence_iter = 0 if end_sequence_iter is None: end_sequence_iter = len(fnames) def preprocess_image(image): if image.shape != (240, 320, 3): image = cv2.resize(image, (320, 240), interpolation=cv2.INTER_LINEAR) return tf.compat.as_bytes(cv2.imencode('.jpg', image)[1].tobytes()) print('reading and saving sequences {0} to {1}'.format(start_sequence_iter, end_sequence_iter)) sequences = [] for sequence_iter in range(start_sequence_iter, end_sequence_iter): if not sequences: last_start_sequence_iter = sequence_iter print('reading sequences starting at sequence %d' % sequence_iter) sequences.append(read_video(fnames[sequence_iter])) if len(sequences) == sequences_per_file or sequence_iter == end_sequence_iter - 1: output_fname = 'sequence_{0}_to_{1}.tfrecords'.format(last_start_sequence_iter, sequence_iter) output_fname = os.path.join(output_dir, output_fname) print('saving sequences to %s' % output_fname) with tf.python_io.TFRecordWriter(output_fname) as writer: for sequence in sequences: num_frames = len(sequence) (height, width, channels) = sequence[0].shape encoded_sequence = [preprocess_image(image) for image in sequence] features = tf.train.Features(feature={'sequence_length': _int64_feature(num_frames), 'height': _int64_feature(height), 'width': _int64_feature(width), 'channels': _int64_feature(channels), 'images/encoded': _bytes_list_feature(encoded_sequence)}) example = tf.train.Example(features=features) writer.write(example.SerializeToString()) sequences[:] = []
DSGAN
positive
def test_bad_label_filter(): """Expect to detect nothing because the label is not in the training label set.""" <DeepExtract> _dir = os.path.dirname(os.path.abspath(__file__)) _good_tflite_model = os.path.join(_dir, 'mobilenet_ssd_v2_coco_quant_postprocess.tflite') _good_edgetpu_model = os.path.join(_dir, 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite') _good_labels = os.path.join(_dir, 'coco_labels.txt') config = {'model': {'tflite': _good_tflite_model, 'edgetpu': _good_edgetpu_model}, 'labels': _good_labels, 'top_k': 3, 'confidence_threshold': 0.8} config = config </DeepExtract> config['confidence_threshold'] = 0.6 config['label_filter'] = ['SomeR@ndomJunk'] result = None def sample_callback(image=None, inference_result=None, **kwargs): nonlocal result result = inference_result object_detector = ObjectDetector(**config) output = _OutPipeElement(sample_callback=sample_callback) object_detector.connect_to_next_element(output) <DeepExtract> assert 'person-couch.jpg' _dir = os.path.dirname(os.path.abspath(__file__)) image_file = os.path.join(_dir, 'person-couch.jpg') img = Image.open(image_file) img = img </DeepExtract> object_detector.receive_next_sample(image=img) assert not result
def test_bad_label_filter(): """Expect to detect nothing because the label is not in the training label set.""" _dir = os.path.dirname(os.path.abspath(__file__)) _good_tflite_model = os.path.join(_dir, 'mobilenet_ssd_v2_coco_quant_postprocess.tflite') _good_edgetpu_model = os.path.join(_dir, 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite') _good_labels = os.path.join(_dir, 'coco_labels.txt') config = {'model': {'tflite': _good_tflite_model, 'edgetpu': _good_edgetpu_model}, 'labels': _good_labels, 'top_k': 3, 'confidence_threshold': 0.8} config = config config['confidence_threshold'] = 0.6 config['label_filter'] = ['SomeR@ndomJunk'] result = None def sample_callback(image=None, inference_result=None, **kwargs): nonlocal result result = inference_result object_detector = ObjectDetector(**config) output = _OutPipeElement(sample_callback=sample_callback) object_detector.connect_to_next_element(output) assert 'person-couch.jpg' _dir = os.path.dirname(os.path.abspath(__file__)) image_file = os.path.join(_dir, 'person-couch.jpg') img = Image.open(image_file) img = img object_detector.receive_next_sample(image=img) assert not result
ambianic-edge
positive
def fdr(self, theta): """Given a threshold theta, find the estimated FDR Parameters ---------- theta : float or array of shape (n_samples) values to test Returns ------- afp : value of array of shape(n) """ from scipy.stats import norm <DeepExtract> import scipy.stats as st if self.learned == 0: self.learn() efp = self.p0 * st.norm.sf(self.x, self.mu, self.sigma) * self.n / np.arange(self.n, 0, -1) efp = np.minimum(efp, 1) ix = np.argsort(self.x) for i in range(np.size(efp) - 1, 0, -1): efp[ix[i - 1]] = np.maximum(efp[ix[i]], efp[ix[i - 1]]) self.sorted_x = self.x[ix] self.sorted_fdr = efp[ix] return efp </DeepExtract> if np.isscalar(theta): if theta > self.sorted_x[-1]: return 0 maj = np.where(self.sorted_x >= theta)[0][0] efp = self.p0 * norm.sf(theta, self.mu, self.sigma) * self.n / np.sum(self.x >= theta) efp = np.maximum(self.sorted_fdr[maj], efp) else: efp = [] for th in theta: if th > self.sorted_x[-1]: efp.append(0) continue maj = self.sorted_fdr[np.where(self.sorted_x >= th)[0][0]] efp.append(np.maximum(maj, self.p0 * st.norm.sf(th, self.mu, self.sigma) * self.n / np.sum(self.x >= th))) efp = np.array(efp) efp = np.minimum(efp, 1) return efp
def fdr(self, theta): """Given a threshold theta, find the estimated FDR Parameters ---------- theta : float or array of shape (n_samples) values to test Returns ------- afp : value of array of shape(n) """ from scipy.stats import norm import scipy.stats as st if self.learned == 0: self.learn() efp = self.p0 * st.norm.sf(self.x, self.mu, self.sigma) * self.n / np.arange(self.n, 0, -1) efp = np.minimum(efp, 1) ix = np.argsort(self.x) for i in range(np.size(efp) - 1, 0, -1): efp[ix[i - 1]] = np.maximum(efp[ix[i]], efp[ix[i - 1]]) self.sorted_x = self.x[ix] self.sorted_fdr = efp[ix] return efp if np.isscalar(theta): if theta > self.sorted_x[-1]: return 0 maj = np.where(self.sorted_x >= theta)[0][0] efp = self.p0 * norm.sf(theta, self.mu, self.sigma) * self.n / np.sum(self.x >= theta) efp = np.maximum(self.sorted_fdr[maj], efp) else: efp = [] for th in theta: if th > self.sorted_x[-1]: efp.append(0) continue maj = self.sorted_fdr[np.where(self.sorted_x >= th)[0][0]] efp.append(np.maximum(maj, self.p0 * st.norm.sf(th, self.mu, self.sigma) * self.n / np.sum(self.x >= th))) efp = np.array(efp) efp = np.minimum(efp, 1) return efp
3DUnetCNN
positive
def executeWrite(db, dbConn, sql, row, writeCursor=None, warnQuery=False, charset=DEF_ENCODING, use_unicode=DEF_UNICODE_SWITCH, mysql_config_file=MYSQL_CONFIG_FILE): """Executes a write query""" if warnQuery: warn('SQL (write many) QUERY: %s' % sql) if not writeCursor: writeCursor = dbConn.cursor() attempts = 0 while 1: try: writeCursor.execute(sql, row) break except MySQLdb.Error as e: attempts += 1 warn(' *MYSQL Corpus DB ERROR on %s:\n%s (%d attempt)' % (sql, e, attempts)) time.sleep(MYSQL_ERROR_SLEEP) <DeepExtract> dbConn = None attempts = 0 while 1: try: dbConn = MySQLdb.connect(db=db, charset=charset, use_unicode=use_unicode, local_infile=True, read_default_file=mysql_config_file) break except MySQLdb.Error as e: attempts += 1 warn(' *MYSQL Connect ERROR on db:%s\n%s\n (%d attempt)' % (db, e, attempts)) time.sleep(MYSQL_ERROR_SLEEP) if attempts > MAX_ATTEMPTS: sys.exit(1) dbCursor = dbConn.cursor() dictCursor = dbConn.cursor(MySQLdb.cursors.DictCursor) (dbConn, dbCursor, dictCursor) = (dbConn, dbCursor, dictCursor) </DeepExtract> writeCursor = dbConn.cursor() if attempts > MAX_ATTEMPTS: sys.exit(1) return writeCursor
def executeWrite(db, dbConn, sql, row, writeCursor=None, warnQuery=False, charset=DEF_ENCODING, use_unicode=DEF_UNICODE_SWITCH, mysql_config_file=MYSQL_CONFIG_FILE): """Executes a write query""" if warnQuery: warn('SQL (write many) QUERY: %s' % sql) if not writeCursor: writeCursor = dbConn.cursor() attempts = 0 while 1: try: writeCursor.execute(sql, row) break except MySQLdb.Error as e: attempts += 1 warn(' *MYSQL Corpus DB ERROR on %s:\n%s (%d attempt)' % (sql, e, attempts)) time.sleep(MYSQL_ERROR_SLEEP) dbConn = None attempts = 0 while 1: try: dbConn = MySQLdb.connect(db=db, charset=charset, use_unicode=use_unicode, local_infile=True, read_default_file=mysql_config_file) break except MySQLdb.Error as e: attempts += 1 warn(' *MYSQL Connect ERROR on db:%s\n%s\n (%d attempt)' % (db, e, attempts)) time.sleep(MYSQL_ERROR_SLEEP) if attempts > MAX_ATTEMPTS: sys.exit(1) dbCursor = dbConn.cursor() dictCursor = dbConn.cursor(MySQLdb.cursors.DictCursor) (dbConn, dbCursor, dictCursor) = (dbConn, dbCursor, dictCursor) writeCursor = dbConn.cursor() if attempts > MAX_ATTEMPTS: sys.exit(1) return writeCursor
dlatk
positive
def get_identifier(self): """ Read identifier """ <DeepExtract> if time.time() - self.pre_time < 1.0: time.sleep(1.0) self._intf.write(str('version')) self.pre_time = time.time() </DeepExtract> <DeepExtract> ret = self._intf.read() if len(ret) < 2 or ret[-2:] != '\r\n': logger.warning('read() termination error') ret = ret[:-2] </DeepExtract> return ret
def get_identifier(self): """ Read identifier """ if time.time() - self.pre_time < 1.0: time.sleep(1.0) self._intf.write(str('version')) self.pre_time = time.time() ret = self._intf.read() if len(ret) < 2 or ret[-2:] != '\r\n': logger.warning('read() termination error') ret = ret[:-2] return ret
basil
positive
@pytest.fixture def recipe_dir(recipes_folder: py.path.local, tmpdir: py.path.local, case, recipe_data): """Prepares a recipe from recipe_data in recipes_folder""" recipe = deepcopy(recipe_data['meta.yaml']) if 'remove' in case: for remove in utils.ensure_list(case['remove']): path = remove.split('/') cont = recipe for p in path[:-1]: cont = cont[p] if isinstance(cont, list): for n in range(len(cont)): del cont[n][path[-1]] else: del cont[path[-1]] if 'add' in case: <DeepExtract> for (key, value) in case['add'].items(): if isinstance(value, dict): recipe[key] = dict_merge(recipe.get(key, {}), value) elif isinstance(recipe, list): for num in range(len(recipe)): recipe[num][key] = dict_merge(recipe[num].get(key, {}), case['add']) else: recipe[key] = value return recipe </DeepExtract> recipe_dir = recipes_folder.mkdir(recipe_data['folder']) with recipe_dir.join('meta.yaml').open('w') as fdes: yaml.dump(recipe, fdes, transform=lambda l: l.replace('#{%', '{%').replace('#{{', '{{')) if 'add_files' in case: for (fname, data) in case['add_files'].items(): with recipe_dir.join(fname).open('w') as fdes: fdes.write(data) if 'move_files' in case: for (src, dest) in case['move_files'].items(): src_path = recipe_dir.join(src) if not dest: if os.path.isdir(src_path): shutil.rmtree(src_path) else: os.remove(src_path) else: dest_path = recipe_dir.join(dest) shutil.move(src_path, dest_path) yield recipe_dir
@pytest.fixture def recipe_dir(recipes_folder: py.path.local, tmpdir: py.path.local, case, recipe_data): """Prepares a recipe from recipe_data in recipes_folder""" recipe = deepcopy(recipe_data['meta.yaml']) if 'remove' in case: for remove in utils.ensure_list(case['remove']): path = remove.split('/') cont = recipe for p in path[:-1]: cont = cont[p] if isinstance(cont, list): for n in range(len(cont)): del cont[n][path[-1]] else: del cont[path[-1]] if 'add' in case: for (key, value) in case['add'].items(): if isinstance(value, dict): recipe[key] = dict_merge(recipe.get(key, {}), value) elif isinstance(recipe, list): for num in range(len(recipe)): recipe[num][key] = dict_merge(recipe[num].get(key, {}), case['add']) else: recipe[key] = value return recipe recipe_dir = recipes_folder.mkdir(recipe_data['folder']) with recipe_dir.join('meta.yaml').open('w') as fdes: yaml.dump(recipe, fdes, transform=lambda l: l.replace('#{%', '{%').replace('#{{', '{{')) if 'add_files' in case: for (fname, data) in case['add_files'].items(): with recipe_dir.join(fname).open('w') as fdes: fdes.write(data) if 'move_files' in case: for (src, dest) in case['move_files'].items(): src_path = recipe_dir.join(src) if not dest: if os.path.isdir(src_path): shutil.rmtree(src_path) else: os.remove(src_path) else: dest_path = recipe_dir.join(dest) shutil.move(src_path, dest_path) yield recipe_dir
bioconda-utils
positive
def _create_test_tensor(spec, dtype=None):
    """Creates an arbitrary tensor consistent with the TensorSpec `spec`."""
    <DeepExtract>
    if _is_numeric_type(spec.dtype):
        value = tensor_spec_utils.bounds(spec).min
    else:
        value = tensor_utils.data_type_to_np_type(spec.dtype).type()
    shape = np.asarray(spec.shape)
    shape[shape < 0] = 1
    value = np.full(shape=shape, fill_value=value, dtype=dtype)
    </DeepExtract>
    return tensor_utils.pack_tensor(value)
def _create_test_tensor(spec, dtype=None):
    """Creates an arbitrary tensor consistent with the TensorSpec `spec`."""
    if _is_numeric_type(spec.dtype):
        value = tensor_spec_utils.bounds(spec).min
    else:
        value = tensor_utils.data_type_to_np_type(spec.dtype).type()
    shape = np.asarray(spec.shape)
    shape[shape < 0] = 1
    value = np.full(shape=shape, fill_value=value, dtype=dtype)
    return tensor_utils.pack_tensor(value)
dm_env_rpc
positive
def __init__(self, trace, expression):
    <DeepExtract>
    try:
        self.expression = Constant(trace, expression.value(), expression.type_(), expression.signed())
    except AttributeError:
        trace.error('Not a valid expression')
    except NotConstant:
        self.expression = expression
    </DeepExtract>
    self.trace = trace
    Expression.__init__(self, 'double', expression.signed())
def __init__(self, trace, expression):
    try:
        self.expression = Constant(trace, expression.value(), expression.type_(), expression.signed())
    except AttributeError:
        trace.error('Not a valid expression')
    except NotConstant:
        self.expression = expression
    self.trace = trace
    Expression.__init__(self, 'double', expression.signed())
Chips-2.0
positive
def get_labels(self, args, ids, benchmark):
    assignments = []
    for id in ids:
        <DeepExtract>
        if True:
            columns = 'rowid, ' + columns
        query = 'SELECT {columns} FROM {table_name} WHERE rowid = ?;'.format(table_name=DataAccess.TABLE_JOBS, columns=columns)
        ihdp = self.db.execute(query, (id[0],)).fetchone()
        </DeepExtract>
        assignment = benchmark.get_assignment(id[0], ihdp[1])[0]
        assignments.append(assignment)
    assignments = np.array(assignments)
    num_labels = benchmark.get_num_treatments()
    return (assignments, num_labels)
def get_labels(self, args, ids, benchmark):
    assignments = []
    for id in ids:
        if True:
            columns = 'rowid, ' + columns
        query = 'SELECT {columns} FROM {table_name} WHERE rowid = ?;'.format(table_name=DataAccess.TABLE_JOBS, columns=columns)
        ihdp = self.db.execute(query, (id[0],)).fetchone()
        assignment = benchmark.get_assignment(id[0], ihdp[1])[0]
        assignments.append(assignment)
    assignments = np.array(assignments)
    num_labels = benchmark.get_num_treatments()
    return (assignments, num_labels)
drnet
positive
def check_and_filter(self): """ Filters the executable removing those not matching the correct application. Returns whether or not all of them were correctly set. """ execs_correct: bool = True execs_to_check = {'gencase': self.gencase, 'dualsphysics': self.dsphysics, 'partvtk': self.partvtk, 'floatinginfo': self.floatinginfo, 'computeforces': self.computeforces, 'measuretool': self.measuretool, 'isosurface': self.isosurface, 'boundaryvtk': self.boundaryvtk, 'flowtool': self.flowtool, 'bathymetrytool': self.bathymetrytool} bad_executables: list = list() for (word, executable) in execs_to_check.items(): if not executable_contains_string(executable, word): debug('Executable {} does not contain the word {}'.format(executable, word)) execs_correct = False bad_executables.append(executable) if not execs_correct: error_dialog('One or more of the executables set on the configuration is not correct. Please see the details below.', "These executables do not correspond to their appropriate tool or don't have execution permissions:\n\n{}".format(LINE_END.join(bad_executables))) <DeepExtract> with open(get_saved_config_file(), 'wb') as picklefile: pickle.dump(self, picklefile, PICKLE_PROTOCOL) </DeepExtract> return execs_correct
def check_and_filter(self): """ Filters the executable removing those not matching the correct application. Returns whether or not all of them were correctly set. """ execs_correct: bool = True execs_to_check = {'gencase': self.gencase, 'dualsphysics': self.dsphysics, 'partvtk': self.partvtk, 'floatinginfo': self.floatinginfo, 'computeforces': self.computeforces, 'measuretool': self.measuretool, 'isosurface': self.isosurface, 'boundaryvtk': self.boundaryvtk, 'flowtool': self.flowtool, 'bathymetrytool': self.bathymetrytool} bad_executables: list = list() for (word, executable) in execs_to_check.items(): if not executable_contains_string(executable, word): debug('Executable {} does not contain the word {}'.format(executable, word)) execs_correct = False bad_executables.append(executable) if not execs_correct: error_dialog('One or more of the executables set on the configuration is not correct. Please see the details below.', "These executables do not correspond to their appropriate tool or don't have execution permissions:\n\n{}".format(LINE_END.join(bad_executables))) with open(get_saved_config_file(), 'wb') as picklefile: pickle.dump(self, picklefile, PICKLE_PROTOCOL) return execs_correct
DesignSPHysics
positive
def __init__(self, load_file=None): super(ESP8266V2FirmwareImage, self).__init__() self.version = 2 if load_file is not None: <DeepExtract> (magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', load_file.read(8)) if magic != self.IMAGE_V2_MAGIC: raise FatalError('Invalid firmware image magic=0x%x' % magic) segments = segments </DeepExtract> if segments != self.IMAGE_V2_SEGMENT: print('Warning: V2 header has unexpected "segment" count %d (usually 4)' % segments) <DeepExtract> file_offs = load_file.tell() (offset, size) = struct.unpack('<II', load_file.read(8)) self.warn_if_unusual_segment(offset, size, True) segment_data = load_file.read(size) if len(segment_data) < size: raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data))) segment = ImageSegment(offset, segment_data, file_offs) self.segments.append(segment) irom_segment = segment </DeepExtract> irom_segment.addr = 0 irom_segment.include_in_checksum = False first_flash_mode = self.flash_mode first_flash_size_freq = self.flash_size_freq first_entrypoint = self.entrypoint <DeepExtract> (magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', load_file.read(8)) if magic != ESPLoader.ESP_IMAGE_MAGIC: raise FatalError('Invalid firmware image magic=0x%x' % magic) segments = segments </DeepExtract> if first_flash_mode != self.flash_mode: print('WARNING: Flash mode value in first header (0x%02x) disagrees with second (0x%02x). Using second value.' % (first_flash_mode, self.flash_mode)) if first_flash_size_freq != self.flash_size_freq: print('WARNING: Flash size/freq value in first header (0x%02x) disagrees with second (0x%02x). Using second value.' % (first_flash_size_freq, self.flash_size_freq)) if first_entrypoint != self.entrypoint: print('WARNING: Entrypoint address in first header (0x%08x) disagrees with second header (0x%08x). Using second value.' % (first_entrypoint, self.entrypoint)) for _ in range(segments): <DeepExtract> file_offs = load_file.tell() (offset, size) = struct.unpack('<II', load_file.read(8)) self.warn_if_unusual_segment(offset, size, is_irom_segment) segment_data = load_file.read(size) if len(segment_data) < size: raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data))) segment = ImageSegment(offset, segment_data, file_offs) self.segments.append(segment) return segment </DeepExtract> <DeepExtract> align_file_position(load_file, 16) self.checksum = ord(load_file.read(1)) </DeepExtract> <DeepExtract> if len(self.segments) > 16: raise FatalError('Invalid segment count %d (max 16). Usually this indicates a linker script problem.' % len(self.segments)) </DeepExtract>
def __init__(self, load_file=None): super(ESP8266V2FirmwareImage, self).__init__() self.version = 2 if load_file is not None: (magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', load_file.read(8)) if magic != self.IMAGE_V2_MAGIC: raise FatalError('Invalid firmware image magic=0x%x' % magic) segments = segments if segments != self.IMAGE_V2_SEGMENT: print('Warning: V2 header has unexpected "segment" count %d (usually 4)' % segments) file_offs = load_file.tell() (offset, size) = struct.unpack('<II', load_file.read(8)) self.warn_if_unusual_segment(offset, size, True) segment_data = load_file.read(size) if len(segment_data) < size: raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data))) segment = ImageSegment(offset, segment_data, file_offs) self.segments.append(segment) irom_segment = segment irom_segment.addr = 0 irom_segment.include_in_checksum = False first_flash_mode = self.flash_mode first_flash_size_freq = self.flash_size_freq first_entrypoint = self.entrypoint (magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', load_file.read(8)) if magic != ESPLoader.ESP_IMAGE_MAGIC: raise FatalError('Invalid firmware image magic=0x%x' % magic) segments = segments if first_flash_mode != self.flash_mode: print('WARNING: Flash mode value in first header (0x%02x) disagrees with second (0x%02x). Using second value.' % (first_flash_mode, self.flash_mode)) if first_flash_size_freq != self.flash_size_freq: print('WARNING: Flash size/freq value in first header (0x%02x) disagrees with second (0x%02x). Using second value.' % (first_flash_size_freq, self.flash_size_freq)) if first_entrypoint != self.entrypoint: print('WARNING: Entrypoint address in first header (0x%08x) disagrees with second header (0x%08x). Using second value.' % (first_entrypoint, self.entrypoint)) for _ in range(segments): file_offs = load_file.tell() (offset, size) = struct.unpack('<II', load_file.read(8)) self.warn_if_unusual_segment(offset, size, is_irom_segment) segment_data = load_file.read(size) if len(segment_data) < size: raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data))) segment = ImageSegment(offset, segment_data, file_offs) self.segments.append(segment) return segment align_file_position(load_file, 16) self.checksum = ord(load_file.read(1)) if len(self.segments) > 16: raise FatalError('Invalid segment count %d (max 16). Usually this indicates a linker script problem.' % len(self.segments))
esptool
positive
def test_parse_collaboration(self):
    <DeepExtract>
    path = self.get_path('data/infoxmls/collaboration.xml')
    xml = read_file_contents(path)
    </DeepExtract>
    parse_app_metadata(xml, self.config.info_schema, self.config.pre_info_xslt, self.config.info_xslt)
def test_parse_collaboration(self):
    path = self.get_path('data/infoxmls/collaboration.xml')
    xml = read_file_contents(path)
    parse_app_metadata(xml, self.config.info_schema, self.config.pre_info_xslt, self.config.info_xslt)
appstore
positive
def printInstanceMethods(cls, showaddr=False, prefix='-'): <DeepExtract> tmpString = '\n unsigned int outCount;\n Method *methods = (Method *)class_copyMethodList((Class)$cls, &outCount);\n NSMutableArray *result = (id)[NSMutableArray array];\n \n for (int i = 0; i < outCount; i++) {\n NSMutableDictionary *m = (id)[NSMutableDictionary dictionary];\n\n SEL name = (SEL)method_getName(methods[i]);\n [m setObject:(id)NSStringFromSelector(name) forKey:@"name"];\n \n char * encoding = (char *)method_getTypeEncoding(methods[i]);\n [m setObject:(id)[NSString stringWithUTF8String:encoding] forKey:@"type_encoding"];\n \n NSMutableArray *types = (id)[NSMutableArray array];\n NSInteger args = (NSInteger)method_getNumberOfArguments(methods[i]);\n for (int idx = 0; idx < args; idx++) {\n char *type = (char *)method_copyArgumentType(methods[i], idx);\n [types addObject:(id)[NSString stringWithUTF8String:type]];\n }\n [m setObject:types forKey:@"parameters_type"];\n \n char *ret_type = (char *)method_copyReturnType(methods[i]);\n [m setObject:(id)[NSString stringWithUTF8String:ret_type] forKey:@"return_type"];\n \n long imp = (long)method_getImplementation(methods[i]);\n [m setObject:[NSNumber numberWithLongLong:imp] forKey:@"implementation"];\n \n [result addObject:m];\n }\n RETURN(result);\n ' command = string.Template(tmpString).substitute(cls=cls) methods = fb.evaluate(command) methods = [Method(m) for m in methods] </DeepExtract> if not methods: print('No methods were found') for m in methods: if showaddr: print(prefix + ' ' + m.prettyPrintString() + ' ' + str(m.imp)) else: print(prefix + ' ' + m.prettyPrintString())
def printInstanceMethods(cls, showaddr=False, prefix='-'): tmpString = '\n unsigned int outCount;\n Method *methods = (Method *)class_copyMethodList((Class)$cls, &outCount);\n NSMutableArray *result = (id)[NSMutableArray array];\n \n for (int i = 0; i < outCount; i++) {\n NSMutableDictionary *m = (id)[NSMutableDictionary dictionary];\n\n SEL name = (SEL)method_getName(methods[i]);\n [m setObject:(id)NSStringFromSelector(name) forKey:@"name"];\n \n char * encoding = (char *)method_getTypeEncoding(methods[i]);\n [m setObject:(id)[NSString stringWithUTF8String:encoding] forKey:@"type_encoding"];\n \n NSMutableArray *types = (id)[NSMutableArray array];\n NSInteger args = (NSInteger)method_getNumberOfArguments(methods[i]);\n for (int idx = 0; idx < args; idx++) {\n char *type = (char *)method_copyArgumentType(methods[i], idx);\n [types addObject:(id)[NSString stringWithUTF8String:type]];\n }\n [m setObject:types forKey:@"parameters_type"];\n \n char *ret_type = (char *)method_copyReturnType(methods[i]);\n [m setObject:(id)[NSString stringWithUTF8String:ret_type] forKey:@"return_type"];\n \n long imp = (long)method_getImplementation(methods[i]);\n [m setObject:[NSNumber numberWithLongLong:imp] forKey:@"implementation"];\n \n [result addObject:m];\n }\n RETURN(result);\n ' command = string.Template(tmpString).substitute(cls=cls) methods = fb.evaluate(command) methods = [Method(m) for m in methods] if not methods: print('No methods were found') for m in methods: if showaddr: print(prefix + ' ' + m.prettyPrintString() + ' ' + str(m.imp)) else: print(prefix + ' ' + m.prettyPrintString())
chisel
positive
def test_time_log(self): logger = get_logger(__name__) logger.handlers = [] @time_log(logger) def _my_timed_fn(): for _ in range(2): pass with LogCapture() as lc: <DeepExtract> for _ in range(2): pass </DeepExtract> logged_message = lc.records[-1].getMessage() self.assertTrue(re.match('_my_timed_fn function ran for [0-9]+ milliseconds', logged_message))
def test_time_log(self): logger = get_logger(__name__) logger.handlers = [] @time_log(logger) def _my_timed_fn(): for _ in range(2): pass with LogCapture() as lc: for _ in range(2): pass logged_message = lc.records[-1].getMessage() self.assertTrue(re.match('_my_timed_fn function ran for [0-9]+ milliseconds', logged_message))
darts
positive
def test_converter(tmpdir): converted_fp = Path(tmpdir, 'converted.asreview') copyfile(OLD_STATE_FP, converted_fp) unzipped_fp = Path(tmpdir, 'unzipped.asreview') with ZipFile(converted_fp) as zipobj: zipobj.extractall(unzipped_fp) converted_fp = unzipped_fp upgrade_asreview_project_file(converted_fp, from_version=0, to_version=1) <DeepExtract> with open(Path(Path(converted_fp, 'legacy', 'result.json').parent, 'labeled.json'), 'r') as file: labeled_json = json.load(file) old_labels = [x[1] for x in labeled_json] with open_state_legacy(Path(converted_fp, 'legacy', 'result.json')) as old_state: old_state_length = len(old_state._state_dict['labels']) data_hash = list(old_state._state_dict['data_properties'].keys())[0] old_feature_matrix = old_state.get_feature_matrix(data_hash) old_settings = old_state.settings.to_dict() with open_state(converted_fp) as new_state: new_record_ids = new_state.get_order_of_labeling().tolist() new_labels = new_state.get_labels().tolist() new_settings = new_state.settings.to_dict() new_project = ASReviewProject(converted_fp) feature_extraction_method = new_project.feature_matrices[0]['id'] new_feature_matrix = new_project.get_feature_matrix(feature_extraction_method) assert max(new_record_ids) < old_state_length assert old_labels == new_labels assert (old_feature_matrix != new_feature_matrix).nnz == 0 assert old_settings == new_settings </DeepExtract> assert is_converted_project(converted_fp) rollback_conversion(converted_fp) zipped_fp = make_archive(Path(tmpdir, 'zipped'), 'zip', converted_fp) zipped_fp = Path(zipped_fp).rename(Path(tmpdir, 'zipped.asreview')) state = state_from_file(zipped_fp)[zipped_fp.name] assert isinstance(state, JSONState)
def test_converter(tmpdir): converted_fp = Path(tmpdir, 'converted.asreview') copyfile(OLD_STATE_FP, converted_fp) unzipped_fp = Path(tmpdir, 'unzipped.asreview') with ZipFile(converted_fp) as zipobj: zipobj.extractall(unzipped_fp) converted_fp = unzipped_fp upgrade_asreview_project_file(converted_fp, from_version=0, to_version=1) with open(Path(Path(converted_fp, 'legacy', 'result.json').parent, 'labeled.json'), 'r') as file: labeled_json = json.load(file) old_labels = [x[1] for x in labeled_json] with open_state_legacy(Path(converted_fp, 'legacy', 'result.json')) as old_state: old_state_length = len(old_state._state_dict['labels']) data_hash = list(old_state._state_dict['data_properties'].keys())[0] old_feature_matrix = old_state.get_feature_matrix(data_hash) old_settings = old_state.settings.to_dict() with open_state(converted_fp) as new_state: new_record_ids = new_state.get_order_of_labeling().tolist() new_labels = new_state.get_labels().tolist() new_settings = new_state.settings.to_dict() new_project = ASReviewProject(converted_fp) feature_extraction_method = new_project.feature_matrices[0]['id'] new_feature_matrix = new_project.get_feature_matrix(feature_extraction_method) assert max(new_record_ids) < old_state_length assert old_labels == new_labels assert (old_feature_matrix != new_feature_matrix).nnz == 0 assert old_settings == new_settings assert is_converted_project(converted_fp) rollback_conversion(converted_fp) zipped_fp = make_archive(Path(tmpdir, 'zipped'), 'zip', converted_fp) zipped_fp = Path(zipped_fp).rename(Path(tmpdir, 'zipped.asreview')) state = state_from_file(zipped_fp)[zipped_fp.name] assert isinstance(state, JSONState)
asreview
positive
def lovasz_hinge_flat(self, logits, labels): """ Binary Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore """ if len(labels) == 0: return logits.sum() * 0.0 signs = 2.0 * labels.float() - 1.0 errors = 1.0 - logits * Variable(signs) (errors_sorted, perm) = torch.sort(errors, dim=0, descending=True) perm = perm.data gt_sorted = labels[perm] <DeepExtract> p = len(gt_sorted) gts = gt_sorted.sum() intersection = gts - gt_sorted.float().cumsum(0) union = gts + (1 - gt_sorted.float()).cumsum(0) jaccard = 1.0 - intersection / union if p > 1: jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] grad = jaccard </DeepExtract> loss = torch.dot(F.relu(errors_sorted), Variable(grad)) return loss
def lovasz_hinge_flat(self, logits, labels): """ Binary Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore """ if len(labels) == 0: return logits.sum() * 0.0 signs = 2.0 * labels.float() - 1.0 errors = 1.0 - logits * Variable(signs) (errors_sorted, perm) = torch.sort(errors, dim=0, descending=True) perm = perm.data gt_sorted = labels[perm] p = len(gt_sorted) gts = gt_sorted.sum() intersection = gts - gt_sorted.float().cumsum(0) union = gts + (1 - gt_sorted.float()).cumsum(0) jaccard = 1.0 - intersection / union if p > 1: jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] grad = jaccard loss = torch.dot(F.relu(errors_sorted), Variable(grad)) return loss
EmbedMask
positive
@admin.display(description='Photo')
def thumbnail(self, obj):
    try:
        img_tag = '<img src="%s" width="200px"/>' % obj.photo.url
    except ValueError:
        return ''
    <DeepExtract>
    url = reverse('admin:example_app_blind_change', args=(obj.id,))
    url = url
    </DeepExtract>
    return f'<a href="{url}">{img_tag}</a>'
@admin.display(description='Photo')
def thumbnail(self, obj):
    try:
        img_tag = '<img src="%s" width="200px"/>' % obj.photo.url
    except ValueError:
        return ''
    url = reverse('admin:example_app_blind_change', args=(obj.id,))
    url = url
    return f'<a href="{url}">{img_tag}</a>'
django-silk
positive
def CopyDigitalSignature(SignedSourceFile, UnsignedFile, SignedFile=None): """Extracts the digital signature from file SignedSourceFile and adds it to file UnsignedFile When SignedFile is not None, writes the modified file to SignedFile Returns the modified file as a PE file """ peSignedSource = pefile.PE(SignedSourceFile) address = peSignedSource.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress size = peSignedSource.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].Size if address == 0: print('Error: source file not signed') return signature = peSignedSource.write()[address:] <DeepExtract> pe = pefile.PE(UnsignedFile) address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress = 0 pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].Size = 0 if address != 0: peUnsignedFile = pefile.PE(data=pe.write()[0:address]) else: peUnsignedFile = pefile.PE(data=pe.write()) peUnsignedFile.OPTIONAL_HEADER.CheckSum = peUnsignedFile.generate_checksum() new_file_data = peUnsignedFile.write() if UnsignedFile: f = file(UnsignedFile, 'wb+') f.write(new_file_data) f.close() peUnsigned = new_file_data </DeepExtract> peSignedFileTemp = pefile.PE(data=''.join(list(peUnsigned) + list(signature))) peSignedFileTemp.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress = len(peUnsigned) peSignedFileTemp.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].Size = size peSignedFile = pefile.PE(data=peSignedFileTemp.write()) peSignedFile.OPTIONAL_HEADER.CheckSum = peSignedFile.generate_checksum() new_file_data = peSignedFile.write() if SignedFile: f = file(SignedFile, 'wb+') f.write(new_file_data) f.close() return new_file_data
def CopyDigitalSignature(SignedSourceFile, UnsignedFile, SignedFile=None): """Extracts the digital signature from file SignedSourceFile and adds it to file UnsignedFile When SignedFile is not None, writes the modified file to SignedFile Returns the modified file as a PE file """ peSignedSource = pefile.PE(SignedSourceFile) address = peSignedSource.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress size = peSignedSource.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].Size if address == 0: print('Error: source file not signed') return signature = peSignedSource.write()[address:] pe = pefile.PE(UnsignedFile) address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress = 0 pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].Size = 0 if address != 0: peUnsignedFile = pefile.PE(data=pe.write()[0:address]) else: peUnsignedFile = pefile.PE(data=pe.write()) peUnsignedFile.OPTIONAL_HEADER.CheckSum = peUnsignedFile.generate_checksum() new_file_data = peUnsignedFile.write() if UnsignedFile: f = file(UnsignedFile, 'wb+') f.write(new_file_data) f.close() peUnsigned = new_file_data peSignedFileTemp = pefile.PE(data=''.join(list(peUnsigned) + list(signature))) peSignedFileTemp.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress = len(peUnsigned) peSignedFileTemp.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].Size = size peSignedFile = pefile.PE(data=peSignedFileTemp.write()) peSignedFile.OPTIONAL_HEADER.CheckSum = peSignedFile.generate_checksum() new_file_data = peSignedFile.write() if SignedFile: f = file(SignedFile, 'wb+') f.write(new_file_data) f.close() return new_file_data
analyst-scripts
positive
def unit():
    m = self.RE_UNIT.search(token)
    if m:
        idx = begin + m.start(2)
        <DeepExtract>
        tokens.append(token[:m.start(2)])
        offsets.append((begin + offset, idx + offset))
        </DeepExtract>
        <DeepExtract>
        tokens.append(m.group(2))
        offsets.append((idx + offset, end + offset))
        </DeepExtract>
        return True
    return False
def unit():
    m = self.RE_UNIT.search(token)
    if m:
        idx = begin + m.start(2)
        tokens.append(token[:m.start(2)])
        offsets.append((begin + offset, idx + offset))
        tokens.append(m.group(2))
        offsets.append((idx + offset, end + offset))
        return True
    return False
elit
positive
def add_repo(self, name, url, **kw):
    gpg_url = kw.pop('gpg_url', None)
    if gpg_url:
        <DeepExtract>
        raise NotImplementedError()
        </DeepExtract>
    safe_filename = '%s.list' % name.replace(' ', '-')
    mode = 420
    if urlparse(url).password:
        mode = 384
        self.remote_conn.logger.info('Creating repo file with mode 0600 due to presence of password')
    self.remote_conn.remote_module.write_sources_list(url, self.remote_info.codename, safe_filename, mode)
    fqdn = urlparse(url).hostname
    self.remote_conn.remote_module.set_apt_priority(fqdn)
def add_repo(self, name, url, **kw):
    gpg_url = kw.pop('gpg_url', None)
    if gpg_url:
        raise NotImplementedError()
    safe_filename = '%s.list' % name.replace(' ', '-')
    mode = 420
    if urlparse(url).password:
        mode = 384
        self.remote_conn.logger.info('Creating repo file with mode 0600 due to presence of password')
    self.remote_conn.remote_module.write_sources_list(url, self.remote_info.codename, safe_filename, mode)
    fqdn = urlparse(url).hostname
    self.remote_conn.remote_module.set_apt_priority(fqdn)
ceph-deploy
positive
def decode_annos(frame, frame_id): """Decodes some meta data (e.g. calibration matrices, frame matrices).""" veh_to_global = np.array(frame.pose.transform) ref_pose = np.reshape(np.array(frame.pose.transform), [4, 4]) global_from_ref_rotation = ref_pose[:3, :3] <DeepExtract> objects = [] for (object_id, label) in enumerate(frame.laser_labels): category_label = label.type box = label.box speed = [label.metadata.speed_x, label.metadata.speed_y] accel = [label.metadata.accel_x, label.metadata.accel_y] num_lidar_points_in_box = label.num_lidar_points_in_box if num_lidar_points_in_box <= 0: combined_difficulty_level = 999 if label.detection_difficulty_level == 0: if num_lidar_points_in_box >= 5: combined_difficulty_level = 1 else: combined_difficulty_level = 2 else: combined_difficulty_level = label.detection_difficulty_level ref_velocity = global_vel_to_ref(speed, global_from_ref_rotation) objects.append({'id': object_id, 'name': label.id, 'label': category_label, 'box': np.array([box.center_x, box.center_y, box.center_z, box.length, box.width, box.height, ref_velocity[0], ref_velocity[1], box.heading], dtype=np.float32), 'num_points': num_lidar_points_in_box, 'detection_difficulty_level': label.detection_difficulty_level, 'combined_difficulty_level': combined_difficulty_level, 'global_speed': np.array(speed, dtype=np.float32), 'global_accel': np.array(accel, dtype=np.float32)}) objects = objects </DeepExtract> frame_name = '{scene_name}_{location}_{time_of_day}_{timestamp}'.format(scene_name=frame.context.name, location=frame.context.stats.location, time_of_day=frame.context.stats.time_of_day, timestamp=frame.timestamp_micros) annos = {'scene_name': frame.context.name, 'frame_name': frame_name, 'frame_id': frame_id, 'veh_to_global': veh_to_global, 'objects': objects} return annos
def decode_annos(frame, frame_id): """Decodes some meta data (e.g. calibration matrices, frame matrices).""" veh_to_global = np.array(frame.pose.transform) ref_pose = np.reshape(np.array(frame.pose.transform), [4, 4]) global_from_ref_rotation = ref_pose[:3, :3] objects = [] for (object_id, label) in enumerate(frame.laser_labels): category_label = label.type box = label.box speed = [label.metadata.speed_x, label.metadata.speed_y] accel = [label.metadata.accel_x, label.metadata.accel_y] num_lidar_points_in_box = label.num_lidar_points_in_box if num_lidar_points_in_box <= 0: combined_difficulty_level = 999 if label.detection_difficulty_level == 0: if num_lidar_points_in_box >= 5: combined_difficulty_level = 1 else: combined_difficulty_level = 2 else: combined_difficulty_level = label.detection_difficulty_level ref_velocity = global_vel_to_ref(speed, global_from_ref_rotation) objects.append({'id': object_id, 'name': label.id, 'label': category_label, 'box': np.array([box.center_x, box.center_y, box.center_z, box.length, box.width, box.height, ref_velocity[0], ref_velocity[1], box.heading], dtype=np.float32), 'num_points': num_lidar_points_in_box, 'detection_difficulty_level': label.detection_difficulty_level, 'combined_difficulty_level': combined_difficulty_level, 'global_speed': np.array(speed, dtype=np.float32), 'global_accel': np.array(accel, dtype=np.float32)}) objects = objects frame_name = '{scene_name}_{location}_{time_of_day}_{timestamp}'.format(scene_name=frame.context.name, location=frame.context.stats.location, time_of_day=frame.context.stats.time_of_day, timestamp=frame.timestamp_micros) annos = {'scene_name': frame.context.name, 'frame_name': frame_name, 'frame_id': frame_id, 'veh_to_global': veh_to_global, 'objects': objects} return annos
CenterPoint
positive
def clear_versionlock(): """A package can be locked to a specific version using a YUM/DNF versionlock plugin. Then, even if a newer version of a package is available, yum or dnf won't update it. That may cause a problem during the conversion as other RHEL packages may depend on a different version than is locked. Therefore, the Convert2RHEL utility clears all the locks to prevent a system conversion failure. DNF has been designed to be backwards compatible with YUM. So the file in which the version locks are defined for YUM works correctly even with DNF thanks to symlinks created by DNF. """ if os.path.isfile(_VERSIONLOCK_FILE_PATH) and os.path.getsize(_VERSIONLOCK_FILE_PATH) > 0: loggerinst.warning('YUM/DNF versionlock plugin is in use. It may cause the conversion to fail.') loggerinst.info('Upon continuing, we will clear all package version locks.') utils.ask_to_continue() versionlock_file.backup() loggerinst.info('Clearing package versions locks...') <DeepExtract> if ['clear'] is None: ['clear'] = [] cmd = ['yum', 'versionlock', '-y'] repos_to_disable = [] if isinstance(disable_repos, list): repos_to_disable = disable_repos else: repos_to_disable = tool_opts.disablerepo for repo in repos_to_disable: cmd.append('--disablerepo=%s' % repo) if set_releasever and system_info.releasever: cmd.append('--releasever=%s' % system_info.releasever) if system_info.version.major == 8: cmd.append('--setopt=module_platform_id=platform:el8') repos_to_enable = [] if isinstance(enable_repos, list): repos_to_enable = enable_repos else: repos_to_enable = system_info.get_enabled_rhel_repos() for repo in repos_to_enable: cmd.append('--enablerepo=%s' % repo) cmd.extend(['clear']) (stdout, returncode) = utils.run_subprocess(cmd, print_output=False) nothing_to_do_error_exists = stdout.endswith('Error: Nothing to do\n') if returncode == 1 and nothing_to_do_error_exists: loggerinst.debug('Yum has nothing to do. Ignoring.') returncode = 0 return (stdout, returncode) </DeepExtract> else: loggerinst.info('Usage of YUM/DNF versionlock plugin not detected.')
def clear_versionlock(): """A package can be locked to a specific version using a YUM/DNF versionlock plugin. Then, even if a newer version of a package is available, yum or dnf won't update it. That may cause a problem during the conversion as other RHEL packages may depend on a different version than is locked. Therefore, the Convert2RHEL utility clears all the locks to prevent a system conversion failure. DNF has been designed to be backwards compatible with YUM. So the file in which the version locks are defined for YUM works correctly even with DNF thanks to symlinks created by DNF. """ if os.path.isfile(_VERSIONLOCK_FILE_PATH) and os.path.getsize(_VERSIONLOCK_FILE_PATH) > 0: loggerinst.warning('YUM/DNF versionlock plugin is in use. It may cause the conversion to fail.') loggerinst.info('Upon continuing, we will clear all package version locks.') utils.ask_to_continue() versionlock_file.backup() loggerinst.info('Clearing package versions locks...') if ['clear'] is None: ['clear'] = [] cmd = ['yum', 'versionlock', '-y'] repos_to_disable = [] if isinstance(disable_repos, list): repos_to_disable = disable_repos else: repos_to_disable = tool_opts.disablerepo for repo in repos_to_disable: cmd.append('--disablerepo=%s' % repo) if set_releasever and system_info.releasever: cmd.append('--releasever=%s' % system_info.releasever) if system_info.version.major == 8: cmd.append('--setopt=module_platform_id=platform:el8') repos_to_enable = [] if isinstance(enable_repos, list): repos_to_enable = enable_repos else: repos_to_enable = system_info.get_enabled_rhel_repos() for repo in repos_to_enable: cmd.append('--enablerepo=%s' % repo) cmd.extend(['clear']) (stdout, returncode) = utils.run_subprocess(cmd, print_output=False) nothing_to_do_error_exists = stdout.endswith('Error: Nothing to do\n') if returncode == 1 and nothing_to_do_error_exists: loggerinst.debug('Yum has nothing to do. Ignoring.') returncode = 0 return (stdout, returncode) else: loggerinst.info('Usage of YUM/DNF versionlock plugin not detected.')
convert2rhel
positive
def test__bathroom_playing__unmute(self, given_that, when_new, assert_that, start_evening_mode): <DeepExtract> given_that.mock_functions_are_cleared() return lambda : when_new.time(hour=EVENING_HOUR) </DeepExtract> given_that.state_of(ID['bathroom']['speaker']).is_set_to('playing') when_new.motion_bathroom() <DeepExtract> assert_that('media_player/volume_set').was.called_with(entity_id=ID['bathroom']['speaker'], volume_level=BATHROOM_VOLUMES['regular']) </DeepExtract>
def test__bathroom_playing__unmute(self, given_that, when_new, assert_that, start_evening_mode): given_that.mock_functions_are_cleared() return lambda : when_new.time(hour=EVENING_HOUR) given_that.state_of(ID['bathroom']['speaker']).is_set_to('playing') when_new.motion_bathroom() assert_that('media_player/volume_set').was.called_with(entity_id=ID['bathroom']['speaker'], volume_level=BATHROOM_VOLUMES['regular'])
Appdaemon-Test-Framework
positive
def urlopen(self, method, url, redirect=True, **kw): """Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.""" u = parse_url(url) if u.scheme == 'http': headers = kw.get('headers', self.headers) <DeepExtract> headers_ = {'Accept': '*/*'} netloc = parse_url(url).netloc if netloc: headers_['Host'] = netloc if headers: headers_.update(headers) kw['headers'] = headers_ </DeepExtract> return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def urlopen(self, method, url, redirect=True, **kw): """Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.""" u = parse_url(url) if u.scheme == 'http': headers = kw.get('headers', self.headers) headers_ = {'Accept': '*/*'} netloc = parse_url(url).netloc if netloc: headers_['Host'] = netloc if headers: headers_.update(headers) kw['headers'] = headers_ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
cachewarmer
positive
def __init__(self, tmp_path: pathlib.Path, template_header: Dict, template_tree: DocsMarkdownNode, validate_yaml_header: bool, validate_md_body: bool, governed_section: Optional[str]=None): """Initialize markdown validator.""" self._validate_yaml_header = validate_yaml_header self._validate_md_body = validate_md_body self.governed_section = governed_section.strip(' ') if governed_section is not None else None self.template_header = template_header self.template_tree = template_tree self.template_path = tmp_path <DeepExtract> if TEMPLATE_VERSION_HEADER not in header.keys(): self.template_version = START_TEMPLATE_VERSION self.template_version = header[TEMPLATE_VERSION_HEADER] </DeepExtract> if self.template_version not in str(self.template_path): raise TrestleError(f'Version of the template {self.template_version} does not match the path {self.template_path}.' + f'Move the template to the folder {self.template_version}') if 'Version' in self.template_header.keys() and self.template_header['Version'] != self.template_version: raise TrestleError(f'Version does not match template-version in template: {self.template_path}.') self._ignore_headers = [] for key in self.template_header.keys(): if key.lower().startswith('x-trestle-'): self._ignore_headers.append(key.lower()) if key.lower() == 'x-trestle-ignore': for key2 in template_header['x-trestle-ignore']: self._ignore_headers.append(key2.lower())
def __init__(self, tmp_path: pathlib.Path, template_header: Dict, template_tree: DocsMarkdownNode, validate_yaml_header: bool, validate_md_body: bool, governed_section: Optional[str]=None): """Initialize markdown validator.""" self._validate_yaml_header = validate_yaml_header self._validate_md_body = validate_md_body self.governed_section = governed_section.strip(' ') if governed_section is not None else None self.template_header = template_header self.template_tree = template_tree self.template_path = tmp_path if TEMPLATE_VERSION_HEADER not in header.keys(): self.template_version = START_TEMPLATE_VERSION self.template_version = header[TEMPLATE_VERSION_HEADER] if self.template_version not in str(self.template_path): raise TrestleError(f'Version of the template {self.template_version} does not match the path {self.template_path}.' + f'Move the template to the folder {self.template_version}') if 'Version' in self.template_header.keys() and self.template_header['Version'] != self.template_version: raise TrestleError(f'Version does not match template-version in template: {self.template_path}.') self._ignore_headers = [] for key in self.template_header.keys(): if key.lower().startswith('x-trestle-'): self._ignore_headers.append(key.lower()) if key.lower() == 'x-trestle-ignore': for key2 in template_header['x-trestle-ignore']: self._ignore_headers.append(key2.lower())
compliance-trestle
positive
def load_haven_mat(folder_path: Union[str, Path]='resources/haven', used_assets: Optional[List[str]]=None, preload: bool=False, fill_used_empty_materials: bool=False, add_cp: Optional[Dict[str, Any]]=None, return_random_element: bool=False) -> Union[List[Material], Material]: """ Loads all specified haven textures from the given directory. :param folder_path: The path to the downloaded haven. :param used_assets: A list of all asset names, you want to use. The asset-name must not be typed in completely, only the beginning the name starts with. By default, all assets will be loaded, specified by an empty list or None. :param preload: If set true, only the material names are loaded and not the complete material. :param fill_used_empty_materials: If set true, the preloaded materials, which are used are now loaded completely. :param add_cp: A dictionary of materials and the respective properties. :param return_random_element: If this is True only a single Material is loaded and returned, if you want to sample many materials load them all with the preload option, use them and then fill the used empty materials instead of calling this function multiple times. :return a list of all loaded materials, if preload is active these materials do not contain any textures yet and have to be filled before rendering (by calling this function again, there is no need to save the prior returned list) or if return_random_element is True only a single Material is returned """ if add_cp is None: add_cp = {} if used_assets is None: used_assets = [] addon_utils.enable('node_wrangler') haven_folder = Path(resolve_path(str(folder_path))) if preload and fill_used_empty_materials: raise RuntimeError('Preload and fill used empty materials can not be done at the same time, check config!') if not haven_folder.exists(): raise FileNotFoundError(f'The given haven folder does not exist: {haven_folder}') if haven_folder.name != 'textures' and (haven_folder / 'textures').exists(): haven_folder /= 'textures' texture_names: List[str] = os.listdir(haven_folder) texture_names.sort() if not texture_names: raise FileNotFoundError(f'No texture folders found in {haven_folder}.') if used_assets: texture_names = [texture_name for texture_name in texture_names if any((texture_name.startswith(asset) for asset in used_assets))] if not texture_names: raise FileNotFoundError(f'No texture folders found in {haven_folder} for which used_assets can be meet: {used_assets}.') if return_random_element: texture_names = [random.choice(texture_names)] materials: List[Material] = [] for texture_name in texture_names: texture_folder_path = haven_folder / texture_name if not texture_folder_path.is_dir(): print(f'Ignoring {texture_folder_path}, must be a folder.') continue <DeepExtract> if isinstance(str(texture_folder_path), str): str(texture_folder_path) = Path(str(texture_folder_path)) texture_map_paths = [str(path.absolute()) for path in str(texture_folder_path).glob('*.jpg')] (color_path, color_identifier) = identify_base_color_image_path(texture_map_paths) if not color_path: texture_map_paths_by_type = None texture_map_types = _texture_map_identifiers.keys() texture_map_paths_by_type = {type: '' for type in texture_map_types} texture_map_paths_by_type['base color'] = color_path for type_val in texture_map_types: for identifier in _texture_map_identifiers[type_val]: texture_map_path_lowercase = color_path.replace(color_identifier, identifier).lower() for path in texture_map_paths: if path.lower() == texture_map_path_lowercase: texture_map_paths_by_type[type_val] = path break texture_map_paths_by_type = texture_map_paths_by_type </DeepExtract> if texture_map_paths_by_type is None: print(f'Ignoring {texture_name}, could not identify texture maps.') continue if fill_used_empty_materials: new_mat = MaterialLoaderUtility.find_cc_material_by_name(texture_name, add_cp) else: new_mat = MaterialLoaderUtility.create_new_cc_material(texture_name, add_cp) materials.append(Material(new_mat)) if preload: continue if fill_used_empty_materials and (not MaterialLoaderUtility.is_material_used(new_mat)): continue HavenMaterialLoader.create_material(new_mat, texture_map_paths_by_type['base color'], texture_map_paths_by_type['ambient occlusion'], texture_map_paths_by_type['specular'], texture_map_paths_by_type['roughness'], texture_map_paths_by_type['transparency'], texture_map_paths_by_type['normal'], texture_map_paths_by_type['displacement'], texture_map_paths_by_type['bump']) if return_random_element: if len(materials) != 1: raise RuntimeError(f'The amount of loaded materials is not one: {materials}, this should not happen!') return materials[0] return materials
def load_haven_mat(folder_path: Union[str, Path]='resources/haven', used_assets: Optional[List[str]]=None, preload: bool=False, fill_used_empty_materials: bool=False, add_cp: Optional[Dict[str, Any]]=None, return_random_element: bool=False) -> Union[List[Material], Material]: """ Loads all specified haven textures from the given directory. :param folder_path: The path to the downloaded haven. :param used_assets: A list of all asset names, you want to use. The asset-name must not be typed in completely, only the beginning the name starts with. By default, all assets will be loaded, specified by an empty list or None. :param preload: If set true, only the material names are loaded and not the complete material. :param fill_used_empty_materials: If set true, the preloaded materials, which are used are now loaded completely. :param add_cp: A dictionary of materials and the respective properties. :param return_random_element: If this is True only a single Material is loaded and returned, if you want to sample many materials load them all with the preload option, use them and then fill the used empty materials instead of calling this function multiple times. :return a list of all loaded materials, if preload is active these materials do not contain any textures yet and have to be filled before rendering (by calling this function again, there is no need to save the prior returned list) or if return_random_element is True only a single Material is returned """ if add_cp is None: add_cp = {} if used_assets is None: used_assets = [] addon_utils.enable('node_wrangler') haven_folder = Path(resolve_path(str(folder_path))) if preload and fill_used_empty_materials: raise RuntimeError('Preload and fill used empty materials can not be done at the same time, check config!') if not haven_folder.exists(): raise FileNotFoundError(f'The given haven folder does not exist: {haven_folder}') if haven_folder.name != 'textures' and (haven_folder / 'textures').exists(): haven_folder /= 'textures' texture_names: List[str] = os.listdir(haven_folder) texture_names.sort() if not texture_names: raise FileNotFoundError(f'No texture folders found in {haven_folder}.') if used_assets: texture_names = [texture_name for texture_name in texture_names if any((texture_name.startswith(asset) for asset in used_assets))] if not texture_names: raise FileNotFoundError(f'No texture folders found in {haven_folder} for which used_assets can be meet: {used_assets}.') if return_random_element: texture_names = [random.choice(texture_names)] materials: List[Material] = [] for texture_name in texture_names: texture_folder_path = haven_folder / texture_name if not texture_folder_path.is_dir(): print(f'Ignoring {texture_folder_path}, must be a folder.') continue if isinstance(str(texture_folder_path), str): str(texture_folder_path) = Path(str(texture_folder_path)) texture_map_paths = [str(path.absolute()) for path in str(texture_folder_path).glob('*.jpg')] (color_path, color_identifier) = identify_base_color_image_path(texture_map_paths) if not color_path: texture_map_paths_by_type = None texture_map_types = _texture_map_identifiers.keys() texture_map_paths_by_type = {type: '' for type in texture_map_types} texture_map_paths_by_type['base color'] = color_path for type_val in texture_map_types: for identifier in _texture_map_identifiers[type_val]: texture_map_path_lowercase = color_path.replace(color_identifier, identifier).lower() for path in texture_map_paths: if path.lower() == texture_map_path_lowercase: texture_map_paths_by_type[type_val] = path break texture_map_paths_by_type = texture_map_paths_by_type if texture_map_paths_by_type is None: print(f'Ignoring {texture_name}, could not identify texture maps.') continue if fill_used_empty_materials: new_mat = MaterialLoaderUtility.find_cc_material_by_name(texture_name, add_cp) else: new_mat = MaterialLoaderUtility.create_new_cc_material(texture_name, add_cp) materials.append(Material(new_mat)) if preload: continue if fill_used_empty_materials and (not MaterialLoaderUtility.is_material_used(new_mat)): continue HavenMaterialLoader.create_material(new_mat, texture_map_paths_by_type['base color'], texture_map_paths_by_type['ambient occlusion'], texture_map_paths_by_type['specular'], texture_map_paths_by_type['roughness'], texture_map_paths_by_type['transparency'], texture_map_paths_by_type['normal'], texture_map_paths_by_type['displacement'], texture_map_paths_by_type['bump']) if return_random_element: if len(materials) != 1: raise RuntimeError(f'The amount of loaded materials is not one: {materials}, this should not happen!') return materials[0] return materials
BlenderProc
positive
def cbPose(self, msg): <DeepExtract> self.vel = msg.twist.twist.linear </DeepExtract> q = msg.pose.pose.orientation self.psi = np.arctan2(2.0 * (q.w * q.z + q.x * q.y), 1 - 2 * (q.y * q.y + q.z * q.z)) self.pose = msg.pose <DeepExtract> marker = Marker() marker.header.stamp = rospy.Time.now() marker.header.frame_id = 'map' marker.ns = 'path_arrow' marker.id = 0 marker.type = marker.ARROW marker.action = marker.ADD marker.points.append(self.pose.pose.position) marker.points.append(self.desired_position.pose.position) marker.scale = Vector3(x=0.1, y=0.2, z=0.2) marker.color = ColorRGBA(b=1.0, a=1.0) marker.lifetime = rospy.Duration(1) self.pub_path_marker.publish(marker) </DeepExtract> v_p = msg.pose.pose.position v_g = self.sub_goal v_pg = np.array([v_g.x - v_p.x, v_g.y - v_p.y]) self.distance = np.linalg.norm(v_pg)
def cbPose(self, msg): self.vel = msg.twist.twist.linear q = msg.pose.pose.orientation self.psi = np.arctan2(2.0 * (q.w * q.z + q.x * q.y), 1 - 2 * (q.y * q.y + q.z * q.z)) self.pose = msg.pose marker = Marker() marker.header.stamp = rospy.Time.now() marker.header.frame_id = 'map' marker.ns = 'path_arrow' marker.id = 0 marker.type = marker.ARROW marker.action = marker.ADD marker.points.append(self.pose.pose.position) marker.points.append(self.desired_position.pose.position) marker.scale = Vector3(x=0.1, y=0.2, z=0.2) marker.color = ColorRGBA(b=1.0, a=1.0) marker.lifetime = rospy.Duration(1) self.pub_path_marker.publish(marker) v_p = msg.pose.pose.position v_g = self.sub_goal v_pg = np.array([v_g.x - v_p.x, v_g.y - v_p.y]) self.distance = np.linalg.norm(v_pg)
arena-rosnav
positive
def test_sample_shape(self): ed_flights = self.ed_flights_small() sample_ed_flights = ed_flights.sample(n=10, random_state=self.SEED) <DeepExtract> sample_pd_flights = self.pd_flights_small().loc[eland_to_pandas(sample_ed_flights).index, eland_to_pandas(sample_ed_flights).columns] sample_pd_flights = sample_pd_flights </DeepExtract> assert sample_pd_flights.shape == sample_ed_flights.shape
def test_sample_shape(self): ed_flights = self.ed_flights_small() sample_ed_flights = ed_flights.sample(n=10, random_state=self.SEED) sample_pd_flights = self.pd_flights_small().loc[eland_to_pandas(sample_ed_flights).index, eland_to_pandas(sample_ed_flights).columns] sample_pd_flights = sample_pd_flights assert sample_pd_flights.shape == sample_ed_flights.shape
eland
positive
def start(self, cmd, env_vars=None, working_dir=None): """ Start process and return immediately """ self.print_next_error_lines = 0 self.print_next_error_file = False env = QtCore.QProcessEnvironment.systemEnvironment() if env_vars: for key in env_vars: env.insert(key, env_vars[key]) <DeepExtract> if env.contains('APPIMAGE'): appdir = env.value('APPDIR') keys = env.keys() for k in keys: vals = env.value(k).split(':') newvals = '' for val in vals: if not val.startswith(appdir): newvals += val + ':' newvals = newvals.rstrip(':') if newvals: env.insert(k, newvals) else: env.remove(k) </DeepExtract> self.process.setProcessEnvironment(env) if working_dir: self.process.setWorkingDirectory(working_dir) if platform.system() == 'Windows': cmd = [os.path.join(FreeCAD.getHomePath(), 'bin', 'python.exe'), '-u', os.path.join(os.path.dirname(__file__), 'WindowsRunWrapper.py')] + cmd FreeCAD.Console.PrintLog('CfdConsoleProcess running command: {}\n'.format(cmd)) self.process.start(cmd[0], cmd[1:])
def start(self, cmd, env_vars=None, working_dir=None): """ Start process and return immediately """ self.print_next_error_lines = 0 self.print_next_error_file = False env = QtCore.QProcessEnvironment.systemEnvironment() if env_vars: for key in env_vars: env.insert(key, env_vars[key]) if env.contains('APPIMAGE'): appdir = env.value('APPDIR') keys = env.keys() for k in keys: vals = env.value(k).split(':') newvals = '' for val in vals: if not val.startswith(appdir): newvals += val + ':' newvals = newvals.rstrip(':') if newvals: env.insert(k, newvals) else: env.remove(k) self.process.setProcessEnvironment(env) if working_dir: self.process.setWorkingDirectory(working_dir) if platform.system() == 'Windows': cmd = [os.path.join(FreeCAD.getHomePath(), 'bin', 'python.exe'), '-u', os.path.join(os.path.dirname(__file__), 'WindowsRunWrapper.py')] + cmd FreeCAD.Console.PrintLog('CfdConsoleProcess running command: {}\n'.format(cmd)) self.process.start(cmd[0], cmd[1:])
CfdOF
positive
def export_graph(root, write): def export_edges(node): for e in node.edges.values(): write(e) <DeepExtract> for e in e.node.edges.values(): write(e) export_edges(e.node) </DeepExtract> <DeepExtract> for e in root.edges.values(): write(e) export_edges(e.node) </DeepExtract>
def export_graph(root, write): def export_edges(node): for e in node.edges.values(): write(e) for e in e.node.edges.values(): write(e) export_edges(e.node) for e in root.edges.values(): write(e) export_edges(e.node)
Coursera_Data_Structures_and_Algorithms_Specialization
positive
def get_climatology_variable(ds): """ Returns the variable describing climatology bounds if it exists. Climatology variables are similar to cell boundary variables that describe the climatology bounds. See Example 7.8 in CF 1.6 :param netCDF4.Dataset ds: An open netCDF4 Dataset :rtype: str or None """ <DeepExtract> for var in ds.variables: if getattr(ds.variables[var], 'axis', '') == 'T': time = var else: candidates = ds.get_variables_by_attributes(standard_name='time') if len(candidates) == 1: time = candidates[0].name else: for candidate in candidates: if candidate.dimensions == (candidate.name,): time = candidate.name time_variables = set(get_time_variables(ds)) coordinate_variables = set(get_coordinate_variables(ds)) if len(time_variables.intersection(coordinate_variables)) == 1: time = list(time_variables.intersection(coordinate_variables))[0] auxiliary_coordinates = set(get_auxiliary_coordinate_variables(ds)) if len(time_variables.intersection(auxiliary_coordinates)) == 1: time = list(time_variables.intersection(auxiliary_coordinates))[0] time = None </DeepExtract> if not time: return None if hasattr(ds.variables[time], 'climatology'): if ds.variables[time].climatology in ds.variables: return ds.variables[time].climatology return None
def get_climatology_variable(ds): """ Returns the variable describing climatology bounds if it exists. Climatology variables are similar to cell boundary variables that describe the climatology bounds. See Example 7.8 in CF 1.6 :param netCDF4.Dataset ds: An open netCDF4 Dataset :rtype: str or None """ for var in ds.variables: if getattr(ds.variables[var], 'axis', '') == 'T': time = var else: candidates = ds.get_variables_by_attributes(standard_name='time') if len(candidates) == 1: time = candidates[0].name else: for candidate in candidates: if candidate.dimensions == (candidate.name,): time = candidate.name time_variables = set(get_time_variables(ds)) coordinate_variables = set(get_coordinate_variables(ds)) if len(time_variables.intersection(coordinate_variables)) == 1: time = list(time_variables.intersection(coordinate_variables))[0] auxiliary_coordinates = set(get_auxiliary_coordinate_variables(ds)) if len(time_variables.intersection(auxiliary_coordinates)) == 1: time = list(time_variables.intersection(auxiliary_coordinates))[0] time = None if not time: return None if hasattr(ds.variables[time], 'climatology'): if ds.variables[time].climatology in ds.variables: return ds.variables[time].climatology return None
compliance-checker
positive
def _random_scale(self, results): if self.ratio_range is not None: <DeepExtract> assert isinstance(self.img_scale[0], tuple) and len(self.img_scale[0]) == 2 (min_ratio, max_ratio) = self.ratio_range assert min_ratio <= max_ratio ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio scale = (int(self.img_scale[0][0] * ratio), int(self.img_scale[0][1] * ratio)) (scale, scale_idx) = (scale, None) </DeepExtract> elif len(self.img_scale) == 1: (scale, scale_idx) = (self.img_scale[0], 0) elif self.multiscale_mode == 'range': <DeepExtract> assert mmcv.is_list_of(self.img_scale, tuple) and len(self.img_scale) == 2 img_scale_long = [max(s) for s in self.img_scale] img_scale_short = [min(s) for s in self.img_scale] long_edge = np.random.randint(min(img_scale_long), max(img_scale_long) + 1) short_edge = np.random.randint(min(img_scale_short), max(img_scale_short) + 1) img_scale = (long_edge, short_edge) (scale, scale_idx) = (img_scale, None) </DeepExtract> elif self.multiscale_mode == 'value': <DeepExtract> assert mmcv.is_list_of(self.img_scale, tuple) scale_idx = np.random.randint(len(self.img_scale)) img_scale = self.img_scale[scale_idx] (scale, scale_idx) = (img_scale, scale_idx) </DeepExtract> else: raise NotImplementedError results['scale'] = scale results['scale_idx'] = scale_idx
def _random_scale(self, results): if self.ratio_range is not None: assert isinstance(self.img_scale[0], tuple) and len(self.img_scale[0]) == 2 (min_ratio, max_ratio) = self.ratio_range assert min_ratio <= max_ratio ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio scale = (int(self.img_scale[0][0] * ratio), int(self.img_scale[0][1] * ratio)) (scale, scale_idx) = (scale, None) elif len(self.img_scale) == 1: (scale, scale_idx) = (self.img_scale[0], 0) elif self.multiscale_mode == 'range': assert mmcv.is_list_of(self.img_scale, tuple) and len(self.img_scale) == 2 img_scale_long = [max(s) for s in self.img_scale] img_scale_short = [min(s) for s in self.img_scale] long_edge = np.random.randint(min(img_scale_long), max(img_scale_long) + 1) short_edge = np.random.randint(min(img_scale_short), max(img_scale_short) + 1) img_scale = (long_edge, short_edge) (scale, scale_idx) = (img_scale, None) elif self.multiscale_mode == 'value': assert mmcv.is_list_of(self.img_scale, tuple) scale_idx = np.random.randint(len(self.img_scale)) img_scale = self.img_scale[scale_idx] (scale, scale_idx) = (img_scale, scale_idx) else: raise NotImplementedError results['scale'] = scale results['scale_idx'] = scale_idx
ACSL
positive
def dumps(self, reqs, project: RootDependency, content=None) -> str: if content: doc = tomlkit.parse(content) else: doc = tomlkit.document() if 'tool' not in doc: doc['tool'] = {'poetry': tomlkit.table()} elif 'poetry' not in doc['tool']: doc['tool']['poetry'] = tomlkit.table() section = doc['tool']['poetry'] section['name'] = project.raw_name for field in self._metafields: value = getattr(project, field) if isinstance(value, tuple): value = list(value) if not value: if field in section: del section[field] elif field not in section: section[field] = value elif section[field].value != value: section[field] = value for name in ('homepage', 'repository', 'documentation'): if name in project.links: section[name] = project.links[name] if project.authors: section['authors'] = [str(author) for author in project.authors] elif 'authors' in section: del section['authors'] if project.readme: section['readme'] = project.readme.path.name elif 'readme' in section: del section['readme'] <DeepExtract> if 'scripts' in section: scripts = {e.name for e in project.entrypoints if e.group == 'console_scripts'} for script_name in list(section['scripts']): if script_name not in scripts: del section['scripts'][script_name] for entrypoint in project.entrypoints: if entrypoint.group != 'console_scripts': continue if 'scripts' not in section: section['scripts'] = tomlkit.table() if entrypoint.extras: content = tomlkit.inline_table() content['callable'] = entrypoint.path content['extras'] = entrypoint.extras else: content = entrypoint.path section['scripts'][entrypoint.name] = content if 'plugins' in section: groups = defaultdict(set) for entrypoint in project.entrypoints: if entrypoint.group != 'console_scripts': groups[entrypoint.group].add(entrypoint.name) for (group_name, group_content) in section['plugins'].items(): if group_name not in groups: del section['plugins'][group_name] continue for script_name in group_content: if script_name not in groups[group_name]: del section['plugins'][group_name][script_name] for entrypoint in project.entrypoints: if entrypoint.group == 'console_scripts': continue if 'plugins' not in section: section['plugins'] = tomlkit.table() if entrypoint.group not in section['plugins']: section['plugins'][entrypoint.group] = tomlkit.table() section['plugins'][entrypoint.group][entrypoint.name] = entrypoint.path </DeepExtract> names_mapping = dict() for (section_name, is_dev) in [('dependencies', False), ('dev-dependencies', True)]: if section_name not in section: section[section_name] = tomlkit.table() continue names = {req.name for req in reqs if is_dev is req.is_dev} | {'python'} for name in dict(section[section_name]): normalized_name = canonicalize_name(name) names_mapping[normalized_name] = name if normalized_name not in names: del section[section_name][name] if section['dependencies'].get('python', '') != (project.python or '*'): section['dependencies']['python'] = str(project.python) or '*' for (section_name, is_dev) in [('dependencies', False), ('dev-dependencies', True)]: for req in reqs: if is_dev is not req.is_dev: continue raw_name = names_mapping.get(req.name, req.raw_name) old_spec = section[section_name].get(raw_name) if old_spec: old_dep = self._make_deps(root=RootDependency(), name=raw_name, content=old_spec, envs={'main'})[0] if req.same_dep(old_dep): continue <DeepExtract> result = tomlkit.inline_table() for (name, value) in req: if name in self.fields: if isinstance(value, tuple): value = list(value) result[name] = value if req.prereleases: result['allows-prereleases'] = True if 'version' not in result and 'git' not in result: result['version'] = '*' if tuple(result.value) == ('version',): section[section_name][raw_name] = result['version'] section[section_name][raw_name] = result </DeepExtract> if not section[section_name].value: del section[section_name] extras = defaultdict(list) for req in reqs: if req.is_main: for extra in req.main_envs: extras[extra].append(req.name) if req.is_dev: for extra in req.dev_envs: extras[extra].append(req.name) if extras: if 'extras' in section: for extra in section['extras']: if extra not in extras: del section['extras'][extra] else: section['extras'] = tomlkit.table() for (extra, deps) in extras.items(): if list(section['extras'].get(extra, [])) == deps: continue section['extras'][extra] = deps elif 'extras' in section: del section['extras'] if not project.dependencies: project.attach_dependencies([req.dep for req in reqs]) <DeepExtract> urls = dict() for repo in project.warehouses: if isinstance(repo, WarehouseLocalRepo): continue urls[repo.name] = repo.pretty_url added = [] sources = tomlkit.aot() if section.get('source'): if hasattr(section, 'item'): old_sources = section.item('source') else: old_sources = section['source'] for source in old_sources: if source['name'] in urls: if source['url'] != urls[source['name']]: source['url'] = urls[source['name']] sources.append(source) added.append(source['name']) for (name, url) in sorted(urls.items()): if name not in added: source = tomlkit.table() source['name'] = name source['url'] = url sources.append(source) section['source'] = sources if not section['source']: del section['source'] </DeepExtract> return tomlkit.dumps(doc).rstrip() + '\n'
def dumps(self, reqs, project: RootDependency, content=None) -> str: if content: doc = tomlkit.parse(content) else: doc = tomlkit.document() if 'tool' not in doc: doc['tool'] = {'poetry': tomlkit.table()} elif 'poetry' not in doc['tool']: doc['tool']['poetry'] = tomlkit.table() section = doc['tool']['poetry'] section['name'] = project.raw_name for field in self._metafields: value = getattr(project, field) if isinstance(value, tuple): value = list(value) if not value: if field in section: del section[field] elif field not in section: section[field] = value elif section[field].value != value: section[field] = value for name in ('homepage', 'repository', 'documentation'): if name in project.links: section[name] = project.links[name] if project.authors: section['authors'] = [str(author) for author in project.authors] elif 'authors' in section: del section['authors'] if project.readme: section['readme'] = project.readme.path.name elif 'readme' in section: del section['readme'] if 'scripts' in section: scripts = {e.name for e in project.entrypoints if e.group == 'console_scripts'} for script_name in list(section['scripts']): if script_name not in scripts: del section['scripts'][script_name] for entrypoint in project.entrypoints: if entrypoint.group != 'console_scripts': continue if 'scripts' not in section: section['scripts'] = tomlkit.table() if entrypoint.extras: content = tomlkit.inline_table() content['callable'] = entrypoint.path content['extras'] = entrypoint.extras else: content = entrypoint.path section['scripts'][entrypoint.name] = content if 'plugins' in section: groups = defaultdict(set) for entrypoint in project.entrypoints: if entrypoint.group != 'console_scripts': groups[entrypoint.group].add(entrypoint.name) for (group_name, group_content) in section['plugins'].items(): if group_name not in groups: del section['plugins'][group_name] continue for script_name in group_content: if script_name not in groups[group_name]: del section['plugins'][group_name][script_name] for entrypoint in project.entrypoints: if entrypoint.group == 'console_scripts': continue if 'plugins' not in section: section['plugins'] = tomlkit.table() if entrypoint.group not in section['plugins']: section['plugins'][entrypoint.group] = tomlkit.table() section['plugins'][entrypoint.group][entrypoint.name] = entrypoint.path names_mapping = dict() for (section_name, is_dev) in [('dependencies', False), ('dev-dependencies', True)]: if section_name not in section: section[section_name] = tomlkit.table() continue names = {req.name for req in reqs if is_dev is req.is_dev} | {'python'} for name in dict(section[section_name]): normalized_name = canonicalize_name(name) names_mapping[normalized_name] = name if normalized_name not in names: del section[section_name][name] if section['dependencies'].get('python', '') != (project.python or '*'): section['dependencies']['python'] = str(project.python) or '*' for (section_name, is_dev) in [('dependencies', False), ('dev-dependencies', True)]: for req in reqs: if is_dev is not req.is_dev: continue raw_name = names_mapping.get(req.name, req.raw_name) old_spec = section[section_name].get(raw_name) if old_spec: old_dep = self._make_deps(root=RootDependency(), name=raw_name, content=old_spec, envs={'main'})[0] if req.same_dep(old_dep): continue result = tomlkit.inline_table() for (name, value) in req: if name in self.fields: if isinstance(value, tuple): value = list(value) result[name] = value if req.prereleases: result['allows-prereleases'] = True if 'version' not in result and 'git' not in result: result['version'] = '*' if tuple(result.value) == ('version',): section[section_name][raw_name] = result['version'] section[section_name][raw_name] = result if not section[section_name].value: del section[section_name] extras = defaultdict(list) for req in reqs: if req.is_main: for extra in req.main_envs: extras[extra].append(req.name) if req.is_dev: for extra in req.dev_envs: extras[extra].append(req.name) if extras: if 'extras' in section: for extra in section['extras']: if extra not in extras: del section['extras'][extra] else: section['extras'] = tomlkit.table() for (extra, deps) in extras.items(): if list(section['extras'].get(extra, [])) == deps: continue section['extras'][extra] = deps elif 'extras' in section: del section['extras'] if not project.dependencies: project.attach_dependencies([req.dep for req in reqs]) urls = dict() for repo in project.warehouses: if isinstance(repo, WarehouseLocalRepo): continue urls[repo.name] = repo.pretty_url added = [] sources = tomlkit.aot() if section.get('source'): if hasattr(section, 'item'): old_sources = section.item('source') else: old_sources = section['source'] for source in old_sources: if source['name'] in urls: if source['url'] != urls[source['name']]: source['url'] = urls[source['name']] sources.append(source) added.append(source['name']) for (name, url) in sorted(urls.items()): if name not in added: source = tomlkit.table() source['name'] = name source['url'] = url sources.append(source) section['source'] = sources if not section['source']: del section['source'] return tomlkit.dumps(doc).rstrip() + '\n'
dephell
positive
@classmethod def from_existing(cls, train_job_id, train_run_id='latest', client=None): """Create a :class:`ModelPipeline` object from existing model IDs Parameters ---------- train_job_id : int The ID of the CivisML job in the Civis Platform train_run_id : int or string, optional Location of the model run, either * an explicit run ID, * "latest" : The most recent run * "active" : The run designated by the training job's "active build" parameter client : :class:`~civis.APIClient`, optional If not provided, an :class:`~civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. Returns ------- :class:`~civis.ml.ModelPipeline` A :class:`~civis.ml.ModelPipeline` which refers to a previously-trained model Examples -------- >>> from civis.ml import ModelPipeline >>> model = ModelPipeline.from_existing(job_id) >>> model.train_result_.metrics['roc_auc'] 0.843 """ train_job_id = int(train_job_id) if client is None: client = APIClient() <DeepExtract> try: train_run_id = int(train_run_id) except ValueError: container = client.scripts.get_containers(int(train_job_id)) if train_run_id == 'active': train_run_id = container.arguments.get('ACTIVE_BUILD', find_one(container.params, name='ACTIVE_BUILD'))['default'] if train_run_id == 'latest': train_run_id = container.last_run.id try: train_run_id = int(train_run_id) except Exception as exc: msg = 'Please provide valid train_run_id! Needs to be integer corresponding to a training run ID or one of "active" or "latest".' raise ValueError(msg) from exc </DeepExtract> try: fut = ModelFuture(train_job_id, train_run_id, client=client) container = client.scripts.get_containers(train_job_id) except CivisAPIError as api_err: if api_err.status_code == 404: msg = 'There is no Civis Platform job with script ID {} and run ID {}!'.format(train_job_id, train_run_id) raise ValueError(msg) from api_err raise args = container.arguments model = args.get('MODEL', args.get('WORKFLOW')) dependent_variable = args['TARGET_COLUMN'].split() primary_key = args.get('PRIMARY_KEY') parameters = json.loads(args.get('PARAMS', '{}')) cross_validation_parameters = json.loads(args.get('CVPARAMS', '{}')) calibration = args.get('CALIBRATION') excluded_columns = args.get('EXCLUDE_COLS', None) if excluded_columns: excluded_columns = excluded_columns.split() cpu_requested = args.get('REQUIRED_CPU') memory_requested = args.get('REQUIRED_MEMORY') disk_requested = args.get('REQUIRED_DISK_SPACE') name = container.name if name.endswith(' Train'): name = name[:-len(' Train')] notifications = {camel_to_snake(key): val for (key, val) in container.notifications.items()} dependencies = args.get('DEPENDENCIES', None) if dependencies: dependencies = dependencies.split() git_token_name = args.get('GIT_CRED', None) if git_token_name: git_token_name = client.credentials.get(git_token_name).name klass = cls(model=model, dependent_variable=dependent_variable, primary_key=primary_key, model_name=name, parameters=parameters, cross_validation_parameters=cross_validation_parameters, calibration=calibration, excluded_columns=excluded_columns, client=client, cpu_requested=cpu_requested, disk_requested=disk_requested, memory_requested=memory_requested, notifications=notifications, dependencies=dependencies, git_token_name=git_token_name, verbose=args.get('DEBUG', False)) klass.train_result_ = fut template_id = int(container['from_template_id']) ids = find_one(_get_template_ids_all_versions(client).values(), lambda ids: ids['training'] == template_id or ids['registration'] == template_id) p_id = ids['prediction'] klass.predict_template_id = p_id return klass
@classmethod def from_existing(cls, train_job_id, train_run_id='latest', client=None): """Create a :class:`ModelPipeline` object from existing model IDs Parameters ---------- train_job_id : int The ID of the CivisML job in the Civis Platform train_run_id : int or string, optional Location of the model run, either * an explicit run ID, * "latest" : The most recent run * "active" : The run designated by the training job's "active build" parameter client : :class:`~civis.APIClient`, optional If not provided, an :class:`~civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. Returns ------- :class:`~civis.ml.ModelPipeline` A :class:`~civis.ml.ModelPipeline` which refers to a previously-trained model Examples -------- >>> from civis.ml import ModelPipeline >>> model = ModelPipeline.from_existing(job_id) >>> model.train_result_.metrics['roc_auc'] 0.843 """ train_job_id = int(train_job_id) if client is None: client = APIClient() try: train_run_id = int(train_run_id) except ValueError: container = client.scripts.get_containers(int(train_job_id)) if train_run_id == 'active': train_run_id = container.arguments.get('ACTIVE_BUILD', find_one(container.params, name='ACTIVE_BUILD'))['default'] if train_run_id == 'latest': train_run_id = container.last_run.id try: train_run_id = int(train_run_id) except Exception as exc: msg = 'Please provide valid train_run_id! Needs to be integer corresponding to a training run ID or one of "active" or "latest".' raise ValueError(msg) from exc try: fut = ModelFuture(train_job_id, train_run_id, client=client) container = client.scripts.get_containers(train_job_id) except CivisAPIError as api_err: if api_err.status_code == 404: msg = 'There is no Civis Platform job with script ID {} and run ID {}!'.format(train_job_id, train_run_id) raise ValueError(msg) from api_err raise args = container.arguments model = args.get('MODEL', args.get('WORKFLOW')) dependent_variable = args['TARGET_COLUMN'].split() primary_key = args.get('PRIMARY_KEY') parameters = json.loads(args.get('PARAMS', '{}')) cross_validation_parameters = json.loads(args.get('CVPARAMS', '{}')) calibration = args.get('CALIBRATION') excluded_columns = args.get('EXCLUDE_COLS', None) if excluded_columns: excluded_columns = excluded_columns.split() cpu_requested = args.get('REQUIRED_CPU') memory_requested = args.get('REQUIRED_MEMORY') disk_requested = args.get('REQUIRED_DISK_SPACE') name = container.name if name.endswith(' Train'): name = name[:-len(' Train')] notifications = {camel_to_snake(key): val for (key, val) in container.notifications.items()} dependencies = args.get('DEPENDENCIES', None) if dependencies: dependencies = dependencies.split() git_token_name = args.get('GIT_CRED', None) if git_token_name: git_token_name = client.credentials.get(git_token_name).name klass = cls(model=model, dependent_variable=dependent_variable, primary_key=primary_key, model_name=name, parameters=parameters, cross_validation_parameters=cross_validation_parameters, calibration=calibration, excluded_columns=excluded_columns, client=client, cpu_requested=cpu_requested, disk_requested=disk_requested, memory_requested=memory_requested, notifications=notifications, dependencies=dependencies, git_token_name=git_token_name, verbose=args.get('DEBUG', False)) klass.train_result_ = fut template_id = int(container['from_template_id']) ids = find_one(_get_template_ids_all_versions(client).values(), lambda ids: ids['training'] == template_id or ids['registration'] == template_id) p_id = ids['prediction'] klass.predict_template_id = p_id return klass
civis-python
positive
def purge_threads(self): while len(self._threads) > self.max_threads: <DeepExtract> sel_thread_id = list(self._threads.keys())[0] first_thread = self._threads[sel_thread_id] timestamp = first_thread['timestamp'] for (thread_id, thread) in self._threads.items(): if thread['timestamp'] < timestamp: timestamp = thread['timestamp'] sel_thread_id = thread_id last_thread = sel_thread_id </DeepExtract> try: self._threads.pop(last_thread) except KeyError: pass
def purge_threads(self): while len(self._threads) > self.max_threads: sel_thread_id = list(self._threads.keys())[0] first_thread = self._threads[sel_thread_id] timestamp = first_thread['timestamp'] for (thread_id, thread) in self._threads.items(): if thread['timestamp'] < timestamp: timestamp = thread['timestamp'] sel_thread_id = thread_id last_thread = sel_thread_id try: self._threads.pop(last_thread) except KeyError: pass
DarkWallet
positive
def evaluate_masks(json_dataset, all_boxes, all_segms, output_dir, use_salt=True, cleanup=False): res_file = os.path.join(output_dir, 'segmentations_' + json_dataset.name + '_results') if use_salt: res_file += '_{}'.format(str(uuid.uuid4())) res_file += '.json' <DeepExtract> results = [] for (cls_ind, cls) in enumerate(json_dataset.classes): if cls == '__background__': continue if cls_ind >= len(all_boxes): break cat_id = json_dataset.category_to_id_map[cls] results.extend(_coco_segms_results_one_category(json_dataset, all_boxes[cls_ind], all_segms[cls_ind], cat_id)) logger.info('Writing segmentation results json to: {}'.format(os.path.abspath(res_file))) with open(res_file, 'w') as fid: json.dump(results, fid) </DeepExtract> if json_dataset.name.find('test') == -1: <DeepExtract> coco_dt = json_dataset.COCO.loadRes(str(res_file)) coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'segm') coco_eval.evaluate() coco_eval.accumulate() _log_detection_eval_metrics(json_dataset, coco_eval) eval_file = os.path.join(output_dir, 'segmentation_results.pkl') save_object(coco_eval, eval_file) logger.info('Wrote json eval results to: {}'.format(eval_file)) coco_eval = coco_eval </DeepExtract> else: logger.warning('{} eval ignored as annotations are undisclosed on test: {} ignored'.format('Segmentation', json_dataset.name)) coco_eval = None if cleanup: os.remove(res_file) return coco_eval
def evaluate_masks(json_dataset, all_boxes, all_segms, output_dir, use_salt=True, cleanup=False): res_file = os.path.join(output_dir, 'segmentations_' + json_dataset.name + '_results') if use_salt: res_file += '_{}'.format(str(uuid.uuid4())) res_file += '.json' results = [] for (cls_ind, cls) in enumerate(json_dataset.classes): if cls == '__background__': continue if cls_ind >= len(all_boxes): break cat_id = json_dataset.category_to_id_map[cls] results.extend(_coco_segms_results_one_category(json_dataset, all_boxes[cls_ind], all_segms[cls_ind], cat_id)) logger.info('Writing segmentation results json to: {}'.format(os.path.abspath(res_file))) with open(res_file, 'w') as fid: json.dump(results, fid) if json_dataset.name.find('test') == -1: coco_dt = json_dataset.COCO.loadRes(str(res_file)) coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'segm') coco_eval.evaluate() coco_eval.accumulate() _log_detection_eval_metrics(json_dataset, coco_eval) eval_file = os.path.join(output_dir, 'segmentation_results.pkl') save_object(coco_eval, eval_file) logger.info('Wrote json eval results to: {}'.format(eval_file)) coco_eval = coco_eval else: logger.warning('{} eval ignored as annotations are undisclosed on test: {} ignored'.format('Segmentation', json_dataset.name)) coco_eval = None if cleanup: os.remove(res_file) return coco_eval
Clustered-Object-Detection-in-Aerial-Image
positive
def handleMatch(self, m): el = util.etree.Element('img') src_parts = m.group(9).split() if src_parts: src = src_parts[0] if src[0] == '<' and src[-1] == '>': src = src[1:-1] el.set('src', self.sanitize_url(self.unescape(src))) else: el.set('src', '') if len(src_parts) > 1: el.set('title', dequote(self.unescape(' '.join(src_parts[1:])))) if self.markdown.enable_attributes: <DeepExtract> def attributeCallback(match): el.set(match.group(1), match.group(2).replace('\n', ' ')) truealt = ATTR_RE.sub(attributeCallback, m.group(2)) </DeepExtract> else: truealt = m.group(2) el.set('alt', self.unescape(truealt)) return el
def handleMatch(self, m): el = util.etree.Element('img') src_parts = m.group(9).split() if src_parts: src = src_parts[0] if src[0] == '<' and src[-1] == '>': src = src[1:-1] el.set('src', self.sanitize_url(self.unescape(src))) else: el.set('src', '') if len(src_parts) > 1: el.set('title', dequote(self.unescape(' '.join(src_parts[1:])))) if self.markdown.enable_attributes: def attributeCallback(match): el.set(match.group(1), match.group(2).replace('\n', ' ')) truealt = ATTR_RE.sub(attributeCallback, m.group(2)) else: truealt = m.group(2) el.set('alt', self.unescape(truealt)) return el
appengine-gcs-blobstore-python
positive
def test_with(): <DeepExtract> np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=30) elif dt is bool: s = np.where(np.random.randint(2, size=30), True, False) elif dt is float: s = np.random.rand(30) elif dt is str: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=30) s = np.array([r[x] for x in c]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=30) s = np.array([r[x] for x in c]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] c = np.random.randint(10, size=30) s = np.array([rt[x] for x in c]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(30, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps (int, 10) = pd.DataFrame(data) </DeepExtract> <DeepExtract> np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=80) elif dt is bool: s = np.where(np.random.randint(2, size=80), True, False) elif dt is float: s = np.random.rand(80) elif dt is str: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=80) s = np.array([r[x] for x in c]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=80) s = np.array([r[x] for x in c]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] c = np.random.randint(10, size=80) s = np.array([rt[x] for x in c]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(80, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps b = pd.DataFrame(data) </DeepExtract> <DeepExtract> c = Context() engine = sqlite3.connect(':memory:') for (name, df) in dfs.items(): c.create_table(name, df) df.to_sql(name, engine, index=False) dask_result = c.sql('\n WITH\n aa AS (\n SELECT a AS aa, b AS bb FROM a\n ),\n c AS (\n SELECT aa-1 AS aa, bb FROM aa\n )\n SELECT * FROM c UNION SELECT * FROM b\n ORDER BY aa NULLS FIRST, bb NULLS FIRST\n ').reset_index(drop=True) sqlite_result = pd.read_sql('\n WITH\n aa AS (\n SELECT a AS aa, b AS bb FROM a\n ),\n c AS (\n SELECT aa-1 AS aa, bb FROM aa\n )\n SELECT * FROM c UNION SELECT * FROM b\n ORDER BY aa NULLS FIRST, bb NULLS FIRST\n ', engine).reset_index(drop=True) dask_result = cast_datetime_to_string(dask_result) dask_result = dask_result.fillna(np.NaN) sqlite_result = sqlite_result.fillna(np.NaN) assert_eq(dask_result, sqlite_result, check_dtype=False, check_index=check_index) </DeepExtract>
def test_with(): np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=30) elif dt is bool: s = np.where(np.random.randint(2, size=30), True, False) elif dt is float: s = np.random.rand(30) elif dt is str: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=30) s = np.array([r[x] for x in c]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=30) s = np.array([r[x] for x in c]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] c = np.random.randint(10, size=30) s = np.array([rt[x] for x in c]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(30, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps (int, 10) = pd.DataFrame(data) np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=80) elif dt is bool: s = np.where(np.random.randint(2, size=80), True, False) elif dt is float: s = np.random.rand(80) elif dt is str: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=80) s = np.array([r[x] for x in c]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=80) s = np.array([r[x] for x in c]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] c = np.random.randint(10, size=80) s = np.array([rt[x] for x in c]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(80, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps b = pd.DataFrame(data) c = Context() engine = sqlite3.connect(':memory:') for (name, df) in dfs.items(): c.create_table(name, df) df.to_sql(name, engine, index=False) dask_result = c.sql('\n WITH\n aa AS (\n SELECT a AS aa, b AS bb FROM a\n ),\n c AS (\n SELECT aa-1 AS aa, bb FROM aa\n )\n SELECT * FROM c UNION SELECT * FROM b\n ORDER BY aa NULLS FIRST, bb NULLS FIRST\n ').reset_index(drop=True) sqlite_result = pd.read_sql('\n WITH\n aa AS (\n SELECT a AS aa, b AS bb FROM a\n ),\n c AS (\n SELECT aa-1 AS aa, bb FROM aa\n )\n SELECT * FROM c UNION SELECT * FROM b\n ORDER BY aa NULLS FIRST, bb NULLS FIRST\n ', engine).reset_index(drop=True) dask_result = cast_datetime_to_string(dask_result) dask_result = dask_result.fillna(np.NaN) sqlite_result = sqlite_result.fillna(np.NaN) assert_eq(dask_result, sqlite_result, check_dtype=False, check_index=check_index)
dask-sql
positive
def test_detail_update_update_and_success(admin_client):
    """Tests that plan list update and success message works as expected."""
    <DeepExtract>
    plan = models.SubscriptionPlan.objects.create(plan_name=plan_name, plan_description=plan_description)
    </DeepExtract>
    <DeepExtract>
    plan_list = models.PlanList.objects.create(title=title)
    </DeepExtract>
    detail = models.PlanListDetail.objects.create(plan=plan, plan_list=plan_list)
    detail_count = models.PlanListDetail.objects.all().count()
    response = admin_client.post(reverse('dfs_plan_list_detail_update', kwargs={'plan_list_id': plan_list.id, 'plan_list_detail_id': detail.id}), {'plan': plan.id, 'plan_list': plan_list.id, 'order': 1, 'html_content': '<b>Test</b>'}, follow=True)
    messages = list(get_messages(response.wsgi_request))
    assert models.PlanListDetail.objects.all().count() == detail_count
    assert models.PlanListDetail.objects.get(id=plan_list.id).html_content == '<b>Test</b>'
    assert messages[0].tags == 'success'
    assert messages[0].message == 'Plan list details successfully updated'
def test_detail_update_update_and_success(admin_client):
    """Tests that plan list update and success message works as expected."""
    plan = models.SubscriptionPlan.objects.create(plan_name=plan_name, plan_description=plan_description)
    plan_list = models.PlanList.objects.create(title=title)
    detail = models.PlanListDetail.objects.create(plan=plan, plan_list=plan_list)
    detail_count = models.PlanListDetail.objects.all().count()
    response = admin_client.post(reverse('dfs_plan_list_detail_update', kwargs={'plan_list_id': plan_list.id, 'plan_list_detail_id': detail.id}), {'plan': plan.id, 'plan_list': plan_list.id, 'order': 1, 'html_content': '<b>Test</b>'}, follow=True)
    messages = list(get_messages(response.wsgi_request))
    assert models.PlanListDetail.objects.all().count() == detail_count
    assert models.PlanListDetail.objects.get(id=plan_list.id).html_content == '<b>Test</b>'
    assert messages[0].tags == 'success'
    assert messages[0].message == 'Plan list details successfully updated'
django-flexible-subscriptions
positive
def testFindUndeclaredDictVariables(self):
    <DeepExtract>
    self.assertItemsEqual(['foo.bar', 'baz'], variablehandler.GetVariableAttributes(_TEMPLATE))
    </DeepExtract>
    <DeepExtract>
    self.assertItemsEqual(['foo.bar'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_ASSIGNMENT))
    </DeepExtract>
    <DeepExtract>
    self.assertItemsEqual(['foo.bar'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_FILTER))
    </DeepExtract>
    <DeepExtract>
    self.assertItemsEqual(['foo.bar'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_FOR))
    </DeepExtract>
    <DeepExtract>
    self.assertItemsEqual(['foo.bar', 'foo.baz'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_IF))
    </DeepExtract>
    <DeepExtract>
    self.assertItemsEqual(['foo.bar.baz.qux.quux'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_MANY_ATTRIBUTES))
    </DeepExtract>
    <DeepExtract>
    self.assertItemsEqual(['foo.bar.baz.qux.quux', 'foo.bar.ball', 'goo'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_MANY_ATTRIBUTES2))
    </DeepExtract>
    with self.assertRaises(Exception):
        variablehandler.GetVariableAttributes(' and {% endif %}')
def testFindUndeclaredDictVariables(self):
    self.assertItemsEqual(['foo.bar', 'baz'], variablehandler.GetVariableAttributes(_TEMPLATE))
    self.assertItemsEqual(['foo.bar'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_ASSIGNMENT))
    self.assertItemsEqual(['foo.bar'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_FILTER))
    self.assertItemsEqual(['foo.bar'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_FOR))
    self.assertItemsEqual(['foo.bar', 'foo.baz'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_IF))
    self.assertItemsEqual(['foo.bar.baz.qux.quux'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_MANY_ATTRIBUTES))
    self.assertItemsEqual(['foo.bar.baz.qux.quux', 'foo.bar.ball', 'goo'], variablehandler.GetVariableAttributes(_TEMPLATE_WITH_MANY_ATTRIBUTES2))
    with self.assertRaises(Exception):
        variablehandler.GetVariableAttributes(' and {% endif %}')
Data-Pipeline
positive
def test_deposit_delete_permissions(app, test_records_data, login_user, test_users): """Test deposit delete with HTTP DELETE.""" with app.app_context(): admin = test_users['admin'] def test_delete(deposit, status, user=None): with app.test_client() as client: if user is not None: login_user(user, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == status creator = create_user('creator') non_creator = create_user('non-creator') deposit = create_deposit(test_records_data[0], creator) <DeepExtract> with app.test_client() as client: if user is not None: login_user(user, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 401 </DeepExtract> deposit.submit() <DeepExtract> with app.test_client() as client: if user is not None: login_user(user, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 401 </DeepExtract> deposit.publish() <DeepExtract> with app.test_client() as client: if user is not None: login_user(user, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 401 </DeepExtract> deposit = create_deposit(test_records_data[0], creator) <DeepExtract> with app.test_client() as client: if non_creator is not None: login_user(non_creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 403 </DeepExtract> deposit.submit() <DeepExtract> with app.test_client() as client: if non_creator is not None: login_user(non_creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 403 </DeepExtract> deposit.publish() <DeepExtract> with app.test_client() as client: if non_creator is not None: login_user(non_creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 403 </DeepExtract> deposit = create_deposit(test_records_data[0], creator) <DeepExtract> with app.test_client() as client: if creator is not None: login_user(creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 </DeepExtract> deposit = create_deposit(test_records_data[0], creator) deposit.submit() <DeepExtract> with app.test_client() as client: if creator is not None: login_user(creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 </DeepExtract> deposit = create_deposit(test_records_data[0], creator) deposit.submit() deposit.publish() <DeepExtract> 
with app.test_client() as client: if creator is not None: login_user(creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 403 </DeepExtract> deposit = create_deposit(test_records_data[0], creator) <DeepExtract> with app.test_client() as client: if admin is not None: login_user(admin, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 </DeepExtract> deposit = create_deposit(test_records_data[0], creator) deposit.submit() <DeepExtract> with app.test_client() as client: if admin is not None: login_user(admin, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 </DeepExtract> deposit = create_deposit(test_records_data[0], creator) deposit.submit() deposit.publish() <DeepExtract> with app.test_client() as client: if admin is not None: login_user(admin, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 </DeepExtract>
def test_deposit_delete_permissions(app, test_records_data, login_user, test_users): """Test deposit delete with HTTP DELETE.""" with app.app_context(): admin = test_users['admin'] def test_delete(deposit, status, user=None): with app.test_client() as client: if user is not None: login_user(user, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == status creator = create_user('creator') non_creator = create_user('non-creator') deposit = create_deposit(test_records_data[0], creator) with app.test_client() as client: if user is not None: login_user(user, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 401 deposit.submit() with app.test_client() as client: if user is not None: login_user(user, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 401 deposit.publish() with app.test_client() as client: if user is not None: login_user(user, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 401 deposit = create_deposit(test_records_data[0], creator) with app.test_client() as client: if non_creator is not None: login_user(non_creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 403 deposit.submit() with app.test_client() as client: if non_creator is not None: login_user(non_creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 403 deposit.publish() with app.test_client() as client: if non_creator is not None: login_user(non_creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 403 deposit = create_deposit(test_records_data[0], creator) with app.test_client() as client: if creator is not None: login_user(creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 deposit = create_deposit(test_records_data[0], creator) deposit.submit() with app.test_client() as client: if creator is not None: login_user(creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 deposit = create_deposit(test_records_data[0], creator) deposit.submit() deposit.publish() with app.test_client() as client: if creator is not None: login_user(creator, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), 
headers=headers) assert request_res.status_code == 403 deposit = create_deposit(test_records_data[0], creator) with app.test_client() as client: if admin is not None: login_user(admin, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 deposit = create_deposit(test_records_data[0], creator) deposit.submit() with app.test_client() as client: if admin is not None: login_user(admin, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 deposit = create_deposit(test_records_data[0], creator) deposit.submit() deposit.publish() with app.test_client() as client: if admin is not None: login_user(admin, client) headers = [('Accept', 'application/json')] request_res = client.delete(url_for('b2share_deposit_rest.b2dep_item', pid_value=deposit.pid.pid_value), headers=headers) assert request_res.status_code == 204 </DeepExtract>
b2share
positive
def reorganize_records(records):

    def _parse(lines, is_train=True):
        (time, step, loss, top1, top5, ppl, bits) = ([], [], [], [], [], [], [])
        for line in lines:
            time.append(line['time'])
            step.append(line['local_index'] if is_train else line['epoch'])
            loss.append(line['loss'])
            top1.append(line['top1'] if 'top1' in line else 0)
            top5.append(line['top5'] if 'top5' in line else 0)
            ppl.append(line['ppl'] if 'ppl' in line else 0)
            bits.append(line['n_bits_to_transmit'] if is_train else 0)
        return (time, step, loss, top1, top5, ppl, bits)
    (tr_records, te_records, te_avg_records) = records['single_records']
    te_records = [record for record in te_records]
    <DeepExtract>
    (time, step, loss, top1, top5, ppl, bits) = ([], [], [], [], [], [], [])
    for line in tr_records:
        time.append(line['time'])
        step.append(line['local_index'] if True else line['epoch'])
        loss.append(line['loss'])
        top1.append(line['top1'] if 'top1' in line else 0)
        top5.append(line['top5'] if 'top5' in line else 0)
        ppl.append(line['ppl'] if 'ppl' in line else 0)
        bits.append(line['n_bits_to_transmit'] if True else 0)
    (tr_time, tr_step, tr_loss, tr_top1, tr_top5, tr_ppl, tr_MB) = (time, step, loss, top1, top5, ppl, bits)
    </DeepExtract>
    <DeepExtract>
    (time, step, loss, top1, top5, ppl, bits) = ([], [], [], [], [], [], [])
    for line in te_records:
        time.append(line['time'])
        step.append(line['local_index'] if False else line['epoch'])
        loss.append(line['loss'])
        top1.append(line['top1'] if 'top1' in line else 0)
        top5.append(line['top5'] if 'top5' in line else 0)
        ppl.append(line['ppl'] if 'ppl' in line else 0)
        bits.append(line['n_bits_to_transmit'] if False else 0)
    (te_time, te_epoch, te_loss, te_top1, te_top5, te_ppl, _) = (time, step, loss, top1, top5, ppl, bits)
    </DeepExtract>
    te_avg_perf = [0] + [record['best_perf'] for record in te_avg_records]
    return {'tr_time': tr_time, 'tr_MB': tr_MB, 'tr_loss': tr_loss, 'tr_top1': tr_top1, 'tr_top5': tr_top5, 'tr_ppl': tr_ppl, 'te_time': te_time, 'te_step': te_epoch, 'te_loss': te_loss, 'te_top1': te_top1, 'te_top1_upon': [max(te_top1[:idx]) for idx in range(1, 1 + len(te_top1))], 'te_top5': te_top5, 'te_top5_upon': [max(te_top5[:idx]) for idx in range(1, 1 + len(te_top5))], 'te_ppl': te_ppl, 'te_ppl_upon': [max(te_ppl[:idx]) for idx in range(1, 1 + len(te_ppl))], 'te_avg_perf': te_avg_perf, 'te_avg_perf_upon': [max(te_avg_perf[:idx]) for idx in range(1, 1 + len(te_avg_perf))]}
def reorganize_records(records):

    def _parse(lines, is_train=True):
        (time, step, loss, top1, top5, ppl, bits) = ([], [], [], [], [], [], [])
        for line in lines:
            time.append(line['time'])
            step.append(line['local_index'] if is_train else line['epoch'])
            loss.append(line['loss'])
            top1.append(line['top1'] if 'top1' in line else 0)
            top5.append(line['top5'] if 'top5' in line else 0)
            ppl.append(line['ppl'] if 'ppl' in line else 0)
            bits.append(line['n_bits_to_transmit'] if is_train else 0)
        return (time, step, loss, top1, top5, ppl, bits)
    (tr_records, te_records, te_avg_records) = records['single_records']
    te_records = [record for record in te_records]
    (time, step, loss, top1, top5, ppl, bits) = ([], [], [], [], [], [], [])
    for line in tr_records:
        time.append(line['time'])
        step.append(line['local_index'] if True else line['epoch'])
        loss.append(line['loss'])
        top1.append(line['top1'] if 'top1' in line else 0)
        top5.append(line['top5'] if 'top5' in line else 0)
        ppl.append(line['ppl'] if 'ppl' in line else 0)
        bits.append(line['n_bits_to_transmit'] if True else 0)
    (tr_time, tr_step, tr_loss, tr_top1, tr_top5, tr_ppl, tr_MB) = (time, step, loss, top1, top5, ppl, bits)
    (time, step, loss, top1, top5, ppl, bits) = ([], [], [], [], [], [], [])
    for line in te_records:
        time.append(line['time'])
        step.append(line['local_index'] if False else line['epoch'])
        loss.append(line['loss'])
        top1.append(line['top1'] if 'top1' in line else 0)
        top5.append(line['top5'] if 'top5' in line else 0)
        ppl.append(line['ppl'] if 'ppl' in line else 0)
        bits.append(line['n_bits_to_transmit'] if False else 0)
    (te_time, te_epoch, te_loss, te_top1, te_top5, te_ppl, _) = (time, step, loss, top1, top5, ppl, bits)
    te_avg_perf = [0] + [record['best_perf'] for record in te_avg_records]
    return {'tr_time': tr_time, 'tr_MB': tr_MB, 'tr_loss': tr_loss, 'tr_top1': tr_top1, 'tr_top5': tr_top5, 'tr_ppl': tr_ppl, 'te_time': te_time, 'te_step': te_epoch, 'te_loss': te_loss, 'te_top1': te_top1, 'te_top1_upon': [max(te_top1[:idx]) for idx in range(1, 1 + len(te_top1))], 'te_top5': te_top5, 'te_top5_upon': [max(te_top5[:idx]) for idx in range(1, 1 + len(te_top5))], 'te_ppl': te_ppl, 'te_ppl_upon': [max(te_ppl[:idx]) for idx in range(1, 1 + len(te_ppl))], 'te_avg_perf': te_avg_perf, 'te_avg_perf_upon': [max(te_avg_perf[:idx]) for idx in range(1, 1 + len(te_avg_perf))]}
ChocoSGD
positive
def check_function_decl(self, visitor, func):
    if func.body.block_items is None:
        return 0
    if tc is None:
        return 0
    ilvl = 0
    for b in func.body.block_items:
        <DeepExtract>
        stmb = 0
        for st in sub_stmt:
            if isinstance(b, st):
                stmb = 1
        if not stmb:
            return 0
        stms = []
        stms_expr = []
        if hasattr(b, 'iftrue') and b.iftrue is not None:
            stms.append(b.iftrue)
            stms_expr.append('iftrue')
        if hasattr(b, 'iffalse') and b.iffalse is not None:
            stms.append(b.iffalse)
            stms_expr.append('iffalse')
        if hasattr(b, 'stmt') and b.stmt is not None:
            stms.append(b.stmt)
            stms_expr.append('stmt')
        pos = -1
        for stm1 in stms:
            pos += 1
            if self.stmt_parse(stm1, b, ilvl + 1 + 1, stms_expr[pos]):
                continue
            try:
                for stm in stm1:
                    if self.stmt_parse(stm, b, ilvl + 1 + 1, stms_expr[pos]):
                        continue
                    line_index = stm.coord.line - 1
                    line_ = tc[line_index]
                    s = len(line_) - len(line_.lstrip())
                    line = line_index + 1 + self.header_lines + (1 if self.header_lines > 0 else 0)
                    ilvl_t = ilvl + 1
                    if isinstance(b, If) and isinstance(None, If) and None.iffalse:
                        if hasattr(None.iffalse, 'iftrue') and stm1 == None.iffalse.iftrue:
                            ilvl_t -= 1
                        elif hasattr(None.iffalse, 'iffalse') and stm1 == None.iffalse.iffalse:
                            ilvl_t -= 1
                    if ilvl_t > self.get_config()['max_branches']:
                        BuErrors.print_error(self.path, self.file_name, line, self.get_check_level(), self.get_check_id(), self.message)
            except Exception:
                pass
        return 1
        </DeepExtract>
    return 0
def check_function_decl(self, visitor, func):
    if func.body.block_items is None:
        return 0
    if tc is None:
        return 0
    ilvl = 0
    for b in func.body.block_items:
        stmb = 0
        for st in sub_stmt:
            if isinstance(b, st):
                stmb = 1
        if not stmb:
            return 0
        stms = []
        stms_expr = []
        if hasattr(b, 'iftrue') and b.iftrue is not None:
            stms.append(b.iftrue)
            stms_expr.append('iftrue')
        if hasattr(b, 'iffalse') and b.iffalse is not None:
            stms.append(b.iffalse)
            stms_expr.append('iffalse')
        if hasattr(b, 'stmt') and b.stmt is not None:
            stms.append(b.stmt)
            stms_expr.append('stmt')
        pos = -1
        for stm1 in stms:
            pos += 1
            if self.stmt_parse(stm1, b, ilvl + 1 + 1, stms_expr[pos]):
                continue
            try:
                for stm in stm1:
                    if self.stmt_parse(stm, b, ilvl + 1 + 1, stms_expr[pos]):
                        continue
                    line_index = stm.coord.line - 1
                    line_ = tc[line_index]
                    s = len(line_) - len(line_.lstrip())
                    line = line_index + 1 + self.header_lines + (1 if self.header_lines > 0 else 0)
                    ilvl_t = ilvl + 1
                    if isinstance(b, If) and isinstance(None, If) and None.iffalse:
                        if hasattr(None.iffalse, 'iftrue') and stm1 == None.iffalse.iftrue:
                            ilvl_t -= 1
                        elif hasattr(None.iffalse, 'iffalse') and stm1 == None.iffalse.iffalse:
                            ilvl_t -= 1
                    if ilvl_t > self.get_config()['max_branches']:
                        BuErrors.print_error(self.path, self.file_name, line, self.get_check_level(), self.get_check_id(), self.message)
            except Exception:
                pass
        return 1
    return 0
Bubulle-Norminette
positive
def inference(ckpt, inference_input_file, inference_output_file, hparams, num_workers=1, jobid=0, scope=None): """Perform translation.""" if hparams.inference_indices: assert num_workers == 1 if not hparams.attention: model_creator = nmt_model.Model elif hparams.attention_architecture == 'standard': model_creator = attention_model.AttentionModel elif hparams.attention_architecture in ['gnmt', 'gnmt_v2']: model_creator = gnmt_model.GNMTModel else: raise ValueError('Unknown model architecture') infer_model = model_helper.create_infer_model(model_creator, hparams, scope) inference_context_file = None if hparams.ctx is not None: inference_context_file = '%s.%s' % (inference_input_file, hparams.ctx) if num_workers == 1: <DeepExtract> output_infer = inference_output_file infer_data = load_data(inference_input_file, hparams) if inference_context_file is not None: infer_context = load_data(inference_context_file, hparams) else: infer_context = None infer_feed_dict = {infer_model.src_placeholder: infer_data, infer_model.batch_size_placeholder: hparams.infer_batch_size} if infer_context is not None: infer_feed_dict[infer_model.ctx_placeholder] = infer_context with tf.Session(graph=infer_model.graph, config=utils.get_config_proto()) as sess: loaded_infer_model = model_helper.load_model(infer_model.model, ckpt, sess, 'infer') sess.run(infer_model.iterator.initializer, feed_dict=infer_feed_dict) utils.print_out('# Start decoding') if hparams.inference_indices: _decode_inference_indices(loaded_infer_model, sess, output_infer=output_infer, output_infer_summary_prefix=output_infer, inference_indices=hparams.inference_indices, tgt_eos=hparams.eos, subword_option=hparams.subword_option) else: nmt_utils.decode_and_evaluate('infer', loaded_infer_model, sess, output_infer, ref_file=None, metrics=hparams.metrics, subword_option=hparams.subword_option, beam_width=hparams.beam_width, tgt_eos=hparams.eos, hparams=hparams, num_translations_per_input=hparams.num_translations_per_input) </DeepExtract> else: <DeepExtract> assert num_workers > 1 final_output_infer = inference_output_file output_infer = '%s_%d' % (inference_output_file, jobid) output_infer_done = '%s_done_%d' % (inference_output_file, jobid) infer_data = load_data(inference_input_file, hparams) if inference_context_file is not None: infer_context = load_data(inference_context_file, hparams) else: infer_context = None total_load = len(infer_data) load_per_worker = int((total_load - 1) / num_workers) + 1 start_position = jobid * load_per_worker end_position = min(start_position + load_per_worker, total_load) infer_data = infer_data[start_position:end_position] if infer_context is not None: infer_context = infer_context[start_position:end_position] infer_feed_dict = {infer_model.src_placeholder: infer_data, infer_model.batch_size_placeholder: hparams.infer_batch_size} if infer_context is not None: infer_feed_dict[infer_model.ctx_placeholder] = infer_context with tf.Session(graph=infer_model.graph, config=utils.get_config_proto()) as sess: loaded_infer_model = model_helper.load_model(infer_model.model, ckpt, sess, 'infer') sess.run(infer_model.iterator.initializer, feed_dict=infer_feed_dict) utils.print_out('# Start decoding') nmt_utils.decode_and_evaluate('infer', loaded_infer_model, sess, output_infer, ref_file=None, metrics=hparams.metrics, subword_option=hparams.subword_option, beam_width=hparams.beam_width, tgt_eos=hparams.eos, hparams=hparams, num_translations_per_input=hparams.num_translations_per_input) tf.gfile.Rename(output_infer, 
output_infer_done, overwrite=True) if jobid != 0: return with codecs.getwriter('utf-8')(tf.gfile.GFile(final_output_infer, mode='wb')) as final_f: for worker_id in range(num_workers): worker_infer_done = '%s_done_%d' % (inference_output_file, worker_id) while not tf.gfile.Exists(worker_infer_done): utils.print_out(' waitting job %d to complete.' % worker_id) time.sleep(10) with codecs.getreader('utf-8')(tf.gfile.GFile(worker_infer_done, mode='rb')) as f: for translation in f: final_f.write('%s' % translation) for worker_id in range(num_workers): worker_infer_done = '%s_done_%d' % (inference_output_file, worker_id) tf.gfile.Remove(worker_infer_done) </DeepExtract>
def inference(ckpt, inference_input_file, inference_output_file, hparams, num_workers=1, jobid=0, scope=None): """Perform translation.""" if hparams.inference_indices: assert num_workers == 1 if not hparams.attention: model_creator = nmt_model.Model elif hparams.attention_architecture == 'standard': model_creator = attention_model.AttentionModel elif hparams.attention_architecture in ['gnmt', 'gnmt_v2']: model_creator = gnmt_model.GNMTModel else: raise ValueError('Unknown model architecture') infer_model = model_helper.create_infer_model(model_creator, hparams, scope) inference_context_file = None if hparams.ctx is not None: inference_context_file = '%s.%s' % (inference_input_file, hparams.ctx) if num_workers == 1: output_infer = inference_output_file infer_data = load_data(inference_input_file, hparams) if inference_context_file is not None: infer_context = load_data(inference_context_file, hparams) else: infer_context = None infer_feed_dict = {infer_model.src_placeholder: infer_data, infer_model.batch_size_placeholder: hparams.infer_batch_size} if infer_context is not None: infer_feed_dict[infer_model.ctx_placeholder] = infer_context with tf.Session(graph=infer_model.graph, config=utils.get_config_proto()) as sess: loaded_infer_model = model_helper.load_model(infer_model.model, ckpt, sess, 'infer') sess.run(infer_model.iterator.initializer, feed_dict=infer_feed_dict) utils.print_out('# Start decoding') if hparams.inference_indices: _decode_inference_indices(loaded_infer_model, sess, output_infer=output_infer, output_infer_summary_prefix=output_infer, inference_indices=hparams.inference_indices, tgt_eos=hparams.eos, subword_option=hparams.subword_option) else: nmt_utils.decode_and_evaluate('infer', loaded_infer_model, sess, output_infer, ref_file=None, metrics=hparams.metrics, subword_option=hparams.subword_option, beam_width=hparams.beam_width, tgt_eos=hparams.eos, hparams=hparams, num_translations_per_input=hparams.num_translations_per_input) else: assert num_workers > 1 final_output_infer = inference_output_file output_infer = '%s_%d' % (inference_output_file, jobid) output_infer_done = '%s_done_%d' % (inference_output_file, jobid) infer_data = load_data(inference_input_file, hparams) if inference_context_file is not None: infer_context = load_data(inference_context_file, hparams) else: infer_context = None total_load = len(infer_data) load_per_worker = int((total_load - 1) / num_workers) + 1 start_position = jobid * load_per_worker end_position = min(start_position + load_per_worker, total_load) infer_data = infer_data[start_position:end_position] if infer_context is not None: infer_context = infer_context[start_position:end_position] infer_feed_dict = {infer_model.src_placeholder: infer_data, infer_model.batch_size_placeholder: hparams.infer_batch_size} if infer_context is not None: infer_feed_dict[infer_model.ctx_placeholder] = infer_context with tf.Session(graph=infer_model.graph, config=utils.get_config_proto()) as sess: loaded_infer_model = model_helper.load_model(infer_model.model, ckpt, sess, 'infer') sess.run(infer_model.iterator.initializer, feed_dict=infer_feed_dict) utils.print_out('# Start decoding') nmt_utils.decode_and_evaluate('infer', loaded_infer_model, sess, output_infer, ref_file=None, metrics=hparams.metrics, subword_option=hparams.subword_option, beam_width=hparams.beam_width, tgt_eos=hparams.eos, hparams=hparams, num_translations_per_input=hparams.num_translations_per_input) tf.gfile.Rename(output_infer, output_infer_done, overwrite=True) if jobid != 0: return 
with codecs.getwriter('utf-8')(tf.gfile.GFile(final_output_infer, mode='wb')) as final_f: for worker_id in range(num_workers): worker_infer_done = '%s_done_%d' % (inference_output_file, worker_id) while not tf.gfile.Exists(worker_infer_done): utils.print_out(' waitting job %d to complete.' % worker_id) time.sleep(10) with codecs.getreader('utf-8')(tf.gfile.GFile(worker_infer_done, mode='rb')) as f: for translation in f: final_f.write('%s' % translation) for worker_id in range(num_workers): worker_infer_done = '%s_done_%d' % (inference_output_file, worker_id) tf.gfile.Remove(worker_infer_done) </DeepExtract>
active-qa
positive
def cmp_using(eq=None, lt=None, le=None, gt=None, ge=None, require_same_type=True, class_name='Comparable'): """ Create a class that can be passed into `attrs.field`'s ``eq``, ``order``, and ``cmp`` arguments to customize field comparison. The resulting class will have a full set of ordering methods if at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided. :param Optional[callable] eq: `callable` used to evaluate equality of two objects. :param Optional[callable] lt: `callable` used to evaluate whether one object is less than another object. :param Optional[callable] le: `callable` used to evaluate whether one object is less than or equal to another object. :param Optional[callable] gt: `callable` used to evaluate whether one object is greater than another object. :param Optional[callable] ge: `callable` used to evaluate whether one object is greater than or equal to another object. :param bool require_same_type: When `True`, equality and ordering methods will return `NotImplemented` if objects are not of the same type. :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. See `comparison` for more details. .. versionadded:: 21.1.0 """ body = {'__slots__': ['value'], '__init__': _make_init(), '_requirements': [], '_is_comparable_to': _is_comparable_to} num_order_functions = 0 has_eq_function = False if eq is not None: has_eq_function = True <DeepExtract> def method(self, other): if not self._is_comparable_to(other): body['__eq__'] = NotImplemented result = eq(self.value, other.value) if result is NotImplemented: body['__eq__'] = NotImplemented body['__eq__'] = result method.__name__ = f"__{'eq'}__" method.__doc__ = f"Return a {_operation_names['eq']} b. Computed by attrs." body['__eq__'] = method </DeepExtract> body['__ne__'] = _make_ne() if lt is not None: num_order_functions += 1 <DeepExtract> def method(self, other): if not self._is_comparable_to(other): body['__lt__'] = NotImplemented result = lt(self.value, other.value) if result is NotImplemented: body['__lt__'] = NotImplemented body['__lt__'] = result method.__name__ = f"__{'lt'}__" method.__doc__ = f"Return a {_operation_names['lt']} b. Computed by attrs." body['__lt__'] = method </DeepExtract> if le is not None: num_order_functions += 1 <DeepExtract> def method(self, other): if not self._is_comparable_to(other): body['__le__'] = NotImplemented result = le(self.value, other.value) if result is NotImplemented: body['__le__'] = NotImplemented body['__le__'] = result method.__name__ = f"__{'le'}__" method.__doc__ = f"Return a {_operation_names['le']} b. Computed by attrs." body['__le__'] = method </DeepExtract> if gt is not None: num_order_functions += 1 <DeepExtract> def method(self, other): if not self._is_comparable_to(other): body['__gt__'] = NotImplemented result = gt(self.value, other.value) if result is NotImplemented: body['__gt__'] = NotImplemented body['__gt__'] = result method.__name__ = f"__{'gt'}__" method.__doc__ = f"Return a {_operation_names['gt']} b. Computed by attrs." body['__gt__'] = method </DeepExtract> if ge is not None: num_order_functions += 1 <DeepExtract> def method(self, other): if not self._is_comparable_to(other): body['__ge__'] = NotImplemented result = ge(self.value, other.value) if result is NotImplemented: body['__ge__'] = NotImplemented body['__ge__'] = result method.__name__ = f"__{'ge'}__" method.__doc__ = f"Return a {_operation_names['ge']} b. Computed by attrs." 
body['__ge__'] = method </DeepExtract> type_ = types.new_class(class_name, (object,), {}, lambda ns: ns.update(body)) if require_same_type: type_._requirements.append(_check_same_type) if 0 < num_order_functions < 4: if not has_eq_function: raise ValueError('eq must be define is order to complete ordering from lt, le, gt, ge.') type_ = functools.total_ordering(type_) return type_
def cmp_using(eq=None, lt=None, le=None, gt=None, ge=None, require_same_type=True, class_name='Comparable'): """ Create a class that can be passed into `attrs.field`'s ``eq``, ``order``, and ``cmp`` arguments to customize field comparison. The resulting class will have a full set of ordering methods if at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided. :param Optional[callable] eq: `callable` used to evaluate equality of two objects. :param Optional[callable] lt: `callable` used to evaluate whether one object is less than another object. :param Optional[callable] le: `callable` used to evaluate whether one object is less than or equal to another object. :param Optional[callable] gt: `callable` used to evaluate whether one object is greater than another object. :param Optional[callable] ge: `callable` used to evaluate whether one object is greater than or equal to another object. :param bool require_same_type: When `True`, equality and ordering methods will return `NotImplemented` if objects are not of the same type. :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. See `comparison` for more details. .. versionadded:: 21.1.0 """ body = {'__slots__': ['value'], '__init__': _make_init(), '_requirements': [], '_is_comparable_to': _is_comparable_to} num_order_functions = 0 has_eq_function = False if eq is not None: has_eq_function = True def method(self, other): if not self._is_comparable_to(other): body['__eq__'] = NotImplemented result = eq(self.value, other.value) if result is NotImplemented: body['__eq__'] = NotImplemented body['__eq__'] = result method.__name__ = f"__{'eq'}__" method.__doc__ = f"Return a {_operation_names['eq']} b. Computed by attrs." body['__eq__'] = method body['__ne__'] = _make_ne() if lt is not None: num_order_functions += 1 def method(self, other): if not self._is_comparable_to(other): body['__lt__'] = NotImplemented result = lt(self.value, other.value) if result is NotImplemented: body['__lt__'] = NotImplemented body['__lt__'] = result method.__name__ = f"__{'lt'}__" method.__doc__ = f"Return a {_operation_names['lt']} b. Computed by attrs." body['__lt__'] = method if le is not None: num_order_functions += 1 def method(self, other): if not self._is_comparable_to(other): body['__le__'] = NotImplemented result = le(self.value, other.value) if result is NotImplemented: body['__le__'] = NotImplemented body['__le__'] = result method.__name__ = f"__{'le'}__" method.__doc__ = f"Return a {_operation_names['le']} b. Computed by attrs." body['__le__'] = method if gt is not None: num_order_functions += 1 def method(self, other): if not self._is_comparable_to(other): body['__gt__'] = NotImplemented result = gt(self.value, other.value) if result is NotImplemented: body['__gt__'] = NotImplemented body['__gt__'] = result method.__name__ = f"__{'gt'}__" method.__doc__ = f"Return a {_operation_names['gt']} b. Computed by attrs." body['__gt__'] = method if ge is not None: num_order_functions += 1 def method(self, other): if not self._is_comparable_to(other): body['__ge__'] = NotImplemented result = ge(self.value, other.value) if result is NotImplemented: body['__ge__'] = NotImplemented body['__ge__'] = result method.__name__ = f"__{'ge'}__" method.__doc__ = f"Return a {_operation_names['ge']} b. Computed by attrs." 
body['__ge__'] = method type_ = types.new_class(class_name, (object,), {}, lambda ns: ns.update(body)) if require_same_type: type_._requirements.append(_check_same_type) if 0 < num_order_functions < 4: if not has_eq_function: raise ValueError('eq must be define is order to complete ordering from lt, le, gt, ge.') type_ = functools.total_ordering(type_) return type_
attrs
positive
def __getitem__(self, pos):
    """Return a pair by its position. If *pos* is a slice, then return a generator that yields pairs as specified by the slice."""
    size = len(self)
    if isinstance(pos, int):
        if pos < 0:
            pos += size
        if not 0 <= pos < size:
            raise IndexError('index out of range')
        <DeepExtract>
        node = self._head
        distance = 0
        for i in reversed(range(self.level)):
            nnode = node[2 + i]
            ndistance = distance + (1 if i == 0 else nnode[-1])
            while nnode is not self._tail and ndistance <= pos + 1:
                (nnode, node, distance) = (nnode[2 + i], nnode, ndistance)
                ndistance += 1 if i == 0 else nnode[-1]
            self._path[i] = node
            self._distance[i] = distance
        (path, _) = (self._path, self._distance)
        </DeepExtract>
        node = path[0]
        return (node[0], node[1])
    elif isinstance(pos, slice):
        (start, stop) = (pos.start, pos.stop)
        if start is None:
            start = 0
        elif start < 0:
            start += size
        if stop is None:
            stop = size
        elif stop < 0:
            stop += size
        <DeepExtract>
        node = self._head
        distance = 0
        for i in reversed(range(self.level)):
            nnode = node[2 + i]
            ndistance = distance + (1 if i == 0 else nnode[-1])
            while nnode is not self._tail and ndistance <= start + 1:
                (nnode, node, distance) = (nnode[2 + i], nnode, ndistance)
                ndistance += 1 if i == 0 else nnode[-1]
            self._path[i] = node
            self._distance[i] = distance
        (path, _) = (self._path, self._distance)
        </DeepExtract>

        def genpairs():
            pos = start
            node = path[0]
            while node is not self._tail and pos < stop:
                yield (node[0], node[1])
                node = node[2]
                pos += 1
        return genpairs()
    else:
        raise TypeError('expecting int or slice, got {0.__name__!r}'.format(type(pos)))
def __getitem__(self, pos):
    """Return a pair by its position. If *pos* is a slice, then return a generator that yields pairs as specified by the slice."""
    size = len(self)
    if isinstance(pos, int):
        if pos < 0:
            pos += size
        if not 0 <= pos < size:
            raise IndexError('index out of range')
        node = self._head
        distance = 0
        for i in reversed(range(self.level)):
            nnode = node[2 + i]
            ndistance = distance + (1 if i == 0 else nnode[-1])
            while nnode is not self._tail and ndistance <= pos + 1:
                (nnode, node, distance) = (nnode[2 + i], nnode, ndistance)
                ndistance += 1 if i == 0 else nnode[-1]
            self._path[i] = node
            self._distance[i] = distance
        (path, _) = (self._path, self._distance)
        node = path[0]
        return (node[0], node[1])
    elif isinstance(pos, slice):
        (start, stop) = (pos.start, pos.stop)
        if start is None:
            start = 0
        elif start < 0:
            start += size
        if stop is None:
            stop = size
        elif stop < 0:
            stop += size
        node = self._head
        distance = 0
        for i in reversed(range(self.level)):
            nnode = node[2 + i]
            ndistance = distance + (1 if i == 0 else nnode[-1])
            while nnode is not self._tail and ndistance <= start + 1:
                (nnode, node, distance) = (nnode[2 + i], nnode, ndistance)
                ndistance += 1 if i == 0 else nnode[-1]
            self._path[i] = node
            self._distance[i] = distance
        (path, _) = (self._path, self._distance)

        def genpairs():
            pos = start
            node = path[0]
            while node is not self._tail and pos < stop:
                yield (node[0], node[1])
                node = node[2]
                pos += 1
        return genpairs()
    else:
        raise TypeError('expecting int or slice, got {0.__name__!r}'.format(type(pos)))
bluepass
positive
def parse_decl(self, text):
    """Parse declaration given in text and return an AST node for it."""
    self.lexer.input(text)
    <DeepExtract>
    try:
        self.cur_token = self.lexer.token()
        if self.cur_token is None:
            self.cur_token = lexer.Token(None, None, None)
    except lexer.LexerError as e:
        self._error('Lexer error at position {}: {}'.format(e.pos, e))
    </DeepExtract>
    <DeepExtract>
    name = self._match('ID')
    argnames = []
    while self.cur_token.type == 'ID':
        argnames.append(self.cur_token.val)
        self._get_next_token()
    self._match('=')
    expr = self._expr()
    if len(argnames) > 0:
        decl = ast.Decl(name, ast.LambdaExpr(argnames, expr))
    else:
        decl = ast.Decl(name, expr)
    </DeepExtract>
    if self.cur_token.type != None:
        <DeepExtract>
        raise ParseError('Unexpected token "{}" (at #{})'.format(self.cur_token.val, self.cur_token.pos))
        </DeepExtract>
    return decl
def parse_decl(self, text):
    """Parse declaration given in text and return an AST node for it."""
    self.lexer.input(text)
    try:
        self.cur_token = self.lexer.token()
        if self.cur_token is None:
            self.cur_token = lexer.Token(None, None, None)
    except lexer.LexerError as e:
        self._error('Lexer error at position {}: {}'.format(e.pos, e))
    name = self._match('ID')
    argnames = []
    while self.cur_token.type == 'ID':
        argnames.append(self.cur_token.val)
        self._get_next_token()
    self._match('=')
    expr = self._expr()
    if len(argnames) > 0:
        decl = ast.Decl(name, ast.LambdaExpr(argnames, expr))
    else:
        decl = ast.Decl(name, expr)
    if self.cur_token.type != None:
        raise ParseError('Unexpected token "{}" (at #{})'.format(self.cur_token.val, self.cur_token.pos))
    return decl
code-for-blog
positive
def _create_id_getter(field):

    def fn(self):
        id = self.get('id', 0)
        if not id:
            <DeepExtract>
            field += '_id_counter'
            scenes = list(bpy.data.scenes)
            scenes.sort(key=lambda s: getattr(s, field), reverse=True)
            max_id = getattr(scenes[0], field)
            id = max_id
            </DeepExtract>
            self['id'] = id
            <DeepExtract>
            field += '_id_counter'
            new_id = id + 1
            for scene in bpy.data.scenes:
                setattr(scene, field, new_id)
            return new_id
            </DeepExtract>
        lib_offset = 0
        if self.library:
            lib_offset = (self.library.id + 1) * LIB_ID_SPACE
        return id + lib_offset
    return fn
def _create_id_getter(field):

    def fn(self):
        id = self.get('id', 0)
        if not id:
            field += '_id_counter'
            scenes = list(bpy.data.scenes)
            scenes.sort(key=lambda s: getattr(s, field), reverse=True)
            max_id = getattr(scenes[0], field)
            id = max_id
            self['id'] = id
            field += '_id_counter'
            new_id = id + 1
            for scene in bpy.data.scenes:
                setattr(scene, field, new_id)
            return new_id
        lib_offset = 0
        if self.library:
            lib_offset = (self.library.id + 1) * LIB_ID_SPACE
        return id + lib_offset
    return fn
Blender-WMO-import-export-scripts
positive
def main(): <DeepExtract> parser = argparse.ArgumentParser(description='arg parser') parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training') parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training') parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for') parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader') parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment') parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from') parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model') parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none') parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training') parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn') parser.add_argument('--fix_random_seed', action='store_true', default=False, help='') parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs') parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training') parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint') parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='') parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER, help='set extra config keys if needed') parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes') parser.add_argument('--start_epoch', type=int, default=0, help='') parser.add_argument('--save_to_file', action='store_true', default=False, help='') args = parser.parse_args() cfg_from_yaml_file(args.cfg_file, cfg) cfg.TAG = Path(args.cfg_file).stem cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) if args.set_cfgs is not None: cfg_from_list(args.set_cfgs, cfg) (args, cfg) = (args, cfg) </DeepExtract> if args.launcher == 'none': dist_train = False total_gpus = 1 else: (total_gpus, cfg.LOCAL_RANK) = getattr(common_utils, 'init_dist_%s' % args.launcher)(args.tcp_port, args.local_rank, backend='nccl') dist_train = True if args.batch_size is None: args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU else: assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus' args.batch_size = args.batch_size // total_gpus args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs if args.fix_random_seed: common_utils.set_random_seed(666) output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag ckpt_dir = output_dir / 'ckpt' output_dir.mkdir(parents=True, exist_ok=True) ckpt_dir.mkdir(parents=True, exist_ok=True) log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK) logger.info('**********************Start logging**********************') gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL' logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list) if dist_train: logger.info('total_batch_size: %d' % (total_gpus * args.batch_size)) for (key, val) in vars(args).items(): logger.info('{:16} {}'.format(key, val)) log_config_to_file(cfg, 
logger=logger) if cfg.LOCAL_RANK == 0: os.system('cp %s %s' % (args.cfg_file, output_dir)) tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None (train_set, train_loader, train_sampler) = build_dataloader(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, batch_size=args.batch_size, dist=dist_train, workers=args.workers, logger=logger, training=True, merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch, total_epochs=args.epochs) model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set) if args.sync_bn: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model.cuda() optimizer = build_optimizer(model, cfg.OPTIMIZATION) start_epoch = it = 0 last_epoch = -1 if args.pretrained_model is not None: model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist, logger=logger) if args.ckpt is not None: (it, start_epoch) = model.load_params_with_optimizer(args.ckpt, to_cpu=dist, optimizer=optimizer, logger=logger) last_epoch = start_epoch + 1 else: ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth')) if len(ckpt_list) > 0: ckpt_list.sort(key=os.path.getmtime) (it, start_epoch) = model.load_params_with_optimizer(ckpt_list[-1], to_cpu=dist, optimizer=optimizer, logger=logger) last_epoch = start_epoch + 1 model.train() if dist_train: model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()]) logger.info(model) (lr_scheduler, lr_warmup_scheduler) = build_scheduler(optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs, last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION) logger.info('**********************Start training %s/%s(%s)**********************' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) train_model(model, optimizer, train_loader, model_func=model_fn_decorator(), lr_scheduler=lr_scheduler, optim_cfg=cfg.OPTIMIZATION, start_epoch=start_epoch, total_epochs=args.epochs, start_iter=it, rank=cfg.LOCAL_RANK, tb_log=tb_log, ckpt_save_dir=ckpt_dir, train_sampler=train_sampler, lr_warmup_scheduler=lr_warmup_scheduler, ckpt_save_interval=args.ckpt_save_interval, max_ckpt_save_num=args.max_ckpt_save_num, merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch) logger.info('**********************End training %s/%s(%s)**********************\n\n\n' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) logger.info('**********************Start evaluation %s/%s(%s)**********************' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) (test_set, test_loader, sampler) = build_dataloader(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, batch_size=args.batch_size, dist=dist_train, workers=args.workers, logger=logger, training=False) eval_output_dir = output_dir / 'eval' / 'eval_with_train' eval_output_dir.mkdir(parents=True, exist_ok=True) args.start_epoch = max(args.epochs - 10, 0) repeat_eval_ckpt(model.module if dist_train else model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_train) logger.info('**********************End evaluation %s/%s(%s)**********************' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
def main(): parser = argparse.ArgumentParser(description='arg parser') parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training') parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training') parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for') parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader') parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment') parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from') parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model') parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none') parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training') parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn') parser.add_argument('--fix_random_seed', action='store_true', default=False, help='') parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs') parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training') parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint') parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='') parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER, help='set extra config keys if needed') parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes') parser.add_argument('--start_epoch', type=int, default=0, help='') parser.add_argument('--save_to_file', action='store_true', default=False, help='') args = parser.parse_args() cfg_from_yaml_file(args.cfg_file, cfg) cfg.TAG = Path(args.cfg_file).stem cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) if args.set_cfgs is not None: cfg_from_list(args.set_cfgs, cfg) (args, cfg) = (args, cfg) if args.launcher == 'none': dist_train = False total_gpus = 1 else: (total_gpus, cfg.LOCAL_RANK) = getattr(common_utils, 'init_dist_%s' % args.launcher)(args.tcp_port, args.local_rank, backend='nccl') dist_train = True if args.batch_size is None: args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU else: assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus' args.batch_size = args.batch_size // total_gpus args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs if args.fix_random_seed: common_utils.set_random_seed(666) output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag ckpt_dir = output_dir / 'ckpt' output_dir.mkdir(parents=True, exist_ok=True) ckpt_dir.mkdir(parents=True, exist_ok=True) log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK) logger.info('**********************Start logging**********************') gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL' logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list) if dist_train: logger.info('total_batch_size: %d' % (total_gpus * args.batch_size)) for (key, val) in vars(args).items(): logger.info('{:16} {}'.format(key, val)) log_config_to_file(cfg, logger=logger) if 
cfg.LOCAL_RANK == 0: os.system('cp %s %s' % (args.cfg_file, output_dir)) tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None (train_set, train_loader, train_sampler) = build_dataloader(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, batch_size=args.batch_size, dist=dist_train, workers=args.workers, logger=logger, training=True, merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch, total_epochs=args.epochs) model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set) if args.sync_bn: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model.cuda() optimizer = build_optimizer(model, cfg.OPTIMIZATION) start_epoch = it = 0 last_epoch = -1 if args.pretrained_model is not None: model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist, logger=logger) if args.ckpt is not None: (it, start_epoch) = model.load_params_with_optimizer(args.ckpt, to_cpu=dist, optimizer=optimizer, logger=logger) last_epoch = start_epoch + 1 else: ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth')) if len(ckpt_list) > 0: ckpt_list.sort(key=os.path.getmtime) (it, start_epoch) = model.load_params_with_optimizer(ckpt_list[-1], to_cpu=dist, optimizer=optimizer, logger=logger) last_epoch = start_epoch + 1 model.train() if dist_train: model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()]) logger.info(model) (lr_scheduler, lr_warmup_scheduler) = build_scheduler(optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs, last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION) logger.info('**********************Start training %s/%s(%s)**********************' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) train_model(model, optimizer, train_loader, model_func=model_fn_decorator(), lr_scheduler=lr_scheduler, optim_cfg=cfg.OPTIMIZATION, start_epoch=start_epoch, total_epochs=args.epochs, start_iter=it, rank=cfg.LOCAL_RANK, tb_log=tb_log, ckpt_save_dir=ckpt_dir, train_sampler=train_sampler, lr_warmup_scheduler=lr_warmup_scheduler, ckpt_save_interval=args.ckpt_save_interval, max_ckpt_save_num=args.max_ckpt_save_num, merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch) logger.info('**********************End training %s/%s(%s)**********************\n\n\n' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) logger.info('**********************Start evaluation %s/%s(%s)**********************' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) (test_set, test_loader, sampler) = build_dataloader(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, batch_size=args.batch_size, dist=dist_train, workers=args.workers, logger=logger, training=False) eval_output_dir = output_dir / 'eval' / 'eval_with_train' eval_output_dir.mkdir(parents=True, exist_ok=True) args.start_epoch = max(args.epochs - 10, 0) repeat_eval_ckpt(model.module if dist_train else model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_train) logger.info('**********************End evaluation %s/%s(%s)**********************' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
3DIoUMatch
positive
def run_button(self): self.ensure_one() if not self.backend: raise UserError(_("No backend configured for download configuration '%s'.") % self.name) if not self.import_config_id: raise UserError(_("No invoice import configuration for download configuration '%s'.") % self.name) if self.credentials_stored(): logger.info('Credentials stored for %s, launching download', self.name) <DeepExtract> credentials = {'login': self.login, 'password': self.password} credentials = credentials </DeepExtract> <DeepExtract> self.ensure_one() amo = self.env['account.move'] aiio = self.env['account.invoice.import'] logger.info('Start to run invoice download %s (%s)', self.name, self.backend) if not self.backend: logger.error('Missing backend on invoice download %s', self.name) (invoice_ids, log_id) = ([], False) if not self.import_config_id: logger.error('Missing invoice import config on invoice download %s', self.name) (invoice_ids, log_id) = ([], False) logs = {'msg': [], 'result': 'success'} invoice_ids = [] invoices_dl = [] try: invoices_dl = self.download(credentials, logs) except Exception as e: logger.error('Failed to download invoice. Error: %s', e) logs['msg'].append(_('Failed to download invoice. Error: %s.') % e) logs['result'] = 'failure' company_id = self.company_id.id assert self.import_config_id.company_id.id == company_id import_config = self.import_config_id.convert_to_import_config() existing_refs = {} existing_invs = amo.search_read([('move_type', 'in', ('in_invoice', 'in_refund')), ('commercial_partner_id', '=', self.partner_id.id), ('company_id', '=', company_id), ('ref', '!=', False)], ['ref']) for existing_inv in existing_invs: existing_refs[existing_inv.get('ref')] = existing_inv['id'] logger.debug('existing_refs=%s', existing_refs) for inv_struc in invoices_dl: if isinstance(inv_struc, dict): parsed_inv = inv_struc elif isinstance(inv_struc, tuple): (invoice_file_b64, filename) = inv_struc parsed_inv = aiio.parse_invoice(invoice_file_b64, filename) else: logger.error('Technical error that should never happen: inv_struc is a %s', type(inv_struc)) continue parsed_inv['partner'] = {'recordset': self.partner_id} if parsed_inv.get('invoice_number') and parsed_inv['invoice_number'] in existing_refs: logger.info('Skipping invoice %s dated %s because it already exists in Odoo (ID %d)', parsed_inv['invoice_number'], parsed_inv.get('date'), existing_refs[parsed_inv['invoice_number']]) logs['msg'].append(_('Skipping invoice %s dated %s because it already exists in Odoo (ID %d).') % (parsed_inv['invoice_number'], parsed_inv.get('date'), existing_refs[parsed_inv['invoice_number']])) continue try: invoice = aiio.with_company(company_id).create_invoice(parsed_inv, import_config=import_config, origin="Download Bill '%s'" % self.name) except Exception as e: logs['msg'].append(_('Failed to create invoice. Error: %s. (parsed_inv=%s import_config=%s)') % (e, parsed_inv, import_config)) logs['result'] = 'failure' continue invoice_ids.append(invoice.id) logs['msg'].append(_('Invoice number %s dated %s created (ID %d).') % (parsed_inv.get('invoice_number', 'none'), parsed_inv.get('date', 'none'), invoice.id)) if logs['result'] == 'success': self.last_run = fields.Date.context_today(self) if not invoice_ids and logs['result'] == 'success': logs['msg'].append(_('No invoice downloaded.')) log = self.env['account.invoice.download.log'].create({'download_config_id': self.id, 'message': '\n'.join(logs['msg']), 'invoice_count': len(invoice_ids), 'result': logs['result']}) logger.info('End of invoice download %s (%s). IDs of created invoices: %s', self.name, self.backend, invoice_ids) (invoice_ids, log_id) = (invoice_ids, log.id) </DeepExtract> if invoice_ids: xmlid = 'account.action_move_in_invoice_type' action = self.env['ir.actions.act_window']._for_xml_id(xmlid) action.update({'views': False, 'view_id': False, 'domain': "[('id', 'in', %s)]" % invoice_ids}) else: xmlid = 'account_invoice_download.account_invoice_download_log_action' action = self.env['ir.actions.act_window']._for_xml_id(xmlid) action.update({'res_id': log_id, 'view_mode': 'form,tree', 'views': False}) return action else: xmlid = 'account_invoice_download.account_invoice_download_credentials_action' credentials_wiz_action = self.env['ir.actions.act_window']._for_xml_id(xmlid) return credentials_wiz_action
def run_button(self): self.ensure_one() if not self.backend: raise UserError(_("No backend configured for download configuration '%s'.") % self.name) if not self.import_config_id: raise UserError(_("No invoice import configuration for download configuration '%s'.") % self.name) if self.credentials_stored(): logger.info('Credentials stored for %s, launching download', self.name) credentials = {'login': self.login, 'password': self.password} credentials = credentials self.ensure_one() amo = self.env['account.move'] aiio = self.env['account.invoice.import'] logger.info('Start to run invoice download %s (%s)', self.name, self.backend) if not self.backend: logger.error('Missing backend on invoice download %s', self.name) (invoice_ids, log_id) = ([], False) if not self.import_config_id: logger.error('Missing invoice import config on invoice download %s', self.name) (invoice_ids, log_id) = ([], False) logs = {'msg': [], 'result': 'success'} invoice_ids = [] invoices_dl = [] try: invoices_dl = self.download(credentials, logs) except Exception as e: logger.error('Failed to download invoice. Error: %s', e) logs['msg'].append(_('Failed to download invoice. Error: %s.') % e) logs['result'] = 'failure' company_id = self.company_id.id assert self.import_config_id.company_id.id == company_id import_config = self.import_config_id.convert_to_import_config() existing_refs = {} existing_invs = amo.search_read([('move_type', 'in', ('in_invoice', 'in_refund')), ('commercial_partner_id', '=', self.partner_id.id), ('company_id', '=', company_id), ('ref', '!=', False)], ['ref']) for existing_inv in existing_invs: existing_refs[existing_inv.get('ref')] = existing_inv['id'] logger.debug('existing_refs=%s', existing_refs) for inv_struc in invoices_dl: if isinstance(inv_struc, dict): parsed_inv = inv_struc elif isinstance(inv_struc, tuple): (invoice_file_b64, filename) = inv_struc parsed_inv = aiio.parse_invoice(invoice_file_b64, filename) else: logger.error('Technical error that should never happen: inv_struc is a %s', type(inv_struc)) continue parsed_inv['partner'] = {'recordset': self.partner_id} if parsed_inv.get('invoice_number') and parsed_inv['invoice_number'] in existing_refs: logger.info('Skipping invoice %s dated %s because it already exists in Odoo (ID %d)', parsed_inv['invoice_number'], parsed_inv.get('date'), existing_refs[parsed_inv['invoice_number']]) logs['msg'].append(_('Skipping invoice %s dated %s because it already exists in Odoo (ID %d).') % (parsed_inv['invoice_number'], parsed_inv.get('date'), existing_refs[parsed_inv['invoice_number']])) continue try: invoice = aiio.with_company(company_id).create_invoice(parsed_inv, import_config=import_config, origin="Download Bill '%s'" % self.name) except Exception as e: logs['msg'].append(_('Failed to create invoice. Error: %s. (parsed_inv=%s import_config=%s)') % (e, parsed_inv, import_config)) logs['result'] = 'failure' continue invoice_ids.append(invoice.id) logs['msg'].append(_('Invoice number %s dated %s created (ID %d).') % (parsed_inv.get('invoice_number', 'none'), parsed_inv.get('date', 'none'), invoice.id)) if logs['result'] == 'success': self.last_run = fields.Date.context_today(self) if not invoice_ids and logs['result'] == 'success': logs['msg'].append(_('No invoice downloaded.')) log = self.env['account.invoice.download.log'].create({'download_config_id': self.id, 'message': '\n'.join(logs['msg']), 'invoice_count': len(invoice_ids), 'result': logs['result']}) logger.info('End of invoice download %s (%s). IDs of created invoices: %s', self.name, self.backend, invoice_ids) (invoice_ids, log_id) = (invoice_ids, log.id) if invoice_ids: xmlid = 'account.action_move_in_invoice_type' action = self.env['ir.actions.act_window']._for_xml_id(xmlid) action.update({'views': False, 'view_id': False, 'domain': "[('id', 'in', %s)]" % invoice_ids}) else: xmlid = 'account_invoice_download.account_invoice_download_log_action' action = self.env['ir.actions.act_window']._for_xml_id(xmlid) action.update({'res_id': log_id, 'view_mode': 'form,tree', 'views': False}) return action else: xmlid = 'account_invoice_download.account_invoice_download_credentials_action' credentials_wiz_action = self.env['ir.actions.act_window']._for_xml_id(xmlid) return credentials_wiz_action
edi
positive
def get_aws_security_credentials_from_assumed_profile(awsprofile): <DeepExtract> credentials = {'AccessKeyId': None, 'SecretAccessKey': None, 'Token': None} try: import botocore.session from botocore.exceptions import ProfileNotFound except ImportError: logging.error('Named profile credentials cannot be retrieved without botocore, please install botocore first.') credentials = credentials session = botocore.session.get_session() session.set_config_variable('profile', awsprofile) try: frozen_credentials = session.get_credentials().get_frozen_credentials() except ProfileNotFound as e: logging.error('%s, please add the [profile %s] section in the aws config file following %s and %s.' % (e, awsprofile, NAMED_PROFILE_HELP_URL, CONFIG_FILE_SETTINGS_HELP_URL)) credentials = credentials credentials['AccessKeyId'] = frozen_credentials.access_key credentials['SecretAccessKey'] = frozen_credentials.secret_key credentials['Token'] = frozen_credentials.token credentials = credentials </DeepExtract> if credentials['AccessKeyId']: return credentials logging.error('AWS security credentials not found via assuming named profile [%s] using botocore', awsprofile) return None
def get_aws_security_credentials_from_assumed_profile(awsprofile): credentials = {'AccessKeyId': None, 'SecretAccessKey': None, 'Token': None} try: import botocore.session from botocore.exceptions import ProfileNotFound except ImportError: logging.error('Named profile credentials cannot be retrieved without botocore, please install botocore first.') credentials = credentials session = botocore.session.get_session() session.set_config_variable('profile', awsprofile) try: frozen_credentials = session.get_credentials().get_frozen_credentials() except ProfileNotFound as e: logging.error('%s, please add the [profile %s] section in the aws config file following %s and %s.' % (e, awsprofile, NAMED_PROFILE_HELP_URL, CONFIG_FILE_SETTINGS_HELP_URL)) credentials = credentials credentials['AccessKeyId'] = frozen_credentials.access_key credentials['SecretAccessKey'] = frozen_credentials.secret_key credentials['Token'] = frozen_credentials.token credentials = credentials if credentials['AccessKeyId']: return credentials logging.error('AWS security credentials not found via assuming named profile [%s] using botocore', awsprofile) return None
efs-utils
positive
def update(self, app_id, app_data): try: session = db_base.get_session() app = session.query(App).filter_by(id=app_id).first() <DeepExtract> if 'location' in app_data: app.location = app_data['location'] if 'version' in app_data: app.version = app_data['version'] if 'dep_target' in app_data: app.dep_target = app_data['dep_target'] if 'status' in app_data: app.status = app_data['status'] if 'output_config' in app_data: app.output_config = app_data['output_config'] if 'env_id' in app_data: app.env_id = app_data['env_id'] if 'env_name' in app_data: app.env_name = app_data['env_name'] if 'app_yaml_contents' in app_data: app.app_yaml_contents = app_data['app_yaml_contents'] app = app </DeepExtract> session.commit() session.close() except IntegrityError as e: fmlogger.debug(e)
def update(self, app_id, app_data): try: session = db_base.get_session() app = session.query(App).filter_by(id=app_id).first() if 'location' in app_data: app.location = app_data['location'] if 'version' in app_data: app.version = app_data['version'] if 'dep_target' in app_data: app.dep_target = app_data['dep_target'] if 'status' in app_data: app.status = app_data['status'] if 'output_config' in app_data: app.output_config = app_data['output_config'] if 'env_id' in app_data: app.env_id = app_data['env_id'] if 'env_name' in app_data: app.env_name = app_data['env_name'] if 'app_yaml_contents' in app_data: app.app_yaml_contents = app_data['app_yaml_contents'] app = app session.commit() session.close() except IntegrityError as e: fmlogger.debug(e)
caastle
positive
def get_user_comments(self, username): """Get all the comments from a user""" <DeepExtract> if username is not None and (not isinstance(username, Sequence)): username = [username] matching_events = [] for event in self.history: if event['event'] == 'commented' or not 'commented': if username is None: matching_events.append(event) elif event['actor'] in username: matching_events.append(event) if len(matching_events) == 999: break matching_events = matching_events </DeepExtract> comments = [x['body'] for x in matching_events] return comments
def get_user_comments(self, username): """Get all the comments from a user""" if username is not None and (not isinstance(username, Sequence)): username = [username] matching_events = [] for event in self.history: if event['event'] == 'commented' or not 'commented': if username is None: matching_events.append(event) elif event['actor'] in username: matching_events.append(event) if len(matching_events) == 999: break matching_events = matching_events comments = [x['body'] for x in matching_events] return comments
ansibullbot
positive
def auto_find_n_components(M, X, y): self.N2score = {} self.step = 0 if M >= 1000: return M // 20 start_time = time() initial_Ns = [int(round(M / 3)), int(round(M * 0.67))] initial_Ns = [np.clip(initial_N, 1, M - 1) for initial_N in initial_Ns] initial_Ns = list(set(initial_Ns)) N2score = {initial_N: self.evaluate(initial_N, X, y) for initial_N in initial_Ns} if time() - start_time > self.budget: Ns = sorted(list(N2score.keys())) scores = [N2score[N] for N in Ns] best_N_ix = int(np.argmax(scores)) self.step = 0 self.N2score = N2score return Ns[best_N_ix] step = 0 for step in range(10): Ns = sorted(list(N2score.keys())) scores = [N2score[N] for N in Ns] best_N_ix = int(np.argmax(scores)) cand_Ns = [] if best_N_ix == 0: cand_Ns.append(max(1, Ns[best_N_ix] // 2)) else: cand_Ns.append(int(round(Ns[best_N_ix - 1] + (Ns[best_N_ix] - Ns[best_N_ix - 1]) / 2))) if best_N_ix != len(Ns) - 1: cand_Ns.append(int(round(Ns[best_N_ix] + (Ns[best_N_ix + 1] - Ns[best_N_ix]) / 2))) else: self.logger.debug(f'best_N_ix = {best_N_ix}, equals `len(Ns) - 1)`') cand_Ns = [np.clip(cand_N, 1, M - 1) for cand_N in cand_Ns] should_break = False for N in cand_Ns: if N in N2score: should_break = True break if N <= 0: should_break = True break if should_break: break should_break = False for cand_N in cand_Ns: <DeepExtract> reducer = self.get_reducer(cand_N) self.fit_reducer(reducer, X, y) (X_train, X_test, y_train, y_test) = train_test_split(X, y, random_state=self.random_state, test_size=0.25) pipeline = Pipeline([('reducer', reducer), ('lr', LogisticRegression(random_state=self.random_state) if self.problem_type == 'classification' else Ridge(random_state=self.random_state))]).fit(X_train, y_train) N2score[cand_N] = pipeline.score(X_test, y_test) </DeepExtract> if time() - start_time > self.budget: should_break = True break if should_break: break self.step = step Ns = sorted(list(N2score.keys())) scores = [N2score[N] for N in Ns] best_N_ix = int(np.argmax(scores)) self.N2score = N2score return Ns[best_N_ix]
def auto_find_n_components(M, X, y): self.N2score = {} self.step = 0 if M >= 1000: return M // 20 start_time = time() initial_Ns = [int(round(M / 3)), int(round(M * 0.67))] initial_Ns = [np.clip(initial_N, 1, M - 1) for initial_N in initial_Ns] initial_Ns = list(set(initial_Ns)) N2score = {initial_N: self.evaluate(initial_N, X, y) for initial_N in initial_Ns} if time() - start_time > self.budget: Ns = sorted(list(N2score.keys())) scores = [N2score[N] for N in Ns] best_N_ix = int(np.argmax(scores)) self.step = 0 self.N2score = N2score return Ns[best_N_ix] step = 0 for step in range(10): Ns = sorted(list(N2score.keys())) scores = [N2score[N] for N in Ns] best_N_ix = int(np.argmax(scores)) cand_Ns = [] if best_N_ix == 0: cand_Ns.append(max(1, Ns[best_N_ix] // 2)) else: cand_Ns.append(int(round(Ns[best_N_ix - 1] + (Ns[best_N_ix] - Ns[best_N_ix - 1]) / 2))) if best_N_ix != len(Ns) - 1: cand_Ns.append(int(round(Ns[best_N_ix] + (Ns[best_N_ix + 1] - Ns[best_N_ix]) / 2))) else: self.logger.debug(f'best_N_ix = {best_N_ix}, equals `len(Ns) - 1)`') cand_Ns = [np.clip(cand_N, 1, M - 1) for cand_N in cand_Ns] should_break = False for N in cand_Ns: if N in N2score: should_break = True break if N <= 0: should_break = True break if should_break: break should_break = False for cand_N in cand_Ns: reducer = self.get_reducer(cand_N) self.fit_reducer(reducer, X, y) (X_train, X_test, y_train, y_test) = train_test_split(X, y, random_state=self.random_state, test_size=0.25) pipeline = Pipeline([('reducer', reducer), ('lr', LogisticRegression(random_state=self.random_state) if self.problem_type == 'classification' else Ridge(random_state=self.random_state))]).fit(X_train, y_train) N2score[cand_N] = pipeline.score(X_test, y_test) if time() - start_time > self.budget: should_break = True break if should_break: break self.step = step Ns = sorted(list(N2score.keys())) scores = [N2score[N] for N in Ns] best_N_ix = int(np.argmax(scores)) self.N2score = N2score return Ns[best_N_ix]
auto-flow
positive
def run(self): """ Emit the Chunk instances which cover the underlying Array. The Array is divided into chunks with a size limit of MAX_CHUNK_SIZE which are emitted into all registered output queues. """ try: <DeepExtract> all_cuts = _all_slices_inner(self.array.shape, always_slices=True) all_cuts = [all_cuts[i] for i in self.iteration_order] cut_shape = tuple((len(cuts) for cuts in all_cuts)) inverse_order = [self.iteration_order.index(i) for i in range(len(self.iteration_order))] for cut_indices in np.ndindex(*cut_shape): key = tuple((cuts[i] for (cuts, i) in zip(all_cuts, cut_indices))) key = tuple((key[i] for i in inverse_order)) yield key </DeepExtract> for key in chunk_index: if self.masked: data = self.array[key].masked_array() else: data = self.array[key].ndarray() output_chunk = Chunk(key, data) <DeepExtract> if output_chunk is not None: for queue in self.output_queues: queue.put(output_chunk) </DeepExtract> except: <DeepExtract> for queue in self.output_queues: queue.put(QUEUE_ABORT) </DeepExtract> raise else: for queue in self.output_queues: queue.put(QUEUE_FINISHED)
def run(self): """ Emit the Chunk instances which cover the underlying Array. The Array is divided into chunks with a size limit of MAX_CHUNK_SIZE which are emitted into all registered output queues. """ try: all_cuts = _all_slices_inner(self.array.shape, always_slices=True) all_cuts = [all_cuts[i] for i in self.iteration_order] cut_shape = tuple((len(cuts) for cuts in all_cuts)) inverse_order = [self.iteration_order.index(i) for i in range(len(self.iteration_order))] for cut_indices in np.ndindex(*cut_shape): key = tuple((cuts[i] for (cuts, i) in zip(all_cuts, cut_indices))) key = tuple((key[i] for i in inverse_order)) yield key for key in chunk_index: if self.masked: data = self.array[key].masked_array() else: data = self.array[key].ndarray() output_chunk = Chunk(key, data) if output_chunk is not None: for queue in self.output_queues: queue.put(output_chunk) except: for queue in self.output_queues: queue.put(QUEUE_ABORT) raise else: for queue in self.output_queues: queue.put(QUEUE_FINISHED)
biggus
positive
def Dump(self): """ save dependency relation of files """ dumped_file = os.path.join(self._root, '.BROC.FILE.DEPS') for pathname in self._cache: if len(self._cache[pathname].reverse_deps) <= 0: <DeepExtract> if self._cache[pathname].build is True: infos = '\t' * 0 + '[' + pathname + ']\n' else: infos = '\t' * 0 + pathname + '\n' self._dumped_str += infos for deps_pathname in self._cache[pathname].deps: self._dump(deps_pathname.Pathname(), 0 + 1) </DeepExtract> try: dir_name = os.path.dirname(dumped_file) Function.Mkdir(dir_name) with open(dumped_file, 'w') as f: f.write('' + self._dumped_str) except IOError as err: self._logger.LevPrint('ERROR', 'save file dependency failed(%s)' % err)
def Dump(self): """ save dependency relation of files """ dumped_file = os.path.join(self._root, '.BROC.FILE.DEPS') for pathname in self._cache: if len(self._cache[pathname].reverse_deps) <= 0: if self._cache[pathname].build is True: infos = '\t' * 0 + '[' + pathname + ']\n' else: infos = '\t' * 0 + pathname + '\n' self._dumped_str += infos for deps_pathname in self._cache[pathname].deps: self._dump(deps_pathname.Pathname(), 0 + 1) try: dir_name = os.path.dirname(dumped_file) Function.Mkdir(dir_name) with open(dumped_file, 'w') as f: f.write('' + self._dumped_str) except IOError as err: self._logger.LevPrint('ERROR', 'save file dependency failed(%s)' % err)
broc
positive
def flash_stream(filedata, addr=0): spi.flash_open() addr_mask = spi.flash_erase_size - 1 if addr & addr_mask: print('addr must be rounded to flash_erase_size = %d bytes (& 0x%06X)' % (spi.flash_erase_size, 16777215 & ~addr_mask)) return False addr = addr & 16777215 & ~addr_mask bytes_uploaded = 0 stopwatch_start() count_total = 0 count_erase = 0 count_write = 0 file_block = bytearray(spi.flash_erase_size) flash_block = bytearray(spi.flash_read_size) progress_char = '.' while filedata.readinto(file_block): retry = 3 while retry >= 0: must = 0 flash_rd = 0 while flash_rd < spi.flash_erase_size: spi.flash_read_block(flash_block, addr + bytes_uploaded + flash_rd) <DeepExtract> flash_block = ptr8(addressof(flash_block)) file_block = ptr8(addressof(file_block[flash_rd:flash_rd + spi.flash_read_size])) l = int(len(file_block[flash_rd:flash_rd + spi.flash_read_size])) for i in range(l): if flash_block[i] & file_block[i] != file_block[i]: must = 1 if must: for i in range(l): if file_block[i] != 255: must = 3 else: for i in range(l): if flash_block[i] != file_block[i]: must = 2 must = must </DeepExtract> flash_rd += spi.flash_read_size write_addr = addr + bytes_uploaded if must == 0: if write_addr & 65535 == 0: print('\r0x%06X %dK %c' % (write_addr, spi.flash_erase_size >> 10, progress_char), end='') else: print(progress_char, end='') progress_char = '.' count_total += 1 bytes_uploaded += len(file_block) break retry -= 1 if must & 1: spi.flash_erase_block(write_addr) count_erase += 1 progress_char = 'e' if must & 2: block_addr = 0 next_block_addr = 0 while next_block_addr < len(file_block): next_block_addr = block_addr + spi.flash_write_size spi.flash_write_block(file_block[block_addr:next_block_addr], addr=write_addr) write_addr += spi.flash_write_size block_addr = next_block_addr count_write += 1 progress_char = 'w' if retry < 0: break print('\r', end='') stopwatch_stop(bytes_uploaded) print('%dK blocks: %d total, %d erased, %d written.' % (spi.flash_erase_size >> 10, count_total, count_erase, count_write)) return retry >= 0
def flash_stream(filedata, addr=0): spi.flash_open() addr_mask = spi.flash_erase_size - 1 if addr & addr_mask: print('addr must be rounded to flash_erase_size = %d bytes (& 0x%06X)' % (spi.flash_erase_size, 16777215 & ~addr_mask)) return False addr = addr & 16777215 & ~addr_mask bytes_uploaded = 0 stopwatch_start() count_total = 0 count_erase = 0 count_write = 0 file_block = bytearray(spi.flash_erase_size) flash_block = bytearray(spi.flash_read_size) progress_char = '.' while filedata.readinto(file_block): retry = 3 while retry >= 0: must = 0 flash_rd = 0 while flash_rd < spi.flash_erase_size: spi.flash_read_block(flash_block, addr + bytes_uploaded + flash_rd) flash_block = ptr8(addressof(flash_block)) file_block = ptr8(addressof(file_block[flash_rd:flash_rd + spi.flash_read_size])) l = int(len(file_block[flash_rd:flash_rd + spi.flash_read_size])) for i in range(l): if flash_block[i] & file_block[i] != file_block[i]: must = 1 if must: for i in range(l): if file_block[i] != 255: must = 3 else: for i in range(l): if flash_block[i] != file_block[i]: must = 2 must = must flash_rd += spi.flash_read_size write_addr = addr + bytes_uploaded if must == 0: if write_addr & 65535 == 0: print('\r0x%06X %dK %c' % (write_addr, spi.flash_erase_size >> 10, progress_char), end='') else: print(progress_char, end='') progress_char = '.' count_total += 1 bytes_uploaded += len(file_block) break retry -= 1 if must & 1: spi.flash_erase_block(write_addr) count_erase += 1 progress_char = 'e' if must & 2: block_addr = 0 next_block_addr = 0 while next_block_addr < len(file_block): next_block_addr = block_addr + spi.flash_write_size spi.flash_write_block(file_block[block_addr:next_block_addr], addr=write_addr) write_addr += spi.flash_write_size block_addr = next_block_addr count_write += 1 progress_char = 'w' if retry < 0: break print('\r', end='') stopwatch_stop(bytes_uploaded) print('%dK blocks: %d total, %d erased, %d written.' % (spi.flash_erase_size >> 10, count_total, count_erase, count_write)) return retry >= 0
esp32ecp5
positive
def prorate_transfer(self, amount, provider): """ Return Stripe processing fee associated to a transfer, i.e. 0% for Stand-Alone Stripe accounts and 0.5% for managed accounts. """ if False: <DeepExtract> kwargs = self._prepare_request() if self.mode in (self.FORWARD, self.REMOTE): if not provider.processor_deposit_key: raise ProcessorSetupError((_('%(organization)s is not connected to a Stripe account.') + '[_prepare_transfer_request]') % {'organization': provider}, provider) kwargs.update({'stripe_account': provider.processor_deposit_key}) kwargs = kwargs </DeepExtract> rcp = stripe.Account.retrieve(**kwargs) if rcp.managed: return (amount * 50 + 5000) // 10000 return 0
def prorate_transfer(self, amount, provider): """ Return Stripe processing fee associated to a transfer, i.e. 0% for Stand-Alone Stripe accounts and 0.5% for managed accounts. """ if False: kwargs = self._prepare_request() if self.mode in (self.FORWARD, self.REMOTE): if not provider.processor_deposit_key: raise ProcessorSetupError((_('%(organization)s is not connected to a Stripe account.') + '[_prepare_transfer_request]') % {'organization': provider}, provider) kwargs.update({'stripe_account': provider.processor_deposit_key}) kwargs = kwargs rcp = stripe.Account.retrieve(**kwargs) if rcp.managed: return (amount * 50 + 5000) // 10000 return 0
djaodjin-saas
positive
def main(): cookiecutter_orig_path = Path(__file__).parent / 'plugins' / 'search_template' assert cookiecutter_orig_path.is_dir(), f'No such directory -> {cookiecutter_orig_path}' def get_logo(plugin_name) -> Optional[Path]: """Get the corresponding logo or None if the latter is not found.""" path_to_logos = Path(__file__).parent / 'ddgr_logos' all_logos = [str(p) for p in path_to_logos.iterdir()] r = re.compile(f'{str(path_to_logos / get_plugin_name_wo_search(plugin_name))}\\.[png\\|jpg\\|svg]') matching_logos = list(filter(r.search, all_logos)) if len(matching_logos): logo_path = Path(matching_logos[0]) else: logo_path = Path(__file__).parent / 'ddgr_logos' / 'default.svg' return logo_path def get_output_dir(plugin_name) -> Path: """Get the output directory for the plugin at hand.""" return Path(__file__).parent / 'plugins' / plugin_name oldpwd = Path('.').absolute() os.chdir(Path(__file__).parent) <DeepExtract> res = requests.get('https://raw.githubusercontent.com/jarun/googler/master/auto-completion/googler_at/googler_at') alias_lines = [l for l in res.text.splitlines() if 'alias' in l and (not l.lstrip().startswith('#'))] ddgr_plugins = {} for l in alias_lines: (plugin_name, ddgr_at, trigger) = parse_ddgr_at_line(l) if trigger is None: continue plugin_name = '_'.join(['search', plugin_name]) ddgr_plugins[plugin_name] = {'ddgr_at': ddgr_at, 'trigger': trigger} if generate_plugins_only_for: ddgr_plugins = {g[0]: g[1] for g in ddgr_plugins.items() if get_plugin_name_wo_search(g[0]) in generate_plugins_only_for} plugins = ddgr_plugins </DeepExtract> plugins.update(custom_plugins) for plugin in plugins.items(): plugin_name = plugin[0] trigger = plugin[1]['trigger'] ddgr_at = plugin[1]['ddgr_at'] url_handler = plugin[1].get('url_handler', '') url_handler_description = plugin[1].get('url_handler_description', '') url_handler_check_cmd = plugin[1].get('url_handler_check_cmd', '') show_on_top_no_trigger = plugin[1].get('show_on_top_no_trigger', False) print() print('===============================================') print(f'Generating plugin -> {plugin_name}') print('===============================================') print() random_int = secrets.randbits(32) cookiecutter_tmp = PurePosixPath('/tmp') / f'albert-cookiecutter-{random_int}' shutil.copytree(cookiecutter_orig_path, cookiecutter_tmp) print(f'- Cookiecutter template directory -> {cookiecutter_tmp}') print(f'- Plugin output directory-> {get_output_dir(plugin_name)}') cookiecutter(template=str(cookiecutter_tmp), no_input=True, overwrite_if_exists=True, extra_context=get_cookiecutter_directives(plugin_name=plugin_name, trigger=trigger, ddgr_at=ddgr_at, url_handler=url_handler, url_handler_description=url_handler_description, url_handler_check_cmd=url_handler_check_cmd, show_on_top_no_trigger=show_on_top_no_trigger), output_dir=get_output_dir(plugin_name).parent) ext = get_logo(plugin_name).suffix shutil.copy(get_logo(plugin_name), get_output_dir(plugin_name) / f'{plugin_name}{ext}') os.chdir(oldpwd)
def main(): cookiecutter_orig_path = Path(__file__).parent / 'plugins' / 'search_template' assert cookiecutter_orig_path.is_dir(), f'No such directory -> {cookiecutter_orig_path}' def get_logo(plugin_name) -> Optional[Path]: """Get the corresponding logo or None if the latter is not found.""" path_to_logos = Path(__file__).parent / 'ddgr_logos' all_logos = [str(p) for p in path_to_logos.iterdir()] r = re.compile(f'{str(path_to_logos / get_plugin_name_wo_search(plugin_name))}\\.[png\\|jpg\\|svg]') matching_logos = list(filter(r.search, all_logos)) if len(matching_logos): logo_path = Path(matching_logos[0]) else: logo_path = Path(__file__).parent / 'ddgr_logos' / 'default.svg' return logo_path def get_output_dir(plugin_name) -> Path: """Get the output directory for the plugin at hand.""" return Path(__file__).parent / 'plugins' / plugin_name oldpwd = Path('.').absolute() os.chdir(Path(__file__).parent) res = requests.get('https://raw.githubusercontent.com/jarun/googler/master/auto-completion/googler_at/googler_at') alias_lines = [l for l in res.text.splitlines() if 'alias' in l and (not l.lstrip().startswith('#'))] ddgr_plugins = {} for l in alias_lines: (plugin_name, ddgr_at, trigger) = parse_ddgr_at_line(l) if trigger is None: continue plugin_name = '_'.join(['search', plugin_name]) ddgr_plugins[plugin_name] = {'ddgr_at': ddgr_at, 'trigger': trigger} if generate_plugins_only_for: ddgr_plugins = {g[0]: g[1] for g in ddgr_plugins.items() if get_plugin_name_wo_search(g[0]) in generate_plugins_only_for} plugins = ddgr_plugins plugins.update(custom_plugins) for plugin in plugins.items(): plugin_name = plugin[0] trigger = plugin[1]['trigger'] ddgr_at = plugin[1]['ddgr_at'] url_handler = plugin[1].get('url_handler', '') url_handler_description = plugin[1].get('url_handler_description', '') url_handler_check_cmd = plugin[1].get('url_handler_check_cmd', '') show_on_top_no_trigger = plugin[1].get('show_on_top_no_trigger', False) print() print('===============================================') print(f'Generating plugin -> {plugin_name}') print('===============================================') print() random_int = secrets.randbits(32) cookiecutter_tmp = PurePosixPath('/tmp') / f'albert-cookiecutter-{random_int}' shutil.copytree(cookiecutter_orig_path, cookiecutter_tmp) print(f'- Cookiecutter template directory -> {cookiecutter_tmp}') print(f'- Plugin output directory-> {get_output_dir(plugin_name)}') cookiecutter(template=str(cookiecutter_tmp), no_input=True, overwrite_if_exists=True, extra_context=get_cookiecutter_directives(plugin_name=plugin_name, trigger=trigger, ddgr_at=ddgr_at, url_handler=url_handler, url_handler_description=url_handler_description, url_handler_check_cmd=url_handler_check_cmd, show_on_top_no_trigger=show_on_top_no_trigger), output_dir=get_output_dir(plugin_name).parent) ext = get_logo(plugin_name).suffix shutil.copy(get_logo(plugin_name), get_output_dir(plugin_name) / f'{plugin_name}{ext}') os.chdir(oldpwd)
awesome-albert-plugins
positive
def store_tiles(self, tile_list): """Store tiles in the database and file store. 'tile_list' is a list of tile_contents objects. This method will create the corresponding database records and mark tiles for creation when the transaction commits. """ tile_record_list = [] for tile_contents in tile_list: <DeepExtract> self.collection.mark_tile_for_creation(tile_contents) tile_record = TileRecord(self.collection, self, tile_contents) </DeepExtract> tile_record_list.append(tile_record) return tile_record_list
def store_tiles(self, tile_list): """Store tiles in the database and file store. 'tile_list' is a list of tile_contents objects. This method will create the corresponding database records and mark tiles for creation when the transaction commits. """ tile_record_list = [] for tile_contents in tile_list: self.collection.mark_tile_for_creation(tile_contents) tile_record = TileRecord(self.collection, self, tile_contents) tile_record_list.append(tile_record) return tile_record_list
agdc
positive
def execAsr_i64(binary): <DeepExtract> rdKey = utilFunc.getRegKeyByStringKey(binary[27:32]) rnKey = utilFunc.getRegKeyByStringKey(binary[22:27]) immr = binary[10:16] imms = binary[16:22] rnVal = utilFunc.getRegValueByStringkey(binary[22:27], '0') (rdKey, rnKey, rnVal, immr, imms) = (rdKey, rnKey, rnVal, immr, imms) </DeepExtract> if imms == '111111': shiftVal = int(immr, 2) instr = 'ASR x' + str(rdKey) + ', x' + str(rnKey) + ', #' + str(shiftVal) rd = utilFunc.asr(rnVal, shiftVal) utilFunc.finalize(rdKey, rd, instr, '0')
def execAsr_i64(binary): rdKey = utilFunc.getRegKeyByStringKey(binary[27:32]) rnKey = utilFunc.getRegKeyByStringKey(binary[22:27]) immr = binary[10:16] imms = binary[16:22] rnVal = utilFunc.getRegValueByStringkey(binary[22:27], '0') (rdKey, rnKey, rnVal, immr, imms) = (rdKey, rnKey, rnVal, immr, imms) if imms == '111111': shiftVal = int(immr, 2) instr = 'ASR x' + str(rdKey) + ', x' + str(rnKey) + ', #' + str(shiftVal) rd = utilFunc.asr(rnVal, shiftVal) utilFunc.finalize(rdKey, rd, instr, '0')
ARMV8_Simulator
positive
def load_payload(self, payload, serializer=None): """Loads the encoded object. This function raises :class:`BadPayload` if the payload is not valid. The `serializer` parameter can be used to override the serializer stored on the class. The encoded payload is always byte based. """ if serializer is None: serializer = self.serializer is_text = self.is_text_serializer else: <DeepExtract> is_text = isinstance(serializer.dumps({}), text_type) </DeepExtract> try: if is_text: payload = payload.decode('utf-8') return serializer.loads(payload) except Exception as e: raise BadPayload('Could not load the payload because an exception occurred on unserializing the data', original_error=e)
def load_payload(self, payload, serializer=None): """Loads the encoded object. This function raises :class:`BadPayload` if the payload is not valid. The `serializer` parameter can be used to override the serializer stored on the class. The encoded payload is always byte based. """ if serializer is None: serializer = self.serializer is_text = self.is_text_serializer else: is_text = isinstance(serializer.dumps({}), text_type) try: if is_text: payload = payload.decode('utf-8') return serializer.loads(payload) except Exception as e: raise BadPayload('Could not load the payload because an exception occurred on unserializing the data', original_error=e)
appengine-toolkit
positive
def one_variation_unit_removed(unit_type): if unit_type == UnitType.alias: <DeepExtract> self.nb_variation_units -= 1 if self.nb_variation_units < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'unit declarations below 0.') self.nb_variation_aliases -= 1 if self.nb_variation_aliases < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'alias declarations below 0.') </DeepExtract> elif unit_type == UnitType.slot: <DeepExtract> self.nb_variation_units -= 1 if self.nb_variation_units < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'unit declarations below 0.') self.nb_variation_slots -= 1 if self.nb_variation_slots < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'slot declarations below 0.') </DeepExtract> elif unit_type == UnitType.intent: <DeepExtract> self.nb_variation_units -= 1 if self.nb_variation_units < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'unit declarations below 0.') self.nb_variation_intents -= 1 if self.nb_variation_intents < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'intent declarations below 0.') </DeepExtract> else: raise TypeError('Tried to increase the statistics for a variation of ' + 'unit declarations with an unknown unit type (' + str(unit_type) + ').')
def one_variation_unit_removed(unit_type): if unit_type == UnitType.alias: self.nb_variation_units -= 1 if self.nb_variation_units < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'unit declarations below 0.') self.nb_variation_aliases -= 1 if self.nb_variation_aliases < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'alias declarations below 0.') elif unit_type == UnitType.slot: self.nb_variation_units -= 1 if self.nb_variation_units < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'unit declarations below 0.') self.nb_variation_slots -= 1 if self.nb_variation_slots < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'slot declarations below 0.') elif unit_type == UnitType.intent: self.nb_variation_units -= 1 if self.nb_variation_units < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'unit declarations below 0.') self.nb_variation_intents -= 1 if self.nb_variation_intents < 0: raise InvalidStatsState('Tried to decrement statistics for variations of ' + 'intent declarations below 0.') else: raise TypeError('Tried to increase the statistics for a variation of ' + 'unit declarations with an unknown unit type (' + str(unit_type) + ').')
Chatette
positive
def point_target(proposals_list, valid_flag_list, gt_bboxes_list, img_metas, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, sampling=True, unmap_outputs=True): """Compute corresponding GT box and classification targets for proposals. Args: points_list (list[list]): Multi level points of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. cfg (dict): train sample configs. Returns: tuple """ num_imgs = len(img_metas) assert len(proposals_list) == len(valid_flag_list) == num_imgs num_level_proposals = [points.size(0) for points in proposals_list[0]] for i in range(num_imgs): assert len(proposals_list[i]) == len(valid_flag_list[i]) proposals_list[i] = torch.cat(proposals_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_gt, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(point_target_single, proposals_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, cfg=cfg, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs) if any([labels is None for labels in all_labels]): return None num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) <DeepExtract> all_labels = torch.stack(all_labels, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_labels[:, start:end].squeeze(0)) start = end labels_list = level_targets </DeepExtract> <DeepExtract> all_label_weights = torch.stack(all_label_weights, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_label_weights[:, start:end].squeeze(0)) start = end label_weights_list = level_targets </DeepExtract> <DeepExtract> all_bbox_gt = torch.stack(all_bbox_gt, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_bbox_gt[:, start:end].squeeze(0)) start = end bbox_gt_list = level_targets </DeepExtract> <DeepExtract> all_proposals = torch.stack(all_proposals, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_proposals[:, start:end].squeeze(0)) start = end proposals_list = level_targets </DeepExtract> <DeepExtract> all_proposal_weights = torch.stack(all_proposal_weights, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_proposal_weights[:, start:end].squeeze(0)) start = end proposal_weights_list = level_targets </DeepExtract> return (labels_list, label_weights_list, bbox_gt_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg)
def point_target(proposals_list, valid_flag_list, gt_bboxes_list, img_metas, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, sampling=True, unmap_outputs=True): """Compute corresponding GT box and classification targets for proposals. Args: points_list (list[list]): Multi level points of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. cfg (dict): train sample configs. Returns: tuple """ num_imgs = len(img_metas) assert len(proposals_list) == len(valid_flag_list) == num_imgs num_level_proposals = [points.size(0) for points in proposals_list[0]] for i in range(num_imgs): assert len(proposals_list[i]) == len(valid_flag_list[i]) proposals_list[i] = torch.cat(proposals_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_gt, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(point_target_single, proposals_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, cfg=cfg, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs) if any([labels is None for labels in all_labels]): return None num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) all_labels = torch.stack(all_labels, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_labels[:, start:end].squeeze(0)) start = end labels_list = level_targets all_label_weights = torch.stack(all_label_weights, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_label_weights[:, start:end].squeeze(0)) start = end label_weights_list = level_targets all_bbox_gt = torch.stack(all_bbox_gt, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_bbox_gt[:, start:end].squeeze(0)) start = end bbox_gt_list = level_targets all_proposals = torch.stack(all_proposals, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_proposals[:, start:end].squeeze(0)) start = end proposals_list = level_targets all_proposal_weights = torch.stack(all_proposal_weights, 0) level_targets = [] start = 0 for n in num_level_proposals: end = start + n level_targets.append(all_proposal_weights[:, start:end].squeeze(0)) start = end proposal_weights_list = level_targets return (labels_list, label_weights_list, bbox_gt_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg)
DNL-Object-Detection
positive
def create_vm(self): """Creates and starts a VM on Provider""" chars = string.letters + string.digits while True: random_string = ''.join([random.choice(chars) for i in range(8)]) vm_name = 'celerydworker%s' % random_string <DeepExtract> images = self.conn.list_images() for image in images: if self.image_id == unicode(image.id): node_image = image </DeepExtract> try: new_vm = self.conn.create_node(name=vm_name, image=node_image, size=self.sizes[0]) break except Exception as e: if 'Server name already in use' in e.args[0]: pass in_band_worker_node = InBandWorkerNode(instance_id=new_vm.id, provider=self) in_band_worker_node.save() start_celeyrd_on_vm(in_band_worker_node.pk) return in_band_worker_node
def create_vm(self): """Creates and starts a VM on Provider""" chars = string.letters + string.digits while True: random_string = ''.join([random.choice(chars) for i in range(8)]) vm_name = 'celerydworker%s' % random_string images = self.conn.list_images() for image in images: if self.image_id == unicode(image.id): node_image = image try: new_vm = self.conn.create_node(name=vm_name, image=node_image, size=self.sizes[0]) break except Exception as e: if 'Server name already in use' in e.args[0]: pass in_band_worker_node = InBandWorkerNode(instance_id=new_vm.id, provider=self) in_band_worker_node.save() start_celeyrd_on_vm(in_band_worker_node.pk) return in_band_worker_node
CeleryManagement
positive
def cube_linf_attack(model, x, y, corr_classified, eps, n_iters, p_init, metrics_path): """ A simple, but efficient black-box attack that just adds random steps of values in {-2eps, 0, 2eps} (i.e., the considered points are always corners). Note that considering just {-eps, 0, eps} works terribly. The random change is added if the loss decreases for a particular point. The only disadvantage of this method is that it will never find decision regions inside the Linf-ball which do not intersect any corner. But tight LRTE suggests that this doesn't happen. `f` is any function that has f.fmargin() method that returns class scores. `eps` can be a scalar or a vector of size X.shape[0]. """ spatial = True tied_delta = True tied_colors = False numpy.random.seed(0) (min_val, max_val) = (0, 1 if eps < 1 else 255) (c, h, w) = x.shape[1:] n_features = c * h * w n_ex_total = x.shape[0] x_best = numpy.clip(x + numpy.random.choice([-eps, eps], size=[x.shape[0], c, 1, w]), min_val, max_val) margin_min = model.fmargin(x_best, y) n_queries = numpy.ones(x.shape[0]) time_start = time.time() s_init = int(numpy.sqrt(p_init * n_features / c)) metrics = numpy.zeros([n_iters, 7]) for i_iter in range(n_iters): idx_to_fool = numpy.array(list(range(x.shape[0]))) (x_curr, x_best_curr) = (x[idx_to_fool], x_best[idx_to_fool]) (y_curr, margin_min_curr) = (y[idx_to_fool], margin_min[idx_to_fool]) if spatial: <DeepExtract> if 10 < i_iter <= 50: p = p_init / 2 elif 50 < i_iter <= 200: p = p_init / 4 elif 200 < i_iter <= 500: p = p_init / 8 elif 500 < i_iter <= 10000: p = p_init / 16 elif 10000 < i_iter <= 15000: p = p_init / 32 elif 15000 < i_iter <= 20000: p = p_init / 64 elif 20000 < i_iter: p = 0 else: p = p_init p = p </DeepExtract> s = max(int(round(numpy.sqrt(p * n_features / c))), 1) center_h = numpy.random.randint(0, h - s) center_w = numpy.random.randint(0, w - s) new_deltas = numpy.zeros(x_curr.shape[1:]) size = [1, 1, 1] if tied_delta and tied_colors else [c, 1, 1] if tied_delta else [c, s, s] new_deltas[:, center_h:center_h + s, center_w:center_w + s] = numpy.random.choice([-2 * eps, 2 * eps], size=size) hps_str = 'p={} s={}->{}'.format(p_init, s_init, s) else: <DeepExtract> if 10 < i_iter <= 50: p = p_init / 2 elif 50 < i_iter <= 200: p = p_init / 4 elif 200 < i_iter <= 500: p = p_init / 8 elif 500 < i_iter <= 10000: p = p_init / 16 elif 10000 < i_iter <= 15000: p = p_init / 32 elif 15000 < i_iter <= 20000: p = p_init / 64 elif 20000 < i_iter: p = 0 else: p = p_init p = p </DeepExtract> new_deltas = numpy.random.choice([-2 * eps, 0, 2 * eps], p=[p / 2, 1 - p, p / 2], size=[1, *x_curr.shape[1:]]) hps_str = 'p={}->{}'.format(p_init, p) x_new = x_best_curr + new_deltas x_new = numpy.clip(x_new, x_curr - eps, x_curr + eps) x_new = numpy.clip(x_new, min_val, max_val) margin = model.fmargin(x_new, y_curr) idx_improved = margin < margin_min_curr margin_min[idx_to_fool] = idx_improved * margin + ~idx_improved * margin_min_curr idx_improved = numpy.reshape(idx_improved, [-1, *[1] * len(x.shape[:-1])]) x_best[idx_to_fool] = idx_improved * x_new + ~idx_improved * x_best_curr n_queries[idx_to_fool] += 1 acc = (margin_min > 0.0).sum() / n_ex_total acc_corr = (margin_min > 0.0).mean() (mean_nq, mean_nq_ae, median_nq) = (numpy.mean(n_queries), numpy.mean(n_queries[margin_min <= 0]), numpy.median(n_queries)) time_total = time.time() - time_start print('{}: marign_min={:.2} acc={:.2%} acc_corr={:.2%} avg#q={:.2f} avg#q_ae={:.2f} med#q={:.1f} ({}, n_ex={}, eps={:.3f}, {:.2f}s)'.format(i_iter + 1, numpy.mean(margin_min), acc, acc_corr, mean_nq, mean_nq_ae, median_nq, hps_str, x.shape[0], eps, time_total)) metrics[i_iter] = [acc, acc_corr, mean_nq, mean_nq_ae, median_nq, margin_min.mean(), time_total] return (n_queries, x_best)
def cube_linf_attack(model, x, y, corr_classified, eps, n_iters, p_init, metrics_path): """ A simple, but efficient black-box attack that just adds random steps of values in {-2eps, 0, 2eps} (i.e., the considered points are always corners). Note that considering just {-eps, 0, eps} works terribly. The random change is added if the loss decreases for a particular point. The only disadvantage of this method is that it will never find decision regions inside the Linf-ball which do not intersect any corner. But tight LRTE suggests that this doesn't happen. `f` is any function that has f.fmargin() method that returns class scores. `eps` can be a scalar or a vector of size X.shape[0]. """ spatial = True tied_delta = True tied_colors = False numpy.random.seed(0) (min_val, max_val) = (0, 1 if eps < 1 else 255) (c, h, w) = x.shape[1:] n_features = c * h * w n_ex_total = x.shape[0] x_best = numpy.clip(x + numpy.random.choice([-eps, eps], size=[x.shape[0], c, 1, w]), min_val, max_val) margin_min = model.fmargin(x_best, y) n_queries = numpy.ones(x.shape[0]) time_start = time.time() s_init = int(numpy.sqrt(p_init * n_features / c)) metrics = numpy.zeros([n_iters, 7]) for i_iter in range(n_iters): idx_to_fool = numpy.array(list(range(x.shape[0]))) (x_curr, x_best_curr) = (x[idx_to_fool], x_best[idx_to_fool]) (y_curr, margin_min_curr) = (y[idx_to_fool], margin_min[idx_to_fool]) if spatial: if 10 < i_iter <= 50: p = p_init / 2 elif 50 < i_iter <= 200: p = p_init / 4 elif 200 < i_iter <= 500: p = p_init / 8 elif 500 < i_iter <= 10000: p = p_init / 16 elif 10000 < i_iter <= 15000: p = p_init / 32 elif 15000 < i_iter <= 20000: p = p_init / 64 elif 20000 < i_iter: p = 0 else: p = p_init p = p s = max(int(round(numpy.sqrt(p * n_features / c))), 1) center_h = numpy.random.randint(0, h - s) center_w = numpy.random.randint(0, w - s) new_deltas = numpy.zeros(x_curr.shape[1:]) size = [1, 1, 1] if tied_delta and tied_colors else [c, 1, 1] if tied_delta else [c, s, s] new_deltas[:, center_h:center_h + s, center_w:center_w + s] = numpy.random.choice([-2 * eps, 2 * eps], size=size) hps_str = 'p={} s={}->{}'.format(p_init, s_init, s) else: if 10 < i_iter <= 50: p = p_init / 2 elif 50 < i_iter <= 200: p = p_init / 4 elif 200 < i_iter <= 500: p = p_init / 8 elif 500 < i_iter <= 10000: p = p_init / 16 elif 10000 < i_iter <= 15000: p = p_init / 32 elif 15000 < i_iter <= 20000: p = p_init / 64 elif 20000 < i_iter: p = 0 else: p = p_init p = p new_deltas = numpy.random.choice([-2 * eps, 0, 2 * eps], p=[p / 2, 1 - p, p / 2], size=[1, *x_curr.shape[1:]]) hps_str = 'p={}->{}'.format(p_init, p) x_new = x_best_curr + new_deltas x_new = numpy.clip(x_new, x_curr - eps, x_curr + eps) x_new = numpy.clip(x_new, min_val, max_val) margin = model.fmargin(x_new, y_curr) idx_improved = margin < margin_min_curr margin_min[idx_to_fool] = idx_improved * margin + ~idx_improved * margin_min_curr idx_improved = numpy.reshape(idx_improved, [-1, *[1] * len(x.shape[:-1])]) x_best[idx_to_fool] = idx_improved * x_new + ~idx_improved * x_best_curr n_queries[idx_to_fool] += 1 acc = (margin_min > 0.0).sum() / n_ex_total acc_corr = (margin_min > 0.0).mean() (mean_nq, mean_nq_ae, median_nq) = (numpy.mean(n_queries), numpy.mean(n_queries[margin_min <= 0]), numpy.median(n_queries)) time_total = time.time() - time_start print('{}: marign_min={:.2} acc={:.2%} acc_corr={:.2%} avg#q={:.2f} avg#q_ae={:.2f} med#q={:.1f} ({}, n_ex={}, eps={:.3f}, {:.2f}s)'.format(i_iter + 1, numpy.mean(margin_min), acc, acc_corr, mean_nq, mean_nq_ae, median_nq, hps_str, x.shape[0], eps, time_total)) metrics[i_iter] = [acc, acc_corr, mean_nq, mean_nq_ae, median_nq, margin_min.mean(), time_total] return (n_queries, x_best)
confidence-calibrated-adversarial-training
positive
def _step1_forward(z_nodes, adjacency, ldj, reverse, ldj_per_layer, **kwargs): if not reverse: <DeepExtract> layer_res = self.node_encoding(z_nodes, reverse=reverse, **kwargs) if len(layer_res) == 2: (z_nodes, layer_ldj) = layer_res detailed_layer_ldj = layer_ldj elif len(layer_res) == 3: (z_nodes, layer_ldj, detailed_layer_ldj) = layer_res if ldj_per_layer is not None: ldj_per_layer.append(detailed_layer_ldj) (z_nodes, ldj) = (z_nodes, ldj + layer_ldj) </DeepExtract> for flow in self.step1_flows: <DeepExtract> layer_res = flow(z_nodes, reverse=reverse, **kwargs) if len(layer_res) == 2: (z_nodes, layer_ldj) = layer_res detailed_layer_ldj = layer_ldj elif len(layer_res) == 3: (z_nodes, layer_ldj, detailed_layer_ldj) = layer_res if ldj_per_layer is not None: ldj_per_layer.append(detailed_layer_ldj) (z_nodes, ldj) = (z_nodes, ldj + layer_ldj) </DeepExtract> else: for flow in reversed(self.step1_flows): <DeepExtract> layer_res = flow(z_nodes, reverse=reverse, **kwargs) if len(layer_res) == 2: (z_nodes, layer_ldj) = layer_res detailed_layer_ldj = layer_ldj elif len(layer_res) == 3: (z_nodes, layer_ldj, detailed_layer_ldj) = layer_res if ldj_per_layer is not None: ldj_per_layer.append(detailed_layer_ldj) (z_nodes, ldj) = (z_nodes, ldj + layer_ldj) </DeepExtract> <DeepExtract> layer_res = self.node_encoding(z_nodes, reverse=reverse, **kwargs) if len(layer_res) == 2: (z_nodes, layer_ldj) = layer_res detailed_layer_ldj = layer_ldj elif len(layer_res) == 3: (z_nodes, layer_ldj, detailed_layer_ldj) = layer_res if ldj_per_layer is not None: ldj_per_layer.append(detailed_layer_ldj) (z_nodes, ldj) = (z_nodes, ldj + layer_ldj) </DeepExtract> return (z_nodes, ldj)
def _step1_forward(z_nodes, adjacency, ldj, reverse, ldj_per_layer, **kwargs): if not reverse: layer_res = self.node_encoding(z_nodes, reverse=reverse, **kwargs) if len(layer_res) == 2: (z_nodes, layer_ldj) = layer_res detailed_layer_ldj = layer_ldj elif len(layer_res) == 3: (z_nodes, layer_ldj, detailed_layer_ldj) = layer_res if ldj_per_layer is not None: ldj_per_layer.append(detailed_layer_ldj) (z_nodes, ldj) = (z_nodes, ldj + layer_ldj) for flow in self.step1_flows: layer_res = flow(z_nodes, reverse=reverse, **kwargs) if len(layer_res) == 2: (z_nodes, layer_ldj) = layer_res detailed_layer_ldj = layer_ldj elif len(layer_res) == 3: (z_nodes, layer_ldj, detailed_layer_ldj) = layer_res if ldj_per_layer is not None: ldj_per_layer.append(detailed_layer_ldj) (z_nodes, ldj) = (z_nodes, ldj + layer_ldj) else: for flow in reversed(self.step1_flows): layer_res = flow(z_nodes, reverse=reverse, **kwargs) if len(layer_res) == 2: (z_nodes, layer_ldj) = layer_res detailed_layer_ldj = layer_ldj elif len(layer_res) == 3: (z_nodes, layer_ldj, detailed_layer_ldj) = layer_res if ldj_per_layer is not None: ldj_per_layer.append(detailed_layer_ldj) (z_nodes, ldj) = (z_nodes, ldj + layer_ldj) layer_res = self.node_encoding(z_nodes, reverse=reverse, **kwargs) if len(layer_res) == 2: (z_nodes, layer_ldj) = layer_res detailed_layer_ldj = layer_ldj elif len(layer_res) == 3: (z_nodes, layer_ldj, detailed_layer_ldj) = layer_res if ldj_per_layer is not None: ldj_per_layer.append(detailed_layer_ldj) (z_nodes, ldj) = (z_nodes, ldj + layer_ldj) return (z_nodes, ldj)
CategoricalNF
positive
def _get_relations_hierarchy(*relations): """Return the relations hierarchy of some relations.""" hierarchy = {} def _fill_hierarchy(hierarchy, *relation_parts): root = relation_parts[0] nest = relation_parts[1:] hierarchy.setdefault(root, {'included': False, 'relations': {}}) if nest: <DeepExtract> root = relation_parts[0] nest = relation_parts[1:] hierarchy[root]['relations'].setdefault(root, {'included': False, 'relations': {}}) if nest: _fill_hierarchy(hierarchy[root]['relations'][root]['relations'], *nest) else: hierarchy[root]['relations'][root]['included'] = True </DeepExtract> else: hierarchy[root]['included'] = True for relation in relations: parts = relation.split(LOOKUP_SEP) <DeepExtract> root = relation_parts[0] nest = relation_parts[1:] hierarchy.setdefault(root, {'included': False, 'relations': {}}) if nest: _fill_hierarchy(hierarchy[root]['relations'], *nest) else: hierarchy[root]['included'] = True </DeepExtract> return hierarchy
def _get_relations_hierarchy(*relations): """Return the relations hierarchy of some relations.""" hierarchy = {} def _fill_hierarchy(hierarchy, *relation_parts): root = relation_parts[0] nest = relation_parts[1:] hierarchy.setdefault(root, {'included': False, 'relations': {}}) if nest: root = relation_parts[0] nest = relation_parts[1:] hierarchy[root]['relations'].setdefault(root, {'included': False, 'relations': {}}) if nest: _fill_hierarchy(hierarchy[root]['relations'][root]['relations'], *nest) else: hierarchy[root]['relations'][root]['included'] = True else: hierarchy[root]['included'] = True for relation in relations: parts = relation.split(LOOKUP_SEP) root = relation_parts[0] nest = relation_parts[1:] hierarchy.setdefault(root, {'included': False, 'relations': {}}) if nest: _fill_hierarchy(hierarchy[root]['relations'], *nest) else: hierarchy[root]['included'] = True return hierarchy
django-translations
positive
def __getitem__(self, key): """ Allows accessing children via index :param key: Integer index of child """ if self.children is None: <DeepExtract> cls = self.__class__ if self._contents is None: if self._fields: self.children = [VOID] * len(self._fields) for (index, (_, _, params)) in enumerate(self._fields): if 'default' in params: if cls._precomputed_specs[index]: (field_name, field_spec, value_spec, field_params, _) = cls._precomputed_specs[index] else: (field_name, field_spec, value_spec, field_params, _) = self._determine_spec(index) self.children[index] = self._make_value(field_name, field_spec, value_spec, field_params, None) return try: self.children = [] contents_length = len(self._contents) child_pointer = 0 field = 0 field_len = len(self._fields) parts = None again = child_pointer < contents_length while again: if parts is None: (parts, child_pointer) = _parse(self._contents, contents_length, pointer=child_pointer) again = child_pointer < contents_length if field < field_len: (_, field_spec, value_spec, field_params, spec_override) = cls._precomputed_specs[field] or self._determine_spec(field) if field_params and ('optional' in field_params or 'default' in field_params): if self._field_ids[field] != (parts[0], parts[2]) and field_spec != Any: choice_match = False if issubclass(field_spec, Choice): try: tester = field_spec(**field_params) tester.validate(parts[0], parts[2], parts[4]) choice_match = True except ValueError: pass if not choice_match: if 'optional' in field_params: self.children.append(VOID) else: self.children.append(field_spec(**field_params)) field += 1 again = True continue if field_spec is None or (spec_override and issubclass(field_spec, Any)): field_spec = value_spec spec_override = None if spec_override: child = parts + (field_spec, field_params, value_spec) else: child = parts + (field_spec, field_params) elif field_len > 0 and field + 1 <= field_len: missed_fields = [] prev_field = field - 1 while prev_field >= 0: prev_field_info = self._fields[prev_field] if len(prev_field_info) < 3: break if 'optional' in prev_field_info[2] or 'default' in prev_field_info[2]: missed_fields.append(prev_field_info[0]) prev_field -= 1 plural = 's' if len(missed_fields) > 1 else '' missed_field_names = ', '.join(missed_fields) raise ValueError(unwrap('\n Data for field %s (%s class, %s method, tag %s) does\n not match the field definition%s of %s\n ', field + 1, CLASS_NUM_TO_NAME_MAP.get(parts[0]), METHOD_NUM_TO_NAME_MAP.get(parts[1]), parts[2], plural, missed_field_names)) else: child = parts if recurse: child = _build(*child) if isinstance(child, (Sequence, SequenceOf)): child._parse_children(recurse=True) self.children.append(child) field += 1 parts = None index = len(self.children) while index < field_len: (name, field_spec, field_params) = self._fields[index] if 'default' in field_params: self.children.append(field_spec(**field_params)) elif 'optional' in field_params: self.children.append(VOID) else: raise ValueError(unwrap('\n Field "%s" is missing from structure\n ', name)) index += 1 except (ValueError, TypeError) as e: self.children = None args = e.args[1:] e.args = (e.args[0] + '\n while parsing %s' % type_name(self),) + args raise e </DeepExtract> return self._lazy_child(key)
def __getitem__(self, key):
    """
    Allows accessing children via index

    :param key:
        Integer index of child
    """
    if self.children is None:
        cls = self.__class__
        recurse = False  # __getitem__ triggers the parse with recurse left at its default
        if self._contents is None:
            if self._fields:
                self.children = [VOID] * len(self._fields)
                for (index, (_, _, params)) in enumerate(self._fields):
                    if 'default' in params:
                        if cls._precomputed_specs[index]:
                            (field_name, field_spec, value_spec, field_params, _) = cls._precomputed_specs[index]
                        else:
                            (field_name, field_spec, value_spec, field_params, _) = self._determine_spec(index)
                        self.children[index] = self._make_value(field_name, field_spec, value_spec, field_params, None)
            return self._lazy_child(key)  # the inlined early exit still has to hand back the requested child
        try:
            self.children = []
            contents_length = len(self._contents)
            child_pointer = 0
            field = 0
            field_len = len(self._fields)
            parts = None
            again = child_pointer < contents_length
            while again:
                if parts is None:
                    (parts, child_pointer) = _parse(self._contents, contents_length, pointer=child_pointer)
                again = child_pointer < contents_length
                if field < field_len:
                    (_, field_spec, value_spec, field_params, spec_override) = cls._precomputed_specs[field] or self._determine_spec(field)
                    if field_params and ('optional' in field_params or 'default' in field_params):
                        if self._field_ids[field] != (parts[0], parts[2]) and field_spec != Any:
                            choice_match = False
                            if issubclass(field_spec, Choice):
                                try:
                                    tester = field_spec(**field_params)
                                    tester.validate(parts[0], parts[2], parts[4])
                                    choice_match = True
                                except ValueError:
                                    pass
                            if not choice_match:
                                if 'optional' in field_params:
                                    self.children.append(VOID)
                                else:
                                    self.children.append(field_spec(**field_params))
                                field += 1
                                again = True
                                continue
                    if field_spec is None or (spec_override and issubclass(field_spec, Any)):
                        field_spec = value_spec
                        spec_override = None
                    if spec_override:
                        child = parts + (field_spec, field_params, value_spec)
                    else:
                        child = parts + (field_spec, field_params)
                elif field_len > 0 and field + 1 <= field_len:
                    missed_fields = []
                    prev_field = field - 1
                    while prev_field >= 0:
                        prev_field_info = self._fields[prev_field]
                        if len(prev_field_info) < 3:
                            break
                        if 'optional' in prev_field_info[2] or 'default' in prev_field_info[2]:
                            missed_fields.append(prev_field_info[0])
                        prev_field -= 1
                    plural = 's' if len(missed_fields) > 1 else ''
                    missed_field_names = ', '.join(missed_fields)
                    raise ValueError(unwrap('\n Data for field %s (%s class, %s method, tag %s) does\n not match the field definition%s of %s\n ', field + 1, CLASS_NUM_TO_NAME_MAP.get(parts[0]), METHOD_NUM_TO_NAME_MAP.get(parts[1]), parts[2], plural, missed_field_names))
                else:
                    child = parts
                if recurse:
                    child = _build(*child)
                    if isinstance(child, (Sequence, SequenceOf)):
                        child._parse_children(recurse=True)
                self.children.append(child)
                field += 1
                parts = None
            index = len(self.children)
            while index < field_len:
                (name, field_spec, field_params) = self._fields[index]
                if 'default' in field_params:
                    self.children.append(field_spec(**field_params))
                elif 'optional' in field_params:
                    self.children.append(VOID)
                else:
                    raise ValueError(unwrap('\n Field "%s" is missing from structure\n ', name))
                index += 1
        except (ValueError, TypeError) as e:
            self.children = None
            args = e.args[1:]
            e.args = (e.args[0] + '\n while parsing %s' % type_name(self),) + args
            raise e
    return self._lazy_child(key)
asn1crypto
positive
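For context on what the lazy __getitem__ above enables, here is a minimal asn1crypto usage sketch (not taken from the record): children of a loaded Sequence are only parsed the first time they are indexed.

from asn1crypto.core import Sequence, Integer, UTF8String

class Pair(Sequence):
    _fields = [('number', Integer), ('label', UTF8String)]

encoded = Pair({'number': 7, 'label': 'seven'}).dump()
decoded = Pair.load(encoded)                 # no children parsed yet
print(decoded[0].native, decoded[1].native)  # 7 seven -- indexing triggers the parse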
def get_object_from_reg(self, obj_addr): <DeepExtract> hexa = '{0:#0{1}x}'.format(obj_addr, 8) a1 = hexa[2:4] a2 = hexa[4:6] a3 = hexa[6:8] a32 = a3 + a2 address_structured = str(int(a32, 16)) + '_' + str(int(a1, 16)) </DeepExtract> if address_structured in self.registers: return self.registers[address_structured] else: return None
def get_object_from_reg(self, obj_addr): hexa = '{0:#0{1}x}'.format(obj_addr, 8) a1 = hexa[2:4] a2 = hexa[4:6] a3 = hexa[6:8] a32 = a3 + a2 address_structured = str(int(a32, 16)) + '_' + str(int(a1, 16)) if address_structured in self.registers: return self.registers[address_structured] else: return None
conpot
positive
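The hex slicing in get_object_from_reg is easier to follow with a worked value. registry_key below is a hypothetical standalone copy of just that conversion; it assumes register addresses fit in 24 bits, as the fixed width of 8 implies.

def registry_key(obj_addr):
    """Sketch of the address munging: low word byte-swapped, high byte kept apart."""
    hexa = '{0:#0{1}x}'.format(obj_addr, 8)   # e.g. 0x010203 -> '0x010203'
    a1, a2, a3 = hexa[2:4], hexa[4:6], hexa[6:8]
    return str(int(a3 + a2, 16)) + '_' + str(int(a1, 16))

print(registry_key(0x010203))  # '770_1'  (0x0302 == 770, 0x01 == 1)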
def damage_masks(labels, shift=True, scale=True, rotate=True):
    """
    Args:
        labels: torch.Tensor of shape (batch_size, 1, h, w) holding integer object ids

    Returns:
        torch.Tensor of the same shape with each object mask independently damaged
    """
    (bs, _, h, w) = labels.size()
    labels = labels.permute(0, 2, 3, 1)
    labels = labels.numpy()
    final_label = []
    for i in range(bs):
        label = labels[i]
        <DeepExtract>
        unique_labels = np.unique(label)
        unique_labels = np.setdiff1d(unique_labels, [0])
        np.random.shuffle(unique_labels)
        damaged_labels = np.zeros_like(label)
        for l in unique_labels:
            obj_mask = label == l
            damaged_obj_mask = _damage_single_object_mask(obj_mask, shift, scale, rotate)
            damaged_labels[damaged_obj_mask] = l
        damaged_label = damaged_labels
        </DeepExtract>
        final_label.append(damaged_label)
    final_label = np.array(final_label)
    final_label = torch.from_numpy(final_label)
    final_label = final_label.permute(0, 3, 1, 2)
    return final_label
def damage_masks(labels, shift=True, scale=True, rotate=True):
    """
    Args:
        labels: torch.Tensor of shape (batch_size, 1, h, w) holding integer object ids

    Returns:
        torch.Tensor of the same shape with each object mask independently damaged
    """
    (bs, _, h, w) = labels.size()
    labels = labels.permute(0, 2, 3, 1)
    labels = labels.numpy()
    final_label = []
    for i in range(bs):
        label = labels[i]
        unique_labels = np.unique(label)
        unique_labels = np.setdiff1d(unique_labels, [0])
        np.random.shuffle(unique_labels)
        damaged_labels = np.zeros_like(label)
        for l in unique_labels:
            obj_mask = label == l
            damaged_obj_mask = _damage_single_object_mask(obj_mask, shift, scale, rotate)
            damaged_labels[damaged_obj_mask] = l
        damaged_label = damaged_labels
        final_label.append(damaged_label)
    final_label = np.array(final_label)
    final_label = torch.from_numpy(final_label)
    final_label = final_label.permute(0, 3, 1, 2)
    return final_label
CVPR2020_MANet
positive
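To make the tensor plumbing above concrete, here is a toy harness. It assumes damage_masks from the record is in scope, and _damage_single_object_mask is only a hypothetical stand-in (the record does not show the real helper, which presumably shifts, scales, or rotates the mask), so the sketch just demonstrates the shape round-trip.

import numpy as np
import torch

def _damage_single_object_mask(obj_mask, shift, scale, rotate):
    # Hypothetical stand-in for the repo's helper: roll the mask a few pixels to mimic a shift.
    return np.roll(obj_mask, 3, axis=0) if shift else obj_mask

labels = torch.zeros(2, 1, 8, 8, dtype=torch.int64)
labels[:, :, 2:5, 2:5] = 1                 # one square object with id 1
damaged = damage_masks(labels)             # returns a (2, 1, 8, 8) tensor
print(damaged.shape, damaged.unique())     # torch.Size([2, 1, 8, 8]) tensor([0, 1])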
def download(self):
    if self._check_integrity():
        print('Files already downloaded and verified')
        return
    import re
    import hashlib
    import shutil
    from glob import glob
    from zipfile import ZipFile
    raw_dir = osp.join(self.root, 'raw')
    mkdir_if_missing(raw_dir)
    fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
    if osp.isfile(fpath) and hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
        print('Using downloaded file: ' + fpath)
    else:
        raise RuntimeError('Please download the dataset manually from {} to {}'.format(self.url, fpath))
    exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
    if not osp.isdir(exdir):
        print('Extracting zip file')
        with ZipFile(fpath) as z:
            z.extractall(path=raw_dir)
    images_dir = osp.join(self.root, 'images')
    mkdir_if_missing(images_dir)
    identities = [[[] for _ in range(6)] for _ in range(1502)]

    def register(subdir, pattern=re.compile('([-\\d]+)_c(\\d)')):
        fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
        pids = set()
        for fpath in fpaths:
            fname = osp.basename(fpath)
            (pid, cam) = map(int, pattern.search(fname).groups())
            if pid == -1:
                continue
            assert 0 <= pid <= 1501
            assert 1 <= cam <= 6
            cam -= 1
            pids.add(pid)
            fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
            identities[pid][cam].append(fname)
            shutil.copy(fpath, osp.join(images_dir, fname))
        return pids
    pattern = re.compile('([-\\d]+)_c(\\d)')  # regex from register()'s default argument; the inlined loops below need it in this scope
    <DeepExtract>
    fpaths = sorted(glob(osp.join(exdir, 'bounding_box_train', '*.jpg')))
    pids = set()
    for fpath in fpaths:
        fname = osp.basename(fpath)
        (pid, cam) = map(int, pattern.search(fname).groups())
        if pid == -1:
            continue
        assert 0 <= pid <= 1501
        assert 1 <= cam <= 6
        cam -= 1
        pids.add(pid)
        fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
        identities[pid][cam].append(fname)
        shutil.copy(fpath, osp.join(images_dir, fname))
    trainval_pids = pids
    </DeepExtract>
    <DeepExtract>
    fpaths = sorted(glob(osp.join(exdir, 'bounding_box_test', '*.jpg')))
    pids = set()
    for fpath in fpaths:
        fname = osp.basename(fpath)
        (pid, cam) = map(int, pattern.search(fname).groups())
        if pid == -1:
            continue
        assert 0 <= pid <= 1501
        assert 1 <= cam <= 6
        cam -= 1
        pids.add(pid)
        fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
        identities[pid][cam].append(fname)
        shutil.copy(fpath, osp.join(images_dir, fname))
    gallery_pids = pids
    </DeepExtract>
    <DeepExtract>
    fpaths = sorted(glob(osp.join(exdir, 'query', '*.jpg')))
    pids = set()
    for fpath in fpaths:
        fname = osp.basename(fpath)
        (pid, cam) = map(int, pattern.search(fname).groups())
        if pid == -1:
            continue
        assert 0 <= pid <= 1501
        assert 1 <= cam <= 6
        cam -= 1
        pids.add(pid)
        fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
        identities[pid][cam].append(fname)
        shutil.copy(fpath, osp.join(images_dir, fname))
    query_pids = pids
    </DeepExtract>
    assert query_pids <= gallery_pids
    assert trainval_pids.isdisjoint(gallery_pids)
    meta = {'name': 'New_Complete_Aicity_Car', 'shot': 'multiple', 'num_cameras': 41, 'identities': identities}
    write_json(meta, osp.join(self.root, 'meta.json'))
    splits = [{'trainval': sorted(list(trainval_pids)), 'query': sorted(list(query_pids)), 'gallery': sorted(list(gallery_pids))}]
    write_json(splits, osp.join(self.root, 'splits.json'))
def download(self):
    if self._check_integrity():
        print('Files already downloaded and verified')
        return
    import re
    import hashlib
    import shutil
    from glob import glob
    from zipfile import ZipFile
    raw_dir = osp.join(self.root, 'raw')
    mkdir_if_missing(raw_dir)
    fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
    if osp.isfile(fpath) and hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
        print('Using downloaded file: ' + fpath)
    else:
        raise RuntimeError('Please download the dataset manually from {} to {}'.format(self.url, fpath))
    exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
    if not osp.isdir(exdir):
        print('Extracting zip file')
        with ZipFile(fpath) as z:
            z.extractall(path=raw_dir)
    images_dir = osp.join(self.root, 'images')
    mkdir_if_missing(images_dir)
    identities = [[[] for _ in range(6)] for _ in range(1502)]

    def register(subdir, pattern=re.compile('([-\\d]+)_c(\\d)')):
        fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
        pids = set()
        for fpath in fpaths:
            fname = osp.basename(fpath)
            (pid, cam) = map(int, pattern.search(fname).groups())
            if pid == -1:
                continue
            assert 0 <= pid <= 1501
            assert 1 <= cam <= 6
            cam -= 1
            pids.add(pid)
            fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
            identities[pid][cam].append(fname)
            shutil.copy(fpath, osp.join(images_dir, fname))
        return pids
    pattern = re.compile('([-\\d]+)_c(\\d)')  # regex from register()'s default argument; the inlined loops below need it in this scope
    fpaths = sorted(glob(osp.join(exdir, 'bounding_box_train', '*.jpg')))
    pids = set()
    for fpath in fpaths:
        fname = osp.basename(fpath)
        (pid, cam) = map(int, pattern.search(fname).groups())
        if pid == -1:
            continue
        assert 0 <= pid <= 1501
        assert 1 <= cam <= 6
        cam -= 1
        pids.add(pid)
        fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
        identities[pid][cam].append(fname)
        shutil.copy(fpath, osp.join(images_dir, fname))
    trainval_pids = pids
    fpaths = sorted(glob(osp.join(exdir, 'bounding_box_test', '*.jpg')))
    pids = set()
    for fpath in fpaths:
        fname = osp.basename(fpath)
        (pid, cam) = map(int, pattern.search(fname).groups())
        if pid == -1:
            continue
        assert 0 <= pid <= 1501
        assert 1 <= cam <= 6
        cam -= 1
        pids.add(pid)
        fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
        identities[pid][cam].append(fname)
        shutil.copy(fpath, osp.join(images_dir, fname))
    gallery_pids = pids
    fpaths = sorted(glob(osp.join(exdir, 'query', '*.jpg')))
    pids = set()
    for fpath in fpaths:
        fname = osp.basename(fpath)
        (pid, cam) = map(int, pattern.search(fname).groups())
        if pid == -1:
            continue
        assert 0 <= pid <= 1501
        assert 1 <= cam <= 6
        cam -= 1
        pids.add(pid)
        fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
        identities[pid][cam].append(fname)
        shutil.copy(fpath, osp.join(images_dir, fname))
    query_pids = pids
    assert query_pids <= gallery_pids
    assert trainval_pids.isdisjoint(gallery_pids)
    meta = {'name': 'New_Complete_Aicity_Car', 'shot': 'multiple', 'num_cameras': 41, 'identities': identities}
    write_json(meta, osp.join(self.root, 'meta.json'))
    splits = [{'trainval': sorted(list(trainval_pids)), 'query': sorted(list(query_pids)), 'gallery': sorted(list(gallery_pids))}]
    write_json(splits, osp.join(self.root, 'splits.json'))
AIC2020_ReID
positive
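The pid/camera parsing above hinges on a single regex; the snippet below exercises it against a Market-1501-style file name (the name itself is illustrative, not taken from the record; junk images use pid -1).

import re

pattern = re.compile('([-\\d]+)_c(\\d)')
pid, cam = map(int, pattern.search('0002_c1s1_000451_03.jpg').groups())
print(pid, cam)  # 2 1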
def resolve_constraints_by_exhaustive_search(self):
    """Solve all constraints by exploring the whole search space.

    This method iterates over ``self.iter_mutations_space()`` (space of
    all sequences that could be reached through successive mutations) and
    stops when it finds a sequence which meets all the constraints of the
    problem.
    """
    <DeepExtract>
    focus_constraints = [c for c in self.constraints if c.is_focus]
    if len(focus_constraints) == 1:
        focus = focus_constraints[0]
        other_constraints = [c for c in self.constraints if not c.is_focus]
        (focus_constraint, other_constraints) = (focus, other_constraints)
    else:
        (focus_constraint, other_constraints) = (None, None)
    </DeepExtract>
    sequence_before = self.sequence
    all_variants = self.mutation_space.all_variants(self.sequence)
    space_size = int(self.mutation_space.space_size)
    self.logger(mutation__total=space_size)
    for variant in self.logger.iter_bar(mutation=all_variants):
        self.sequence = variant
        if focus_constraint is not None:
            if focus_constraint.evaluate(self).passes:
                if all((c.evaluate(self).passes for c in other_constraints)):
                    self.logger(mutation__index=space_size)
                    return
        elif self.all_constraints_pass():
            self.logger(mutation__index=space_size)
            return
    self.sequence = sequence_before
    raise NoSolutionError('Exhaustive search failed to satisfy all constraints.', problem=self)
def resolve_constraints_by_exhaustive_search(self):
    """Solve all constraints by exploring the whole search space.

    This method iterates over ``self.iter_mutations_space()`` (space of
    all sequences that could be reached through successive mutations) and
    stops when it finds a sequence which meets all the constraints of the
    problem.
    """
    focus_constraints = [c for c in self.constraints if c.is_focus]
    if len(focus_constraints) == 1:
        focus = focus_constraints[0]
        other_constraints = [c for c in self.constraints if not c.is_focus]
        (focus_constraint, other_constraints) = (focus, other_constraints)
    else:
        (focus_constraint, other_constraints) = (None, None)
    sequence_before = self.sequence
    all_variants = self.mutation_space.all_variants(self.sequence)
    space_size = int(self.mutation_space.space_size)
    self.logger(mutation__total=space_size)
    for variant in self.logger.iter_bar(mutation=all_variants):
        self.sequence = variant
        if focus_constraint is not None:
            if focus_constraint.evaluate(self).passes:
                if all((c.evaluate(self).passes for c in other_constraints)):
                    self.logger(mutation__index=space_size)
                    return
        elif self.all_constraints_pass():
            self.logger(mutation__index=space_size)
            return
    self.sequence = sequence_before
    raise NoSolutionError('Exhaustive search failed to satisfy all constraints.', problem=self)
DnaChisel
positive
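The exhaustive strategy above boils down to "enumerate every variant in the mutation space and keep the first one that passes every constraint". The toy below illustrates that idea with plain callables rather than DnaChisel's constraint objects; all names and structures here are made up for illustration.

from itertools import product

mutation_space = {0: 'AT', 2: 'GC'}          # position -> allowed letters
sequence = list('AAAA')
constraints = [lambda s: s.count('G') >= 1, lambda s: s[0] == 'T']

for choice in product(*mutation_space.values()):
    variant = list(sequence)
    for pos, letter in zip(mutation_space, choice):
        variant[pos] = letter
    if all(c(''.join(variant)) for c in constraints):
        print('solution:', ''.join(variant))  # solution: TAGA
        break
else:
    raise ValueError('exhaustive search failed to satisfy all constraints')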