column  dtype          lengths / classes
before  stringlengths  0 to 955k
after   stringlengths  0 to 877k
repo    stringlengths  1 to 74
type    stringclasses  1 value
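Each record below pairs a 'before' function, in which the body of an extracted helper is wrapped in <DeepExtract> ... </DeepExtract> markers, with an 'after' version of the same function in which those markers are removed, followed by the source repo and the type label. As a minimal sketch of how rows with this schema could be inspected, the snippet below assumes (purely for illustration) that the records are exported to a JSON Lines file named pairs.jsonl; the file name and format are not given by this preview.

import json
from collections import Counter

repo_counts = Counter()   # number of rows per source repository
longest_before = 0        # longest 'before' snippet seen, in characters

# 'pairs.jsonl' is a hypothetical export of the rows shown below,
# one JSON object per line with the fields 'before', 'after', 'repo', 'type'.
with open('pairs.jsonl', encoding='utf-8') as fh:
    for raw in fh:
        row = json.loads(raw)
        repo_counts[row['repo']] += 1
        longest_before = max(longest_before, len(row['before']))

print('rows per repo:', repo_counts.most_common(5))
print('longest before snippet:', longest_before, 'chars')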
def subsample(self, proposals, targets): """ This method performs the positive/negative sampling, and return the sampled proposals. Note: this function keeps a state. Arguments: proposals (list[BoxList]) targets (list[BoxList]) """ <DeepExtract> labels = [] keypoints = [] for (proposals_per_image, targets_per_image) in zip(proposals, targets): matched_targets = self.match_targets_to_proposals(proposals_per_image, targets_per_image) matched_idxs = matched_targets.get_field('matched_idxs') labels_per_image = matched_targets.get_field('labels') labels_per_image = labels_per_image.to(dtype=torch.int64) neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD labels_per_image[neg_inds] = 0 keypoints_per_image = matched_targets.get_field('keypoints') within_box = _within_box(keypoints_per_image.keypoints, matched_targets.bbox) vis_kp = keypoints_per_image.keypoints[..., 2] > 0 is_visible = (within_box & vis_kp).sum(1) > 0 labels_per_image[~is_visible] = -1 labels.append(labels_per_image) keypoints.append(keypoints_per_image) (labels, keypoints) = (labels, keypoints) </DeepExtract> (sampled_pos_inds, sampled_neg_inds) = self.fg_bg_sampler(labels) proposals = list(proposals) for (labels_per_image, keypoints_per_image, proposals_per_image) in zip(labels, keypoints, proposals): proposals_per_image.add_field('labels', labels_per_image) proposals_per_image.add_field('keypoints', keypoints_per_image) for (img_idx, (pos_inds_img, neg_inds_img)) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)): img_sampled_inds = torch.nonzero(pos_inds_img).squeeze(1) proposals_per_image = proposals[img_idx][img_sampled_inds] proposals[img_idx] = proposals_per_image self._proposals = proposals return proposals
def subsample(self, proposals, targets): """ This method performs the positive/negative sampling, and return the sampled proposals. Note: this function keeps a state. Arguments: proposals (list[BoxList]) targets (list[BoxList]) """ labels = [] keypoints = [] for (proposals_per_image, targets_per_image) in zip(proposals, targets): matched_targets = self.match_targets_to_proposals(proposals_per_image, targets_per_image) matched_idxs = matched_targets.get_field('matched_idxs') labels_per_image = matched_targets.get_field('labels') labels_per_image = labels_per_image.to(dtype=torch.int64) neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD labels_per_image[neg_inds] = 0 keypoints_per_image = matched_targets.get_field('keypoints') within_box = _within_box(keypoints_per_image.keypoints, matched_targets.bbox) vis_kp = keypoints_per_image.keypoints[..., 2] > 0 is_visible = (within_box & vis_kp).sum(1) > 0 labels_per_image[~is_visible] = -1 labels.append(labels_per_image) keypoints.append(keypoints_per_image) (labels, keypoints) = (labels, keypoints) (sampled_pos_inds, sampled_neg_inds) = self.fg_bg_sampler(labels) proposals = list(proposals) for (labels_per_image, keypoints_per_image, proposals_per_image) in zip(labels, keypoints, proposals): proposals_per_image.add_field('labels', labels_per_image) proposals_per_image.add_field('keypoints', keypoints_per_image) for (img_idx, (pos_inds_img, neg_inds_img)) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)): img_sampled_inds = torch.nonzero(pos_inds_img).squeeze(1) proposals_per_image = proposals[img_idx][img_sampled_inds] proposals[img_idx] = proposals_per_image self._proposals = proposals return proposals
dgmn
positive
def train(self, mode=True): """ If ``mode=True``, the method sets the module in training mode and discards the :attr:`~e2cnn.nn.R2Conv.filter` and :attr:`~e2cnn.nn.R2Conv.expanded_bias` attributes. If ``mode=False``, it sets the module in evaluation mode. Moreover, the method builds the filter and the bias using the current values of the trainable parameters and store them in :attr:`~e2cnn.nn.R2Conv.filter` and :attr:`~e2cnn.nn.R2Conv.expanded_bias` such that they are not recomputed at each forward pass. .. warning :: This behaviour can cause problems when storing the :meth:`~torch.nn.Module.state_dict` of a model while in a mode and lately loading it in a model with a different mode, as the attributes of this class change. To avoid this issue, we recommend converting the model to eval mode before storing or loading the state dictionary. Args: mode (bool, optional): whether to set training mode (``True``) or evaluation mode (``False``). Default: ``True``. """ if mode: if hasattr(self, 'filter'): del self.filter if hasattr(self, 'expanded_bias'): del self.expanded_bias elif self.training: <DeepExtract> _filter = self.basisexpansion(self.weights) _filter = _filter.reshape(_filter.shape[0], _filter.shape[1], self.kernel_size, self.kernel_size) if self.bias is None: _bias = None else: _bias = self.bias_expansion @ self.bias (_filter, _bias) = (_filter, _bias) </DeepExtract> self.register_buffer('filter', _filter) if _bias is not None: self.register_buffer('expanded_bias', _bias) else: self.expanded_bias = None return super(R2Conv, self).train(mode)
def train(self, mode=True): """ If ``mode=True``, the method sets the module in training mode and discards the :attr:`~e2cnn.nn.R2Conv.filter` and :attr:`~e2cnn.nn.R2Conv.expanded_bias` attributes. If ``mode=False``, it sets the module in evaluation mode. Moreover, the method builds the filter and the bias using the current values of the trainable parameters and store them in :attr:`~e2cnn.nn.R2Conv.filter` and :attr:`~e2cnn.nn.R2Conv.expanded_bias` such that they are not recomputed at each forward pass. .. warning :: This behaviour can cause problems when storing the :meth:`~torch.nn.Module.state_dict` of a model while in a mode and lately loading it in a model with a different mode, as the attributes of this class change. To avoid this issue, we recommend converting the model to eval mode before storing or loading the state dictionary. Args: mode (bool, optional): whether to set training mode (``True``) or evaluation mode (``False``). Default: ``True``. """ if mode: if hasattr(self, 'filter'): del self.filter if hasattr(self, 'expanded_bias'): del self.expanded_bias elif self.training: _filter = self.basisexpansion(self.weights) _filter = _filter.reshape(_filter.shape[0], _filter.shape[1], self.kernel_size, self.kernel_size) if self.bias is None: _bias = None else: _bias = self.bias_expansion @ self.bias (_filter, _bias) = (_filter, _bias) self.register_buffer('filter', _filter) if _bias is not None: self.register_buffer('expanded_bias', _bias) else: self.expanded_bias = None return super(R2Conv, self).train(mode)
e2cnn
positive
def evaluate(dataset, step, partition='valid'): self.model_lm.eval() predictions = [] labels = [] with torch.no_grad(): for validation_batch in dataset: validation_batch = batch_filter_distances(validation_batch, self.relative_distances) validation_batch = batch_to_device(validation_batch, self.device) output = self.model_lm.forward_batch(validation_batch).cpu() validation_batch = batch_to_device(validation_batch, 'cpu') predictions.extend(output.logits.argmax(-1)) labels.extend(validation_batch.labels) <DeepExtract> evaluation = dict() for (metric_name, metric_fn) in self.metrics.items(): evaluation[f'{metric_name}/{partition}'] = metric_fn(output.logits, validation_batch.labels) if output.loss: evaluation[f'loss/{partition}'] = output.loss.item() evaluation = evaluation </DeepExtract> self.logger.log_sub_batch_metrics(evaluation) self.logger.log_text('predicted words/validation', str(self._decode_predicted_words(output, validation_batch))) self.logger.log_text('labels/validation', str(self._decode_labels(validation_batch))) self.model_lm.train() if f'micro_f1_score/{partition}' in self.logger.sub_batch_metrics: score = mean(self.logger.sub_batch_metrics[f'micro_f1_score/{partition}']) else: score = mean(self.logger.sub_batch_metrics[f'f1_score/{partition}']) self.logger.flush_batch_metrics(step=step) return score
def evaluate(dataset, step, partition='valid'): self.model_lm.eval() predictions = [] labels = [] with torch.no_grad(): for validation_batch in dataset: validation_batch = batch_filter_distances(validation_batch, self.relative_distances) validation_batch = batch_to_device(validation_batch, self.device) output = self.model_lm.forward_batch(validation_batch).cpu() validation_batch = batch_to_device(validation_batch, 'cpu') predictions.extend(output.logits.argmax(-1)) labels.extend(validation_batch.labels) evaluation = dict() for (metric_name, metric_fn) in self.metrics.items(): evaluation[f'{metric_name}/{partition}'] = metric_fn(output.logits, validation_batch.labels) if output.loss: evaluation[f'loss/{partition}'] = output.loss.item() evaluation = evaluation self.logger.log_sub_batch_metrics(evaluation) self.logger.log_text('predicted words/validation', str(self._decode_predicted_words(output, validation_batch))) self.logger.log_text('labels/validation', str(self._decode_labels(validation_batch))) self.model_lm.train() if f'micro_f1_score/{partition}' in self.logger.sub_batch_metrics: score = mean(self.logger.sub_batch_metrics[f'micro_f1_score/{partition}']) else: score = mean(self.logger.sub_batch_metrics[f'f1_score/{partition}']) self.logger.flush_batch_metrics(step=step) return score
code-transformer
positive
def simulate_conditional(self, X): """ Draws random samples from the conditional distribution Args: X: x to be conditioned on when drawing a sample from y ~ p(y|x) - numpy array of shape (n_samples, 3) thereby x is a horizontal stack of V, L and Psi -> x = (V, L, Psi) Returns: (X,Y) - X: the x to of the conditional samples (identical with argument X) - Y: Conditional random samples y drawn from p(y|x) - numpy array of shape (n_samples, 1) """ X = self._handle_input_dimensionality(X) (V_sim, L_sim, Psi_sim) = (X[:, 0], X[:, 1], X[:, 2]) <DeepExtract> assert V_sim.ndim == L_sim.ndim == Psi_sim.ndim assert V_sim.shape[0] == L_sim.shape[0] == Psi_sim.shape[0] N = V_sim.shape[0] y_sim = np.full((N,), 0) xi = math.exp(self.theta + self.delta ** 2 / 2) - 1 dt = 1 / 252 lambda_t = Psi_sim + self.gamma_V * V_sim + self.gamma_L * L_sim Psi_sim = np.maximum(0, Psi_sim + self.kappa_psi * (self.theta_psi - Psi_sim) * dt + self.xi_psi * (Psi_sim * dt) ** 0.5 * self.random_state.normal(size=(N,))) L_shocks = self.random_state.normal(size=(N,)) L_sim = np.maximum(0, L_sim + self.kappa_L * (self.theta_L - L_sim) * dt + self.xi_L * (L_sim * dt) ** 0.5 * L_shocks) V_shocks = self.random_state.normal(size=(N,)) V_sim = np.maximum(0, V_sim + self.kappa_V * (self.theta_V - V_sim) * dt + self.gamma * self.kappa_L * (self.theta_L - L_sim) * dt + self.xi_V * (V_sim * dt) ** 0.5 * V_shocks + self.gamma * self.xi_L * (L_sim * dt) ** 0.5 * L_shocks) q = self.random_state.normal(loc=self.theta, scale=self.delta, size=(N,)) jumps = self.random_state.poisson(lam=lambda_t * dt, size=(1, N)).T[:, 0] y_shocks = self.random_state.normal(size=(N,)) y_sim = y_sim + (self.r - 0.5 * V_sim - xi * lambda_t + 1.554 * V_sim ** 0.5) * dt + (V_sim * dt) ** 0.5 * ((1 - self.rho ** 2) ** 0.5 * y_shocks + self.rho * V_shocks) + q * jumps (Y, _, _, _) = (y_sim, V_sim, L_sim, Psi_sim) </DeepExtract> Y = np.expand_dims(Y, axis=1) assert Y.shape == (X.shape[0], self.ndim_y) return (X, Y)
def simulate_conditional(self, X): """ Draws random samples from the conditional distribution Args: X: x to be conditioned on when drawing a sample from y ~ p(y|x) - numpy array of shape (n_samples, 3) thereby x is a horizontal stack of V, L and Psi -> x = (V, L, Psi) Returns: (X,Y) - X: the x to of the conditional samples (identical with argument X) - Y: Conditional random samples y drawn from p(y|x) - numpy array of shape (n_samples, 1) """ X = self._handle_input_dimensionality(X) (V_sim, L_sim, Psi_sim) = (X[:, 0], X[:, 1], X[:, 2]) assert V_sim.ndim == L_sim.ndim == Psi_sim.ndim assert V_sim.shape[0] == L_sim.shape[0] == Psi_sim.shape[0] N = V_sim.shape[0] y_sim = np.full((N,), 0) xi = math.exp(self.theta + self.delta ** 2 / 2) - 1 dt = 1 / 252 lambda_t = Psi_sim + self.gamma_V * V_sim + self.gamma_L * L_sim Psi_sim = np.maximum(0, Psi_sim + self.kappa_psi * (self.theta_psi - Psi_sim) * dt + self.xi_psi * (Psi_sim * dt) ** 0.5 * self.random_state.normal(size=(N,))) L_shocks = self.random_state.normal(size=(N,)) L_sim = np.maximum(0, L_sim + self.kappa_L * (self.theta_L - L_sim) * dt + self.xi_L * (L_sim * dt) ** 0.5 * L_shocks) V_shocks = self.random_state.normal(size=(N,)) V_sim = np.maximum(0, V_sim + self.kappa_V * (self.theta_V - V_sim) * dt + self.gamma * self.kappa_L * (self.theta_L - L_sim) * dt + self.xi_V * (V_sim * dt) ** 0.5 * V_shocks + self.gamma * self.xi_L * (L_sim * dt) ** 0.5 * L_shocks) q = self.random_state.normal(loc=self.theta, scale=self.delta, size=(N,)) jumps = self.random_state.poisson(lam=lambda_t * dt, size=(1, N)).T[:, 0] y_shocks = self.random_state.normal(size=(N,)) y_sim = y_sim + (self.r - 0.5 * V_sim - xi * lambda_t + 1.554 * V_sim ** 0.5) * dt + (V_sim * dt) ** 0.5 * ((1 - self.rho ** 2) ** 0.5 * y_shocks + self.rho * V_shocks) + q * jumps (Y, _, _, _) = (y_sim, V_sim, L_sim, Psi_sim) Y = np.expand_dims(Y, axis=1) assert Y.shape == (X.shape[0], self.ndim_y) return (X, Y)
Conditional_Density_Estimation
positive
def get_id_fields(self): """ Called to return a list of fields consisting of, at minimum, the PK field name. The output of this method is used to construct a Prefetch object with a .only() queryset when this field is not being sideloaded but we need to return a list of IDs. """ <DeepExtract> model = self.child.get_model() </DeepExtract> out = [model._meta.pk.name] for field in model._meta.fields: if isinstance(field, models.ForeignKey): out.append(field.name + '_id') return out
def get_id_fields(self): """ Called to return a list of fields consisting of, at minimum, the PK field name. The output of this method is used to construct a Prefetch object with a .only() queryset when this field is not being sideloaded but we need to return a list of IDs. """ model = self.child.get_model() out = [model._meta.pk.name] for field in model._meta.fields: if isinstance(field, models.ForeignKey): out.append(field.name + '_id') return out
dynamic-rest
positive
def rebalance(self, obs): if self.step: prev_posit = self.get_portfolio_vector(obs, index=-1)[:-1] <DeepExtract> raise NotImplementedError() </DeepExtract> return self.update(prev_posit, *factor) else: n_pairs = obs.columns.levels[0].shape[0] action = np.ones(n_pairs) action[-1] = 0 return array_normalize(action)
def rebalance(self, obs): if self.step: prev_posit = self.get_portfolio_vector(obs, index=-1)[:-1] raise NotImplementedError() return self.update(prev_posit, *factor) else: n_pairs = obs.columns.levels[0].shape[0] action = np.ones(n_pairs) action[-1] = 0 return array_normalize(action)
cryptotrader
positive
def get_maxshape(self): """Check if this dataset has any dimensions named "*unlimited*". If so, return shape of data, with unlimited dimension set to None, as would be used to set maxshape in call to h5py.""" dims = self.dsinfo['dimensions'] if '*unlimited*' not in dims: return None shape = self.dsinfo['shape'] if not isinstance(shape, (list, tuple)): <DeepExtract> if "Found '*unlimited*' dimension, but data shape (%s) is not a list" % shape: print('** Error: %s' % "Found '*unlimited*' dimension, but data shape (%s) is not a list" % shape) print('Stack trace follows') print('-------------------') traceback.print_stack() sys.exit(1) </DeepExtract> maxshape = [] for i in range(len(shape)): msv = shape[i] if dims[i] != '*unlimited*' else None maxshape.append(msv) return maxshape
def get_maxshape(self): """Check if this dataset has any dimensions named "*unlimited*". If so, return shape of data, with unlimited dimension set to None, as would be used to set maxshape in call to h5py.""" dims = self.dsinfo['dimensions'] if '*unlimited*' not in dims: return None shape = self.dsinfo['shape'] if not isinstance(shape, (list, tuple)): if "Found '*unlimited*' dimension, but data shape (%s) is not a list" % shape: print('** Error: %s' % "Found '*unlimited*' dimension, but data shape (%s) is not a list" % shape) print('Stack trace follows') print('-------------------') traceback.print_stack() sys.exit(1) maxshape = [] for i in range(len(shape)): msv = shape[i] if dims[i] != '*unlimited*' else None maxshape.append(msv) return maxshape
api-python
positive
def main(): parser = argparse.ArgumentParser(description='Spawn tasks for bugbug data pipeline') parser.add_argument('data_pipeline_json') args = parser.parse_args() decision_task_id = os.environ.get('TASK_ID') <DeepExtract> options = taskcluster.optionsFromEnvironment() proxy_url = os.environ.get('TASKCLUSTER_PROXY_URL') if proxy_url is not None: options['rootUrl'] = proxy_url if 'rootUrl' not in options: options['rootUrl'] = TASKCLUSTER_DEFAULT_URL options = options </DeepExtract> add_self = False if decision_task_id: add_self = True task_group_id = decision_task_id else: task_group_id = taskcluster.utils.slugId() keys = {'taskGroupId': task_group_id} id_mapping = {} tasks = [] with open(args.data_pipeline_json) as pipeline_file: raw_tasks = yaml.safe_load(pipeline_file.read()) version = os.getenv('TAG', 'latest') context = {'version': version} rendered = jsone.render(raw_tasks, context) for task in rendered['tasks']: task_id = taskcluster.utils.slugId() task_internal_id = task['ID'] if task_internal_id in id_mapping: raise ValueError(f'Conflicting IDs {task_internal_id}') id_mapping[task_internal_id] = task_id for task in rendered['tasks']: task_internal_id = task.pop('ID') task_id = id_mapping[task_internal_id] for (key, value) in keys.items(): task[key] = value task_payload = task['payload'] if 'env' in task_payload and task_payload['env']: task_payload['env']['TAG'] = version else: task_payload['env'] = {'TAG': version} new_dependencies = [] for dependency in task.get('dependencies', []): new_dependencies.append(id_mapping[dependency]) if add_self: new_dependencies.append(decision_task_id) task['dependencies'] = new_dependencies tasks.append((task_id, task)) queue = taskcluster.Queue(options) try: for (task_id, task_payload) in tasks: queue.createTask(task_id, task_payload) logger.info('https://community-tc.services.mozilla.com/tasks/groups/%s', task_group_id) except taskcluster.exceptions.TaskclusterAuthFailure: logger.exception('Failed to authenticate with Taskcluster') raise
def main(): parser = argparse.ArgumentParser(description='Spawn tasks for bugbug data pipeline') parser.add_argument('data_pipeline_json') args = parser.parse_args() decision_task_id = os.environ.get('TASK_ID') options = taskcluster.optionsFromEnvironment() proxy_url = os.environ.get('TASKCLUSTER_PROXY_URL') if proxy_url is not None: options['rootUrl'] = proxy_url if 'rootUrl' not in options: options['rootUrl'] = TASKCLUSTER_DEFAULT_URL options = options add_self = False if decision_task_id: add_self = True task_group_id = decision_task_id else: task_group_id = taskcluster.utils.slugId() keys = {'taskGroupId': task_group_id} id_mapping = {} tasks = [] with open(args.data_pipeline_json) as pipeline_file: raw_tasks = yaml.safe_load(pipeline_file.read()) version = os.getenv('TAG', 'latest') context = {'version': version} rendered = jsone.render(raw_tasks, context) for task in rendered['tasks']: task_id = taskcluster.utils.slugId() task_internal_id = task['ID'] if task_internal_id in id_mapping: raise ValueError(f'Conflicting IDs {task_internal_id}') id_mapping[task_internal_id] = task_id for task in rendered['tasks']: task_internal_id = task.pop('ID') task_id = id_mapping[task_internal_id] for (key, value) in keys.items(): task[key] = value task_payload = task['payload'] if 'env' in task_payload and task_payload['env']: task_payload['env']['TAG'] = version else: task_payload['env'] = {'TAG': version} new_dependencies = [] for dependency in task.get('dependencies', []): new_dependencies.append(id_mapping[dependency]) if add_self: new_dependencies.append(decision_task_id) task['dependencies'] = new_dependencies tasks.append((task_id, task)) queue = taskcluster.Queue(options) try: for (task_id, task_payload) in tasks: queue.createTask(task_id, task_payload) logger.info('https://community-tc.services.mozilla.com/tasks/groups/%s', task_group_id) except taskcluster.exceptions.TaskclusterAuthFailure: logger.exception('Failed to authenticate with Taskcluster') raise
bugbug
positive
def build_insert_or_do(method_name): """ Build insert or do.""" def _do(): if self.is_focus(): <DeepExtract> text = QT_TEXT_DICT.get(self.current_event_string, self.current_event_string) modifier = Qt.KeyboardModifier.NoModifier if self.current_event_string == '<backtab>' or (len(self.current_event_string) == 1 and self.current_event_string.isupper()): modifier = Qt.KeyboardModifier.ShiftModifier try: key_press = QKeyEvent(QEvent.Type.KeyPress, QT_KEY_DICT[self.current_event_string], modifier, text) except: key_press = QKeyEvent(QEvent.Type.KeyPress, Qt.Key.Key_unknown, modifier, text) for widget in self.get_key_event_widgets(): QApplication.postEvent(widget, key_press) self.send_key_filter(self.current_event_string) </DeepExtract> else: getattr(self, method_name)() setattr(self, 'insert_or_{}'.format(method_name), _do)
def build_insert_or_do(method_name): """ Build insert or do.""" def _do(): if self.is_focus(): text = QT_TEXT_DICT.get(self.current_event_string, self.current_event_string) modifier = Qt.KeyboardModifier.NoModifier if self.current_event_string == '<backtab>' or (len(self.current_event_string) == 1 and self.current_event_string.isupper()): modifier = Qt.KeyboardModifier.ShiftModifier try: key_press = QKeyEvent(QEvent.Type.KeyPress, QT_KEY_DICT[self.current_event_string], modifier, text) except: key_press = QKeyEvent(QEvent.Type.KeyPress, Qt.Key.Key_unknown, modifier, text) for widget in self.get_key_event_widgets(): QApplication.postEvent(widget, key_press) self.send_key_filter(self.current_event_string) else: getattr(self, method_name)() setattr(self, 'insert_or_{}'.format(method_name), _do)
emacs-application-framework
positive
def initfromfile(self): botsglobal.logger.debug('Read edi file "%(filename)s".', self.ta_info) filename = botslib.abspathdata(self.ta_info['filename']) if self.ta_info['messagetype'] == 'mailbag': try: (module, grammarname) = botslib.botsimport('grammars', 'xml', 'mailbag') mailbagsearch = getattr(module, 'mailbagsearch') except AttributeError: botsglobal.logger.error('Missing mailbagsearch in mailbag definitions for xml.') raise except botslib.BotsImportError: botsglobal.logger.error('Missing mailbag definitions for xml, should be there.') raise parser = ET.XMLParser() try: extra_character_entity = getattr(module, 'extra_character_entity') for (key, value) in extra_character_entity.items(): parser.entity[key] = value except AttributeError: pass etree = ET.ElementTree() etreeroot = etree.parse(filename, parser) for item in mailbagsearch: if 'xpath' not in item or 'messagetype' not in item: raise botslib.InMessageError(_('Invalid search parameters in xml mailbag.')) found = etree.find(item['xpath']) if found is not None: if 'content' in item and found.text != item['content']: continue self.ta_info['messagetype'] = item['messagetype'] break else: raise botslib.InMessageError(_('Could not find right xml messagetype for mailbag.')) self.messagegrammarread(typeofgrammarfile='grammars') else: self.messagegrammarread(typeofgrammarfile='grammars') parser = ET.XMLParser() for (key, value) in self.ta_info['extra_character_entity'].items(): parser.entity[key] = value etree = ET.ElementTree() etreeroot = etree.parse(filename, parser) <DeepExtract> if etreeroot.text: etreeroot.text = etreeroot.text.strip() for (key, value) in etreeroot.items(): etreeroot.attrib[key] = value.strip() for xmlchildnode in etreeroot: self._handle_empty(xmlchildnode) </DeepExtract> <DeepExtract> self.stack = [self.defmessage.structure[0]] </DeepExtract> <DeepExtract> newnode = node.Node(record=self._etreenode2botstreenode(etreeroot)) for xmlchildnode in etreeroot: entitytype = self._entitytype(xmlchildnode) if not entitytype: if xmlchildnode.text: newnode.record[xmlchildnode.tag] = xmlchildnode.text newnode.record.update(((xmlchildnode.tag + self.ta_info['attributemarker'] + key, value) for (key, value) in xmlchildnode.items() if value)) elif entitytype == 1: newnode.append(self._etree2botstree(xmlchildnode)) self.stack.pop() else: if self.ta_info['checkunknownentities']: self.add2errorlist(_('[S02]%(linpos)s: Unknown xml-tag "%(recordunkown)s" (within "%(record)s") in message.\n') % {'linpos': newnode.linpos(), 'recordunkown': xmlchildnode.tag, 'record': newnode.record['BOTSID']}) continue self.root = newnode </DeepExtract> <DeepExtract> pass </DeepExtract> self.ta_info.update(self.root.queries)
def initfromfile(self): botsglobal.logger.debug('Read edi file "%(filename)s".', self.ta_info) filename = botslib.abspathdata(self.ta_info['filename']) if self.ta_info['messagetype'] == 'mailbag': try: (module, grammarname) = botslib.botsimport('grammars', 'xml', 'mailbag') mailbagsearch = getattr(module, 'mailbagsearch') except AttributeError: botsglobal.logger.error('Missing mailbagsearch in mailbag definitions for xml.') raise except botslib.BotsImportError: botsglobal.logger.error('Missing mailbag definitions for xml, should be there.') raise parser = ET.XMLParser() try: extra_character_entity = getattr(module, 'extra_character_entity') for (key, value) in extra_character_entity.items(): parser.entity[key] = value except AttributeError: pass etree = ET.ElementTree() etreeroot = etree.parse(filename, parser) for item in mailbagsearch: if 'xpath' not in item or 'messagetype' not in item: raise botslib.InMessageError(_('Invalid search parameters in xml mailbag.')) found = etree.find(item['xpath']) if found is not None: if 'content' in item and found.text != item['content']: continue self.ta_info['messagetype'] = item['messagetype'] break else: raise botslib.InMessageError(_('Could not find right xml messagetype for mailbag.')) self.messagegrammarread(typeofgrammarfile='grammars') else: self.messagegrammarread(typeofgrammarfile='grammars') parser = ET.XMLParser() for (key, value) in self.ta_info['extra_character_entity'].items(): parser.entity[key] = value etree = ET.ElementTree() etreeroot = etree.parse(filename, parser) if etreeroot.text: etreeroot.text = etreeroot.text.strip() for (key, value) in etreeroot.items(): etreeroot.attrib[key] = value.strip() for xmlchildnode in etreeroot: self._handle_empty(xmlchildnode) self.stack = [self.defmessage.structure[0]] newnode = node.Node(record=self._etreenode2botstreenode(etreeroot)) for xmlchildnode in etreeroot: entitytype = self._entitytype(xmlchildnode) if not entitytype: if xmlchildnode.text: newnode.record[xmlchildnode.tag] = xmlchildnode.text newnode.record.update(((xmlchildnode.tag + self.ta_info['attributemarker'] + key, value) for (key, value) in xmlchildnode.items() if value)) elif entitytype == 1: newnode.append(self._etree2botstree(xmlchildnode)) self.stack.pop() else: if self.ta_info['checkunknownentities']: self.add2errorlist(_('[S02]%(linpos)s: Unknown xml-tag "%(recordunkown)s" (within "%(record)s") in message.\n') % {'linpos': newnode.linpos(), 'recordunkown': xmlchildnode.tag, 'record': newnode.record['BOTSID']}) continue self.root = newnode pass self.ta_info.update(self.root.queries)
bots
positive
def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False): """Calculate SSIM (structural similarity). Ref: Image quality assessment: From error visibility to structural similarity The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img1 (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the SSIM calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: ssim result. """ assert img1.shape == img2.shape, f'Image shapes are differnet: {img1.shape}, {img2.shape}.' if input_order not in ['HWC', 'CHW']: raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are "HWC" and "CHW"') img1 = img1.copy().astype('float32')[..., ::-1] img2 = img2.copy().astype('float32')[..., ::-1] <DeepExtract> if input_order not in ['HWC', 'CHW']: raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") if len(img1.shape) == 2: img1 = img1[..., None] img1 = img1 if input_order == 'CHW': img1 = img1.transpose(1, 2, 0) img1 = img1 </DeepExtract> <DeepExtract> if input_order not in ['HWC', 'CHW']: raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") if len(img2.shape) == 2: img2 = img2[..., None] img2 = img2 if input_order == 'CHW': img2 = img2.transpose(1, 2, 0) img2 = img2 </DeepExtract> if crop_border != 0: img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] if test_y_channel: <DeepExtract> img1 = img1.astype(np.float32) / 255.0 if img1.ndim == 3 and img1.shape[2] == 3: img1 = rgb2ycbcr(img1, y_only=True) img1 = img1[..., None] img1 = img1 * 255.0 </DeepExtract> <DeepExtract> img2 = img2.astype(np.float32) / 255.0 if img2.ndim == 3 and img2.shape[2] == 3: img2 = rgb2ycbcr(img2, y_only=True) img2 = img2[..., None] img2 = img2 * 255.0 </DeepExtract> ssims = [] for i in range(img1.shape[2]): ssims.append(_ssim(img1[..., i], img2[..., i])) return np.array(ssims).mean()
def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False): """Calculate SSIM (structural similarity). Ref: Image quality assessment: From error visibility to structural similarity The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img1 (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the SSIM calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: ssim result. """ assert img1.shape == img2.shape, f'Image shapes are differnet: {img1.shape}, {img2.shape}.' if input_order not in ['HWC', 'CHW']: raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are "HWC" and "CHW"') img1 = img1.copy().astype('float32')[..., ::-1] img2 = img2.copy().astype('float32')[..., ::-1] if input_order not in ['HWC', 'CHW']: raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") if len(img1.shape) == 2: img1 = img1[..., None] img1 = img1 if input_order == 'CHW': img1 = img1.transpose(1, 2, 0) img1 = img1 if input_order not in ['HWC', 'CHW']: raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") if len(img2.shape) == 2: img2 = img2[..., None] img2 = img2 if input_order == 'CHW': img2 = img2.transpose(1, 2, 0) img2 = img2 if crop_border != 0: img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] if test_y_channel: img1 = img1.astype(np.float32) / 255.0 if img1.ndim == 3 and img1.shape[2] == 3: img1 = rgb2ycbcr(img1, y_only=True) img1 = img1[..., None] img1 = img1 * 255.0 img2 = img2.astype(np.float32) / 255.0 if img2.ndim == 3 and img2.shape[2] == 3: img2 = rgb2ycbcr(img2, y_only=True) img2 = img2[..., None] img2 = img2 * 255.0 ssims = [] for i in range(img1.shape[2]): ssims.append(_ssim(img1[..., i], img2[..., i])) return np.array(ssims).mean()
-AI-emmmm
positive
def input_config(self): """Configure XP inputs. Available settings are RX_EQ_Control and RX_Sign_Control. RX_EQ_Control can be used as broadcast. RX_Sign_Control has to be done on an input-by-input basis.""" for key in self.map['RX_EQ_Control'].keys(): if 'BC' in key: <DeepExtract> if key not in self.map['RX_EQ_Control'].keys(): logging.info('Invalid %s Register: %s' % ('RX_EQ_Control', key)) raise ValueError if not True: return self.read_register('RX_EQ_Control', key) elif True: return self.write_register('RX_EQ_Control', key, 3) </DeepExtract> elif 'BC' not in key: <DeepExtract> if key not in self.map['RX_EQ_Control'].keys(): logging.info('Invalid %s Register: %s' % ('RX_EQ_Control', key)) raise ValueError if not True: return self.read_register('RX_EQ_Control', key) elif True: return self.write_register('RX_EQ_Control', key, 0) </DeepExtract>
def input_config(self): """Configure XP inputs. Available settings are RX_EQ_Control and RX_Sign_Control. RX_EQ_Control can be used as broadcast. RX_Sign_Control has to be done on an input-by-input basis.""" for key in self.map['RX_EQ_Control'].keys(): if 'BC' in key: if key not in self.map['RX_EQ_Control'].keys(): logging.info('Invalid %s Register: %s' % ('RX_EQ_Control', key)) raise ValueError if not True: return self.read_register('RX_EQ_Control', key) elif True: return self.write_register('RX_EQ_Control', key, 3) elif 'BC' not in key: if key not in self.map['RX_EQ_Control'].keys(): logging.info('Invalid %s Register: %s' % ('RX_EQ_Control', key)) raise ValueError if not True: return self.read_register('RX_EQ_Control', key) elif True: return self.write_register('RX_EQ_Control', key, 0)
basil
positive
def testNestedScope(self): graph = tf.Graph() with graph.as_default(): with graph.name_scope('top'): <DeepExtract> parser = graph_builder.GreedyParser(self._num_actions, self._num_features, self._num_feature_ids, embedding_sizes=[8, 8, 8], hidden_layer_sizes=[32, 32], seed=42, gate_gradients=True, use_averaging=use_averaging, **kw_args) </DeepExtract> parser.AddTraining(self._task_context, batch_size=10, corpus_name='training-corpus') parser.AddSaver() self.assertTrue(self.NodeFound('top/training/logits')) self.assertTrue(self.NodeFound('top/training/feature_0')) self.assertFalse(self.NodeFound('top/save/restore_all')) self.assertTrue(self.NodeFound('save/restore_all'))
def testNestedScope(self): graph = tf.Graph() with graph.as_default(): with graph.name_scope('top'): parser = graph_builder.GreedyParser(self._num_actions, self._num_features, self._num_feature_ids, embedding_sizes=[8, 8, 8], hidden_layer_sizes=[32, 32], seed=42, gate_gradients=True, use_averaging=use_averaging, **kw_args) parser.AddTraining(self._task_context, batch_size=10, corpus_name='training-corpus') parser.AddSaver() self.assertTrue(self.NodeFound('top/training/logits')) self.assertTrue(self.NodeFound('top/training/feature_0')) self.assertFalse(self.NodeFound('top/save/restore_all')) self.assertTrue(self.NodeFound('save/restore_all'))
AI_Reader
positive
def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" if isinstance(other, cookielib.CookieJar): for cookie in other: <DeepExtract> if hasattr(copy.copy(cookie).value, 'startswith') and copy.copy(cookie).value.startswith('"') and copy.copy(cookie).value.endswith('"'): copy.copy(cookie).value = copy.copy(cookie).value.replace('\\"', '') return super(RequestsCookieJar, self).set_cookie(copy.copy(cookie), *args, **kwargs) </DeepExtract> else: super(RequestsCookieJar, self).update(other)
def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" if isinstance(other, cookielib.CookieJar): for cookie in other: if hasattr(copy.copy(cookie).value, 'startswith') and copy.copy(cookie).value.startswith('"') and copy.copy(cookie).value.endswith('"'): copy.copy(cookie).value = copy.copy(cookie).value.replace('\\"', '') return super(RequestsCookieJar, self).set_cookie(copy.copy(cookie), *args, **kwargs) else: super(RequestsCookieJar, self).update(other)
alexa-sky-hd
positive
def _merge_a_into_b(a, b): """Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a. """ if type(a) is not edict: return for (k, v) in a.items(): if k not in b: raise KeyError('{} is not a valid config key'.format(k)) old_type = type(b[k]) if old_type is not type(v): if isinstance(b[k], np.ndarray): v = np.array(v, dtype=b[k].dtype) else: raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k)) if type(v) is edict: try: <DeepExtract> if type(a[k]) is not edict: return for (k, v) in a[k].items(): if k not in b[k]: raise KeyError('{} is not a valid config key'.format(k)) old_type = type(b[k][k]) if old_type is not type(v): if isinstance(b[k][k], np.ndarray): v = np.array(v, dtype=b[k][k].dtype) else: raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k][k]), type(v), k)) if type(v) is edict: try: _merge_a_into_b(a[k][k], b[k][k]) except: print('Error under config key: {}'.format(k)) raise else: b[k][k] = v </DeepExtract> except: print('Error under config key: {}'.format(k)) raise else: b[k] = v
def _merge_a_into_b(a, b): """Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a. """ if type(a) is not edict: return for (k, v) in a.items(): if k not in b: raise KeyError('{} is not a valid config key'.format(k)) old_type = type(b[k]) if old_type is not type(v): if isinstance(b[k], np.ndarray): v = np.array(v, dtype=b[k].dtype) else: raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k)) if type(v) is edict: try: if type(a[k]) is not edict: return for (k, v) in a[k].items(): if k not in b[k]: raise KeyError('{} is not a valid config key'.format(k)) old_type = type(b[k][k]) if old_type is not type(v): if isinstance(b[k][k], np.ndarray): v = np.array(v, dtype=b[k][k].dtype) else: raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k][k]), type(v), k)) if type(v) is edict: try: _merge_a_into_b(a[k][k], b[k][k]) except: print('Error under config key: {}'.format(k)) raise else: b[k][k] = v except: print('Error under config key: {}'.format(k)) raise else: b[k] = v
Detection-PyTorch-Notebook
positive
def setup_model_for_training(model, weights_file, output_dir): """Loaded saved weights and create the network in the C2 workspace.""" logger = logging.getLogger(__name__) if cfg.TRAIN.DOMAIN_ADAPTATION: <DeepExtract> logger = logging.getLogger(__name__) logger.info('Loading source dataset: {}'.format(cfg.TRAIN.SOURCE_DATASETS)) source_roidb = combined_roidb_for_training(cfg.TRAIN.SOURCE_DATASETS, cfg.TRAIN.SOURCE_PROPOSAL_FILES, True) logger.info('{:d} source roidb entries'.format(len(source_roidb))) logger.info('Loading target dataset: {}'.format(cfg.TRAIN.TARGET_DATASETS)) target_roidb = combined_roidb_for_training(cfg.TRAIN.TARGET_DATASETS, cfg.TRAIN.TARGET_PROPOSAL_FILES, False) logger.info('{:d} target roidb entries'.format(len(target_roidb))) roidb = source_roidb + target_roidb model_builder.add_training_inputs(model, source_roidb=source_roidb, target_roidb=target_roidb) </DeepExtract> else: <DeepExtract> logger = logging.getLogger(__name__) logger.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS)) roidb = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES) logger.info('{:d} roidb entries'.format(len(roidb))) model_builder.add_training_inputs(model, source_roidb=roidb) </DeepExtract> if weights_file: nu.initialize_gpu_from_weights_file(model, weights_file, gpu_id=0) nu.broadcast_parameters(model) workspace.CreateNet(model.net) logger.info('Outputs saved to: {:s}'.format(os.path.abspath(output_dir))) <DeepExtract> with open(os.path.join(output_dir, 'net.pbtxt'), 'w') as fid: fid.write(str(model.net.Proto())) with open(os.path.join(output_dir, 'param_init_net.pbtxt'), 'w') as fid: fid.write(str(model.param_init_net.Proto())) </DeepExtract> model.roi_data_loader.register_sigint_handler() model.roi_data_loader.start(prefill=True) return output_dir
def setup_model_for_training(model, weights_file, output_dir): """Loaded saved weights and create the network in the C2 workspace.""" logger = logging.getLogger(__name__) if cfg.TRAIN.DOMAIN_ADAPTATION: logger = logging.getLogger(__name__) logger.info('Loading source dataset: {}'.format(cfg.TRAIN.SOURCE_DATASETS)) source_roidb = combined_roidb_for_training(cfg.TRAIN.SOURCE_DATASETS, cfg.TRAIN.SOURCE_PROPOSAL_FILES, True) logger.info('{:d} source roidb entries'.format(len(source_roidb))) logger.info('Loading target dataset: {}'.format(cfg.TRAIN.TARGET_DATASETS)) target_roidb = combined_roidb_for_training(cfg.TRAIN.TARGET_DATASETS, cfg.TRAIN.TARGET_PROPOSAL_FILES, False) logger.info('{:d} target roidb entries'.format(len(target_roidb))) roidb = source_roidb + target_roidb model_builder.add_training_inputs(model, source_roidb=source_roidb, target_roidb=target_roidb) else: logger = logging.getLogger(__name__) logger.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS)) roidb = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES) logger.info('{:d} roidb entries'.format(len(roidb))) model_builder.add_training_inputs(model, source_roidb=roidb) if weights_file: nu.initialize_gpu_from_weights_file(model, weights_file, gpu_id=0) nu.broadcast_parameters(model) workspace.CreateNet(model.net) logger.info('Outputs saved to: {:s}'.format(os.path.abspath(output_dir))) with open(os.path.join(output_dir, 'net.pbtxt'), 'w') as fid: fid.write(str(model.net.Proto())) with open(os.path.join(output_dir, 'param_init_net.pbtxt'), 'w') as fid: fid.write(str(model.param_init_net.Proto())) model.roi_data_loader.register_sigint_handler() model.roi_data_loader.start(prefill=True) return output_dir
Detectron-DA-Faster-RCNN
positive
def get_span_offsets(docgraph, node_id): """ returns the character start and end position of the span of text that the given node spans or dominates. Returns ------- offsets : tuple(int, int) character onset and offset of the span """ try: <DeepExtract> if debug is True and is_directed_acyclic_graph(docgraph) is False: warnings.warn("Can't reliably extract span '{0}' from cyclical graph'{1}'.Maximum recursion depth may be exceeded.".format(node_id, docgraph)) span = [] if docgraph.ns + ':token' in docgraph.node[node_id]: span.append(node_id) for (src_id, target_id, edge_attribs) in docgraph.out_edges_iter(node_id, data=True): if src_id == target_id: continue if edge_attribs['edge_type'] != EdgeTypes.pointing_relation: span.extend(get_span(docgraph, target_id)) span = sorted(span, key=natural_sort_key) </DeepExtract> (onsets, offsets) = zip(*[docgraph.get_offsets(tok_node) for tok_node in span]) return (min(onsets), max(offsets)) except KeyError as _: raise KeyError("Node '{}' doesn't span any tokens.".format(node_id))
def get_span_offsets(docgraph, node_id): """ returns the character start and end position of the span of text that the given node spans or dominates. Returns ------- offsets : tuple(int, int) character onset and offset of the span """ try: if debug is True and is_directed_acyclic_graph(docgraph) is False: warnings.warn("Can't reliably extract span '{0}' from cyclical graph'{1}'.Maximum recursion depth may be exceeded.".format(node_id, docgraph)) span = [] if docgraph.ns + ':token' in docgraph.node[node_id]: span.append(node_id) for (src_id, target_id, edge_attribs) in docgraph.out_edges_iter(node_id, data=True): if src_id == target_id: continue if edge_attribs['edge_type'] != EdgeTypes.pointing_relation: span.extend(get_span(docgraph, target_id)) span = sorted(span, key=natural_sort_key) (onsets, offsets) = zip(*[docgraph.get_offsets(tok_node) for tok_node in span]) return (min(onsets), max(offsets)) except KeyError as _: raise KeyError("Node '{}' doesn't span any tokens.".format(node_id))
discoursegraphs
positive
def do_check(separator, payload, TAG, timesec, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename): if menu.options.file_write: <DeepExtract> (file_to_write, dest_to_write, content) = checks.check_file_to_write() if settings.TARGET_OS == 'win': cmd = checks.change_dir(dest_to_write) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) (fname, tmp_fname, cmd) = checks.find_filename(dest_to_write, content) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) cmd = checks.win_decode_b64_enc(fname, tmp_fname) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) cmd = checks.delete_tmp(tmp_fname) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) else: cmd = checks.write_content(content, dest_to_write) cmd = cmd + settings.COMMENT response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) cmd = checks.check_file(dest_to_write) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) checks.file_write_status(shell, dest_to_write) </DeepExtract> settings.FILE_ACCESS_DONE = True if menu.options.file_upload: if settings.TARGET_OS == 'win': check_option = '--file-upload' checks.unavailable_option(check_option) else: <DeepExtract> (cmd, dest_to_upload) = checks.check_file_to_upload() response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) cmd = checks.check_file(dest_to_upload) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) checks.file_upload_status(shell, dest_to_upload) </DeepExtract> settings.FILE_ACCESS_DONE = True if menu.options.file_read: <DeepExtract> (cmd, file_to_read) = checks.file_content_to_read() if session_handler.export_stored_cmd(url, cmd, vuln_parameter) == None or menu.options.ignore_session: response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) session_handler.store_cmd(url, cmd, shell, vuln_parameter) else: shell = session_handler.export_stored_cmd(url, cmd, vuln_parameter) checks.file_read_status(shell, file_to_read, filename) </DeepExtract> settings.FILE_ACCESS_DONE = True
def do_check(separator, payload, TAG, timesec, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename): if menu.options.file_write: (file_to_write, dest_to_write, content) = checks.check_file_to_write() if settings.TARGET_OS == 'win': cmd = checks.change_dir(dest_to_write) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) (fname, tmp_fname, cmd) = checks.find_filename(dest_to_write, content) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) cmd = checks.win_decode_b64_enc(fname, tmp_fname) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) cmd = checks.delete_tmp(tmp_fname) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) else: cmd = checks.write_content(content, dest_to_write) cmd = cmd + settings.COMMENT response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) cmd = checks.check_file(dest_to_write) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) checks.file_write_status(shell, dest_to_write) settings.FILE_ACCESS_DONE = True if menu.options.file_upload: if settings.TARGET_OS == 'win': check_option = '--file-upload' checks.unavailable_option(check_option) else: (cmd, dest_to_upload) = checks.check_file_to_upload() response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) cmd = checks.check_file(dest_to_upload) response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) checks.file_upload_status(shell, dest_to_upload) settings.FILE_ACCESS_DONE = True if menu.options.file_read: (cmd, file_to_read) = checks.file_content_to_read() if session_handler.export_stored_cmd(url, cmd, vuln_parameter) == None or menu.options.ignore_session: response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename) shell = fb_injector.injection_results(url, OUTPUT_TEXTFILE, timesec) shell = ''.join((str(p) for p in shell)) session_handler.store_cmd(url, cmd, shell, vuln_parameter) else: shell = session_handler.export_stored_cmd(url, cmd, vuln_parameter) checks.file_read_status(shell, file_to_read, filename) settings.FILE_ACCESS_DONE = True
commix
positive
def cut_species_for_mapping(reactants: List[ARCSpecies], products: List[ARCSpecies], loc_r: List[int], loc_p: List[int]) -> Optional[Tuple[List[ARCSpecies], List[ARCSpecies]]]: """ A function for scissoring the reactants and products, as a preparation for atom mapping. Args: reactants: A list of the ARCSpecies for scission products: A list of the ARCSpecies for scission loc_r: A list of the location and number of cuts that is required. loc_p: A list of the location and number of cuts that is required. Returns: A list of scissored reactants and products. """ (r_cuts, p_cuts) = (list(), list()) for (index, reactant) in zip(loc_r, reactants): if index == 1: try: reactant.final_xyz = reactant.get_xyz() cuts = reactant.scissors() r_cuts += cuts except SpeciesError: return None elif index > 1: bdes = reactant.bdes new_r = ARCSpecies(label='scissors', mol=reactant.mol.copy(deep=True)) for bde in bdes: new_r.bdes = [bde] new_r.final_xyz = new_r.get_xyz() try: cuts = new_r.scissors() except SpeciesError: return None if len(cuts) == 1: new_r = cuts[0] else: <DeepExtract> list_atom_labels_cuts_0 = [int(atom.label) + 1 for atom in cuts[0].mol.atoms] bdes = reactant.bdes for bd in bdes: if bd == bde: continue elif bd[0] not in list_atom_labels_cuts_0: (new_r, second) = (cuts[1], cuts[0]) (new_r, second) = (cuts[0], cuts[1]) </DeepExtract> r_cuts += [second] r_cuts += [new_r] else: r_cuts.append(reactant) for (index, product) in zip(loc_p, products): if index == 1: try: product.final_xyz = product.get_xyz() cuts = product.scissors() if len(cuts) == 1: cuts.append(ARCSpecies(label=cuts[0].label, mol=cuts[0].mol.copy(deep=True))) labels = [atom.label for atom in product.mol.atoms] cuts[-1].mol.atoms[0].label = labels[1] if cuts[0].mol.atoms[0].label == labels[0] else labels[0] p_cuts += cuts except SpeciesError: return None elif index > 1: bdes = product.bdes new_p = ARCSpecies(label='scissors', mol=product.mol.copy(deep=True)) for bde in bdes: new_p.bdes = [bde] new_p.final_xyz = new_p.get_xyz() try: cuts = new_p.scissors() except SpeciesError: return None if len(cuts) == 1: new_p = cuts[0] else: <DeepExtract> list_atom_labels_cuts_0 = [int(atom.label) + 1 for atom in cuts[0].mol.atoms] bdes = product.bdes for bd in bdes: if bd == bde: continue elif bd[0] not in list_atom_labels_cuts_0: (new_p, second) = (cuts[1], cuts[0]) (new_p, second) = (cuts[0], cuts[1]) </DeepExtract> p_cuts += [second] p_cuts += [new_p] else: p_cuts.append(product) return (r_cuts, p_cuts)
def cut_species_for_mapping(reactants: List[ARCSpecies], products: List[ARCSpecies], loc_r: List[int], loc_p: List[int]) -> Optional[Tuple[List[ARCSpecies], List[ARCSpecies]]]: """ A function for scissoring the reactants and products, as a preparation for atom mapping. Args: reactants: A list of the ARCSpecies for scission products: A list of the ARCSpecies for scission loc_r: A list of the location and number of cuts that is required. loc_p: A list of the location and number of cuts that is required. Returns: A list of scissored reactants and products. """ (r_cuts, p_cuts) = (list(), list()) for (index, reactant) in zip(loc_r, reactants): if index == 1: try: reactant.final_xyz = reactant.get_xyz() cuts = reactant.scissors() r_cuts += cuts except SpeciesError: return None elif index > 1: bdes = reactant.bdes new_r = ARCSpecies(label='scissors', mol=reactant.mol.copy(deep=True)) for bde in bdes: new_r.bdes = [bde] new_r.final_xyz = new_r.get_xyz() try: cuts = new_r.scissors() except SpeciesError: return None if len(cuts) == 1: new_r = cuts[0] else: list_atom_labels_cuts_0 = [int(atom.label) + 1 for atom in cuts[0].mol.atoms] bdes = reactant.bdes for bd in bdes: if bd == bde: continue elif bd[0] not in list_atom_labels_cuts_0: (new_r, second) = (cuts[1], cuts[0]) (new_r, second) = (cuts[0], cuts[1]) r_cuts += [second] r_cuts += [new_r] else: r_cuts.append(reactant) for (index, product) in zip(loc_p, products): if index == 1: try: product.final_xyz = product.get_xyz() cuts = product.scissors() if len(cuts) == 1: cuts.append(ARCSpecies(label=cuts[0].label, mol=cuts[0].mol.copy(deep=True))) labels = [atom.label for atom in product.mol.atoms] cuts[-1].mol.atoms[0].label = labels[1] if cuts[0].mol.atoms[0].label == labels[0] else labels[0] p_cuts += cuts except SpeciesError: return None elif index > 1: bdes = product.bdes new_p = ARCSpecies(label='scissors', mol=product.mol.copy(deep=True)) for bde in bdes: new_p.bdes = [bde] new_p.final_xyz = new_p.get_xyz() try: cuts = new_p.scissors() except SpeciesError: return None if len(cuts) == 1: new_p = cuts[0] else: list_atom_labels_cuts_0 = [int(atom.label) + 1 for atom in cuts[0].mol.atoms] bdes = product.bdes for bd in bdes: if bd == bde: continue elif bd[0] not in list_atom_labels_cuts_0: (new_p, second) = (cuts[1], cuts[0]) (new_p, second) = (cuts[0], cuts[1]) p_cuts += [second] p_cuts += [new_p] else: p_cuts.append(product) return (r_cuts, p_cuts)
ARC
positive
def resolve_django_path_parameter(path_regex, variable, available_formats): """ convert django style path parameters to OpenAPI parameters. """ registered_converters = get_converters() for match in _PATH_PARAMETER_COMPONENT_RE.finditer(path_regex): (converter, parameter) = (match.group('converter'), match.group('parameter')) enum_values = None if api_settings.SCHEMA_COERCE_PATH_PK and parameter == 'pk': parameter = 'id' elif spectacular_settings.SCHEMA_COERCE_PATH_PK_SUFFIX and parameter.endswith('_pk'): parameter = f'{parameter[:-3]}_id' if parameter != variable: continue if not converter: return None if converter.startswith('drf_format_suffix_'): explicit_formats = converter[len('drf_format_suffix_'):].split('_') enum_values = [f'.{suffix}' for suffix in explicit_formats if suffix in available_formats] converter = 'drf_format_suffix' elif converter == 'drf_format_suffix': enum_values = [f'.{suffix}' for suffix in available_formats] if converter in spectacular_settings.PATH_CONVERTER_OVERRIDES: override = spectacular_settings.PATH_CONVERTER_OVERRIDES[converter] if is_basic_type(override): <DeepExtract> openapi_type_mapping = get_openapi_type_mapping() if override is None or type(override) is None or override is OpenApiTypes.NONE: schema = None elif override in openapi_type_mapping: schema = dict(openapi_type_mapping[override]) elif override in PYTHON_TYPE_MAPPING: schema = dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[override]]) else: warn(f'could not resolve type for "{override}". defaulting to "string"') schema = dict(openapi_type_mapping[OpenApiTypes.STR]) </DeepExtract> elif isinstance(override, dict): schema = dict(override) else: warn(f'Unable to use path converter override for "{converter}". Please refer to the documentation on how to use this.') return None elif converter in DJANGO_PATH_CONVERTER_MAPPING: <DeepExtract> openapi_type_mapping = get_openapi_type_mapping() if DJANGO_PATH_CONVERTER_MAPPING[converter] is None or type(DJANGO_PATH_CONVERTER_MAPPING[converter]) is None or DJANGO_PATH_CONVERTER_MAPPING[converter] is OpenApiTypes.NONE: schema = None elif DJANGO_PATH_CONVERTER_MAPPING[converter] in openapi_type_mapping: schema = dict(openapi_type_mapping[DJANGO_PATH_CONVERTER_MAPPING[converter]]) elif DJANGO_PATH_CONVERTER_MAPPING[converter] in PYTHON_TYPE_MAPPING: schema = dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[DJANGO_PATH_CONVERTER_MAPPING[converter]]]) else: warn(f'could not resolve type for "{DJANGO_PATH_CONVERTER_MAPPING[converter]}". defaulting to "string"') schema = dict(openapi_type_mapping[OpenApiTypes.STR]) </DeepExtract> elif converter in registered_converters: <DeepExtract> openapi_type_mapping = get_openapi_type_mapping() if OpenApiTypes.STR is None or type(OpenApiTypes.STR) is None or OpenApiTypes.STR is OpenApiTypes.NONE: schema = None elif OpenApiTypes.STR in openapi_type_mapping: schema = dict(openapi_type_mapping[OpenApiTypes.STR]) elif OpenApiTypes.STR in PYTHON_TYPE_MAPPING: schema = dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[OpenApiTypes.STR]]) else: warn(f'could not resolve type for "{OpenApiTypes.STR}". defaulting to "string"') schema = dict(openapi_type_mapping[OpenApiTypes.STR]) </DeepExtract> <DeepExtract> if not registered_converters[converter].regex.startswith('^'): registered_converters[converter].regex = '^' + registered_converters[converter].regex if not registered_converters[converter].regex.endswith('$'): registered_converters[converter].regex = registered_converters[converter].regex + '$' schema['pattern'] = registered_converters[converter].regex </DeepExtract> else: error(f'Encountered path converter "{converter}" that is unknown to Django.') return None return build_parameter_type(name=variable, schema=schema, location=OpenApiParameter.PATH, enum=enum_values) return None
def resolve_django_path_parameter(path_regex, variable, available_formats): """ convert django style path parameters to OpenAPI parameters. """ registered_converters = get_converters() for match in _PATH_PARAMETER_COMPONENT_RE.finditer(path_regex): (converter, parameter) = (match.group('converter'), match.group('parameter')) enum_values = None if api_settings.SCHEMA_COERCE_PATH_PK and parameter == 'pk': parameter = 'id' elif spectacular_settings.SCHEMA_COERCE_PATH_PK_SUFFIX and parameter.endswith('_pk'): parameter = f'{parameter[:-3]}_id' if parameter != variable: continue if not converter: return None if converter.startswith('drf_format_suffix_'): explicit_formats = converter[len('drf_format_suffix_'):].split('_') enum_values = [f'.{suffix}' for suffix in explicit_formats if suffix in available_formats] converter = 'drf_format_suffix' elif converter == 'drf_format_suffix': enum_values = [f'.{suffix}' for suffix in available_formats] if converter in spectacular_settings.PATH_CONVERTER_OVERRIDES: override = spectacular_settings.PATH_CONVERTER_OVERRIDES[converter] if is_basic_type(override): openapi_type_mapping = get_openapi_type_mapping() if override is None or type(override) is None or override is OpenApiTypes.NONE: schema = None elif override in openapi_type_mapping: schema = dict(openapi_type_mapping[override]) elif override in PYTHON_TYPE_MAPPING: schema = dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[override]]) else: warn(f'could not resolve type for "{override}". defaulting to "string"') schema = dict(openapi_type_mapping[OpenApiTypes.STR]) elif isinstance(override, dict): schema = dict(override) else: warn(f'Unable to use path converter override for "{converter}". Please refer to the documentation on how to use this.') return None elif converter in DJANGO_PATH_CONVERTER_MAPPING: openapi_type_mapping = get_openapi_type_mapping() if DJANGO_PATH_CONVERTER_MAPPING[converter] is None or type(DJANGO_PATH_CONVERTER_MAPPING[converter]) is None or DJANGO_PATH_CONVERTER_MAPPING[converter] is OpenApiTypes.NONE: schema = None elif DJANGO_PATH_CONVERTER_MAPPING[converter] in openapi_type_mapping: schema = dict(openapi_type_mapping[DJANGO_PATH_CONVERTER_MAPPING[converter]]) elif DJANGO_PATH_CONVERTER_MAPPING[converter] in PYTHON_TYPE_MAPPING: schema = dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[DJANGO_PATH_CONVERTER_MAPPING[converter]]]) else: warn(f'could not resolve type for "{DJANGO_PATH_CONVERTER_MAPPING[converter]}". defaulting to "string"') schema = dict(openapi_type_mapping[OpenApiTypes.STR]) elif converter in registered_converters: openapi_type_mapping = get_openapi_type_mapping() if OpenApiTypes.STR is None or type(OpenApiTypes.STR) is None or OpenApiTypes.STR is OpenApiTypes.NONE: schema = None elif OpenApiTypes.STR in openapi_type_mapping: schema = dict(openapi_type_mapping[OpenApiTypes.STR]) elif OpenApiTypes.STR in PYTHON_TYPE_MAPPING: schema = dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[OpenApiTypes.STR]]) else: warn(f'could not resolve type for "{OpenApiTypes.STR}". 
defaulting to "string"') schema = dict(openapi_type_mapping[OpenApiTypes.STR]) if not registered_converters[converter].regex.startswith('^'): registered_converters[converter].regex = '^' + registered_converters[converter].regex if not registered_converters[converter].regex.endswith('$'): registered_converters[converter].regex = registered_converters[converter].regex + '$' schema['pattern'] = registered_converters[converter].regex else: error(f'Encountered path converter "{converter}" that is unknown to Django.') return None return build_parameter_type(name=variable, schema=schema, location=OpenApiParameter.PATH, enum=enum_values) return None
drf-spectacular
positive
def evaluate_model(dataset, docs, model, model_props, stats, save_output=False, save_scores=False, print_table=False): prog = utils.Progbar(dataset.n_batches) mt = RankingMetricsTracker(dataset.name, model_props=model_props) if model_props.ranking else ClassificationMetricsTracker(dataset.name) mta = ClassificationMetricsTracker(dataset.name + ' anaphoricity', anaphoricity=True) docs_by_id = {doc.did: doc for doc in docs} if model_props.ranking else {} (saved_links, saved_scores) = (defaultdict(list) if save_output else None, defaultdict(dict) if save_scores else None) for (i, X) in enumerate(dataset): if X['y'].size == 0: continue progress = [] scores = model.predict_on_batch(X) if model_props.ranking: <DeepExtract> s = scores[1][:, 0] starts_ends = zip(X['starts'][:, 0], X['ends'][:, 0]) for (start, end) in starts_ends: action_scores = s[start:end] link = np.argmax(action_scores) (m1, m2) = X['ids'][start + link] if saved_links is not None: if m1 != -1: saved_links[docs_by_id[X['did']].did].append((m1, m2)) if saved_scores is not None: for (pair, link_score) in zip(X['ids'][start:end], action_scores): saved_scores[docs_by_id[X['did']].did][tuple(pair)] = link_score docs_by_id[X['did']].link(m1, m2) </DeepExtract> if model_props.anaphoricity and (not model_props.ranking): progress.append(('anaphoricity loss', mta.update(X, scores[0][:, 0]))) if not model_props.anaphoricity_only: progress.append(('loss', mt.update(X, scores if model_props.ranking else scores[1 if model_props.anaphoricity else 0][:, 0]))) prog.update(i + 1, exact=progress) if save_scores: print('Writing scores') utils.write_pickle(saved_scores, model_props.path + dataset.name + '_scores.pkl') if save_output: print('Writing output') utils.write_pickle(saved_links, model_props.path + dataset.name + '_links.pkl') utils.write_pickle(docs, model_props.path + dataset.name + '_processed_docs.pkl') timer.start('metrics') if model_props.ranking: stats.update(compute_metrics(docs, dataset.name)) stats['validate time'] = time.time() - prog.start if model_props.anaphoricity and (not model_props.ranking): mta.finish(stats) if not model_props.anaphoricity_only: mt.finish(stats) timer.stop('metrics') if print_table: print(' & '.join(map(lambda x: '{:.2f}'.format(x * 100), [stats[dataset.name + ' muc precision'], stats[dataset.name + ' muc recall'], stats[dataset.name + ' muc'], stats[dataset.name + ' b3 precision'], stats[dataset.name + ' b3 recall'], stats[dataset.name + ' b3'], stats[dataset.name + ' ceafe precision'], stats[dataset.name + ' ceafe recall'], stats[dataset.name + ' ceafe'], stats[dataset.name + ' conll']])))
def evaluate_model(dataset, docs, model, model_props, stats, save_output=False, save_scores=False, print_table=False): prog = utils.Progbar(dataset.n_batches) mt = RankingMetricsTracker(dataset.name, model_props=model_props) if model_props.ranking else ClassificationMetricsTracker(dataset.name) mta = ClassificationMetricsTracker(dataset.name + ' anaphoricity', anaphoricity=True) docs_by_id = {doc.did: doc for doc in docs} if model_props.ranking else {} (saved_links, saved_scores) = (defaultdict(list) if save_output else None, defaultdict(dict) if save_scores else None) for (i, X) in enumerate(dataset): if X['y'].size == 0: continue progress = [] scores = model.predict_on_batch(X) if model_props.ranking: s = scores[1][:, 0] starts_ends = zip(X['starts'][:, 0], X['ends'][:, 0]) for (start, end) in starts_ends: action_scores = s[start:end] link = np.argmax(action_scores) (m1, m2) = X['ids'][start + link] if saved_links is not None: if m1 != -1: saved_links[docs_by_id[X['did']].did].append((m1, m2)) if saved_scores is not None: for (pair, link_score) in zip(X['ids'][start:end], action_scores): saved_scores[docs_by_id[X['did']].did][tuple(pair)] = link_score docs_by_id[X['did']].link(m1, m2) if model_props.anaphoricity and (not model_props.ranking): progress.append(('anaphoricity loss', mta.update(X, scores[0][:, 0]))) if not model_props.anaphoricity_only: progress.append(('loss', mt.update(X, scores if model_props.ranking else scores[1 if model_props.anaphoricity else 0][:, 0]))) prog.update(i + 1, exact=progress) if save_scores: print('Writing scores') utils.write_pickle(saved_scores, model_props.path + dataset.name + '_scores.pkl') if save_output: print('Writing output') utils.write_pickle(saved_links, model_props.path + dataset.name + '_links.pkl') utils.write_pickle(docs, model_props.path + dataset.name + '_processed_docs.pkl') timer.start('metrics') if model_props.ranking: stats.update(compute_metrics(docs, dataset.name)) stats['validate time'] = time.time() - prog.start if model_props.anaphoricity and (not model_props.ranking): mta.finish(stats) if not model_props.anaphoricity_only: mt.finish(stats) timer.stop('metrics') if print_table: print(' & '.join(map(lambda x: '{:.2f}'.format(x * 100), [stats[dataset.name + ' muc precision'], stats[dataset.name + ' muc recall'], stats[dataset.name + ' muc'], stats[dataset.name + ' b3 precision'], stats[dataset.name + ' b3 recall'], stats[dataset.name + ' b3'], stats[dataset.name + ' ceafe precision'], stats[dataset.name + ' ceafe recall'], stats[dataset.name + ' ceafe'], stats[dataset.name + ' conll']])))
deep-coref
positive
@timeit def main(args): if os.path.isdir(args.input): filelist = glob.glob(f'{args.input}/*.json') else: filelist = [args.input] print(f'Loading {len(filelist)} files') corpus = [dataloader(filelist)] print(f'Documents: {len(corpus[0])}') if args.concepts == 'umls': dict_disorder = load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.disorder.tsv') dict_symptom = load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.symptom.tsv') dict_finding = load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.finding.tsv') dict_icd10 = load_dict(f'{args.dict_root}viruses/ICD10CM.codes.tsv') dict_geo = load_dict(f'{args.dict_root}viruses/umls.geographic_area.tsv') taggers = {'concepts': DictionaryTagger({'disorder': dict_disorder, 'symptom': dict_symptom, 'finding': dict_finding, 'GPE': dict_geo, 'ICD10': dict_icd10})} target_entities = ['disorder', 'symptom', 'finding', 'ICD10'] elif args.concepts == 'umls_merged': dict_terms = {} dict_terms.update(load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.disorder.tsv')) dict_terms.update(load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.symptom.tsv')) dict_terms.update(load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.finding.tsv')) taggers = {'concepts': DictionaryTagger({'disorder_symptom_finding': dict_terms})} target_entities = ['disorder_symptom_finding'] print(f'[{args.concepts}] Loaded {len(dict_terms)} concept terms') elif args.concepts == 'inkfish': drug_fpath = f'{args.entity_tags}/drug.tags.tsv' diso_fpath = f'{args.entity_tags}/disorder.tags.tsv' dict_icd10 = load_dict(f'{args.dict_root}viruses/ICD10CM.codes.tsv') dict_geo = load_dict(f'{args.dict_root}viruses/umls.geographic_area.tsv') taggers = {'concepts': DictionaryTagger({'GPE': dict_geo, 'ICD10': dict_icd10}), 'drugs': PrecomputedEntityTagger(drug_fpath, type_name='drug'), 'disorders': PrecomputedEntityTagger(diso_fpath, type_name='disorder')} target_entities = ['disorder', 'drug', 'ICD10'] pipeline = {'headers': SectionHeaderTagger(header_dict=get_header_dict(), stop_headers={})} for name in taggers: pipeline[name] = taggers[name] pipeline['timex3'] = Timex3Tagger() attribs = {'doctimes': DocTimeTagger(prop='CREATED_AT', format='%Y-%m-%d %H:%M:%S'), 'normalize': Timex3NormalizerTagger(), 'section': ParentSectionTagger(targets=target_entities + ['TIMEX3'], major_headers=get_major_headers()), 'tdelta': TimeDeltaTagger(targets=target_entities), 'polarity': PolarityTagger(targets=target_entities, data_root=f'{args.dict_root}/negex/'), 'hypothetical': HypotheticalTagger(targets=target_entities), 'historical': HistoricalTagger(targets=target_entities), 'subject': FamilyTagger(targets=target_entities, data_root=f'{args.dict_root}/negex/')} pipeline.update(attribs) print(pipeline.keys()) print(f'Pipes: {len(pipeline)}') tagger = TaggerPipelineServer(num_workers=args.n_procs) documents = tagger.apply(pipeline, corpus) print('Tagging complete') <DeepExtract> header = ['DOC_ID', 'DOC_TS', 'TYPE', 'TEXT', 'ABS_CHAR_START', 'ABS_CHAR_END'] header += ['POLARITY', 'HYPOTHETICAL', 'HISTORICAL', 'SECTION', 'SUBJECT', 'TDELTA'] data = [] for entity_type in ['disorder', 'drug', 'ICD10', 'GPE']: for doc in documents[0]: spans = build_candidate_set([doc], entity_type) for x in spans: row = [doc.name, doc.props['doctime'] if 'doctime' in doc.props else 'None', entity_type] row += [x.text, x.abs_char_start, x.abs_char_end] polarity = x.props['polarity'] if 'polarity' in x.props else 'NULL' hypothetical = x.props['hypothetical'] == 1 if 'hypothetical' in x.props else 'NULL' historical = x.props['historical'] == 1 if 'historical' in 
x.props else 'NULL' section = x.props['section'].text if 'section' in x.props and x.props['section'] is not None else 'NULL' subject = x.props['subject'] if 'subject' in x.props else 'NULL' tdelta = x.props['tdelta'] if 'tdelta' in x.props else 'NULL' row += [polarity, hypothetical, historical, section, subject, tdelta] data.append('\t'.join(map(str, row))) with open(args.output, 'w') as fp: fp.write('\t'.join(header) + '\n') fp.write('\n'.join(data)) </DeepExtract> print(f'Concepts written to {args.output}')
@timeit def main(args): if os.path.isdir(args.input): filelist = glob.glob(f'{args.input}/*.json') else: filelist = [args.input] print(f'Loading {len(filelist)} files') corpus = [dataloader(filelist)] print(f'Documents: {len(corpus[0])}') if args.concepts == 'umls': dict_disorder = load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.disorder.tsv') dict_symptom = load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.symptom.tsv') dict_finding = load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.finding.tsv') dict_icd10 = load_dict(f'{args.dict_root}viruses/ICD10CM.codes.tsv') dict_geo = load_dict(f'{args.dict_root}viruses/umls.geographic_area.tsv') taggers = {'concepts': DictionaryTagger({'disorder': dict_disorder, 'symptom': dict_symptom, 'finding': dict_finding, 'GPE': dict_geo, 'ICD10': dict_icd10})} target_entities = ['disorder', 'symptom', 'finding', 'ICD10'] elif args.concepts == 'umls_merged': dict_terms = {} dict_terms.update(load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.disorder.tsv')) dict_terms.update(load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.symptom.tsv')) dict_terms.update(load_dict(f'{args.dict_root}viruses/SNOMEDCT_US.finding.tsv')) taggers = {'concepts': DictionaryTagger({'disorder_symptom_finding': dict_terms})} target_entities = ['disorder_symptom_finding'] print(f'[{args.concepts}] Loaded {len(dict_terms)} concept terms') elif args.concepts == 'inkfish': drug_fpath = f'{args.entity_tags}/drug.tags.tsv' diso_fpath = f'{args.entity_tags}/disorder.tags.tsv' dict_icd10 = load_dict(f'{args.dict_root}viruses/ICD10CM.codes.tsv') dict_geo = load_dict(f'{args.dict_root}viruses/umls.geographic_area.tsv') taggers = {'concepts': DictionaryTagger({'GPE': dict_geo, 'ICD10': dict_icd10}), 'drugs': PrecomputedEntityTagger(drug_fpath, type_name='drug'), 'disorders': PrecomputedEntityTagger(diso_fpath, type_name='disorder')} target_entities = ['disorder', 'drug', 'ICD10'] pipeline = {'headers': SectionHeaderTagger(header_dict=get_header_dict(), stop_headers={})} for name in taggers: pipeline[name] = taggers[name] pipeline['timex3'] = Timex3Tagger() attribs = {'doctimes': DocTimeTagger(prop='CREATED_AT', format='%Y-%m-%d %H:%M:%S'), 'normalize': Timex3NormalizerTagger(), 'section': ParentSectionTagger(targets=target_entities + ['TIMEX3'], major_headers=get_major_headers()), 'tdelta': TimeDeltaTagger(targets=target_entities), 'polarity': PolarityTagger(targets=target_entities, data_root=f'{args.dict_root}/negex/'), 'hypothetical': HypotheticalTagger(targets=target_entities), 'historical': HistoricalTagger(targets=target_entities), 'subject': FamilyTagger(targets=target_entities, data_root=f'{args.dict_root}/negex/')} pipeline.update(attribs) print(pipeline.keys()) print(f'Pipes: {len(pipeline)}') tagger = TaggerPipelineServer(num_workers=args.n_procs) documents = tagger.apply(pipeline, corpus) print('Tagging complete') header = ['DOC_ID', 'DOC_TS', 'TYPE', 'TEXT', 'ABS_CHAR_START', 'ABS_CHAR_END'] header += ['POLARITY', 'HYPOTHETICAL', 'HISTORICAL', 'SECTION', 'SUBJECT', 'TDELTA'] data = [] for entity_type in ['disorder', 'drug', 'ICD10', 'GPE']: for doc in documents[0]: spans = build_candidate_set([doc], entity_type) for x in spans: row = [doc.name, doc.props['doctime'] if 'doctime' in doc.props else 'None', entity_type] row += [x.text, x.abs_char_start, x.abs_char_end] polarity = x.props['polarity'] if 'polarity' in x.props else 'NULL' hypothetical = x.props['hypothetical'] == 1 if 'hypothetical' in x.props else 'NULL' historical = x.props['historical'] == 1 if 'historical' in x.props else 
'NULL' section = x.props['section'].text if 'section' in x.props and x.props['section'] is not None else 'NULL' subject = x.props['subject'] if 'subject' in x.props else 'NULL' tdelta = x.props['tdelta'] if 'tdelta' in x.props else 'NULL' row += [polarity, hypothetical, historical, section, subject, tdelta] data.append('\t'.join(map(str, row))) with open(args.output, 'w') as fp: fp.write('\t'.join(header) + '\n') fp.write('\n'.join(data)) print(f'Concepts written to {args.output}')
ehr-rwe
positive
def register():
    for cls in classes:
        bpy.utils.register_class(cls)
    context = bpy.context
    prefs = bpy.context.preferences.addons[__package__].preferences
    <DeepExtract>
    is_panel = hasattr(bpy.types, 'VIEW3D_PT_texel_density_checker')
    category = bpy.context.preferences.addons[__package__].preferences.view3d_panel_category
    if is_panel:
        try:
            bpy.utils.unregister_class(VIEW3D_PT_texel_density_checker)
        except:
            pass
    VIEW3D_PT_texel_density_checker.bl_category = category
    bpy.utils.register_class(VIEW3D_PT_texel_density_checker)
    </DeepExtract>
    <DeepExtract>
    is_panel = hasattr(bpy.types, 'UV_PT_texel_density_checker')
    category = bpy.context.preferences.addons[__package__].preferences.uv_panel_category
    if is_panel:
        try:
            bpy.utils.unregister_class(UV_PT_texel_density_checker)
        except:
            pass
    UV_PT_texel_density_checker.bl_category = category
    bpy.utils.register_class(UV_PT_texel_density_checker)
    </DeepExtract>
def register():
    for cls in classes:
        bpy.utils.register_class(cls)
    context = bpy.context
    prefs = bpy.context.preferences.addons[__package__].preferences
    is_panel = hasattr(bpy.types, 'VIEW3D_PT_texel_density_checker')
    category = bpy.context.preferences.addons[__package__].preferences.view3d_panel_category
    if is_panel:
        try:
            bpy.utils.unregister_class(VIEW3D_PT_texel_density_checker)
        except:
            pass
    VIEW3D_PT_texel_density_checker.bl_category = category
    bpy.utils.register_class(VIEW3D_PT_texel_density_checker)
    is_panel = hasattr(bpy.types, 'UV_PT_texel_density_checker')
    category = bpy.context.preferences.addons[__package__].preferences.uv_panel_category
    if is_panel:
        try:
            bpy.utils.unregister_class(UV_PT_texel_density_checker)
        except:
            pass
    UV_PT_texel_density_checker.bl_category = category
    bpy.utils.register_class(UV_PT_texel_density_checker)
Blender-Texel-Density-Checker
positive
def net(self, input, args, class_dim=1000, data_format='NCHW'):
    layers = self.layers
    vgg_spec = {11: [1, 1, 2, 2, 2], 13: [2, 2, 2, 2, 2], 16: [2, 2, 3, 3, 3], 19: [2, 2, 4, 4, 4]}
    assert layers in vgg_spec.keys(), 'supported layers are {} but input layer is {}'.format(vgg_spec.keys(), layers)
    nums = vgg_spec[layers]
    <DeepExtract>
    conv = input
    for i in range(nums[0]):
        conv = fluid.layers.conv2d(input=conv, num_filters=64, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv1_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv1_' + str(i + 1) + '_offset'), data_format=data_format)
    conv1 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    </DeepExtract>
    <DeepExtract>
    conv = conv1
    for i in range(nums[1]):
        conv = fluid.layers.conv2d(input=conv, num_filters=128, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv2_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv2_' + str(i + 1) + '_offset'), data_format=data_format)
    conv2 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    </DeepExtract>
    <DeepExtract>
    conv = conv2
    for i in range(nums[2]):
        conv = fluid.layers.conv2d(input=conv, num_filters=256, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv3_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv3_' + str(i + 1) + '_offset'), data_format=data_format)
    conv3 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    </DeepExtract>
    <DeepExtract>
    conv = conv3
    for i in range(nums[3]):
        conv = fluid.layers.conv2d(input=conv, num_filters=512, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv4_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv4_' + str(i + 1) + '_offset'), data_format=data_format)
    conv4 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    </DeepExtract>
    <DeepExtract>
    conv = conv4
    for i in range(nums[4]):
        conv = fluid.layers.conv2d(input=conv, num_filters=512, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv5_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv5_' + str(i + 1) + '_offset'), data_format=data_format)
    conv5 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    </DeepExtract>
    fc_dim = 4096
    fc_name = ['fc6', 'fc7', 'fc8']
    fc1 = fluid.layers.fc(input=conv5, size=fc_dim, act='relu', param_attr=fluid.param_attr.ParamAttr(name=fc_name[0] + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name=fc_name[0] + '_offset'))
    fc1 = fluid.layers.dropout(x=fc1, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=fc1, size=fc_dim, act='relu', param_attr=fluid.param_attr.ParamAttr(name=fc_name[1] + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name=fc_name[1] + '_offset'))
    fc2 = fluid.layers.dropout(x=fc2, dropout_prob=0.5)
    out = fluid.layers.fc(input=fc2, size=class_dim, param_attr=fluid.param_attr.ParamAttr(name=fc_name[2] + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name=fc_name[2] + '_offset'))
    return out
def net(self, input, args, class_dim=1000, data_format='NCHW'):
    layers = self.layers
    vgg_spec = {11: [1, 1, 2, 2, 2], 13: [2, 2, 2, 2, 2], 16: [2, 2, 3, 3, 3], 19: [2, 2, 4, 4, 4]}
    assert layers in vgg_spec.keys(), 'supported layers are {} but input layer is {}'.format(vgg_spec.keys(), layers)
    nums = vgg_spec[layers]
    conv = input
    for i in range(nums[0]):
        conv = fluid.layers.conv2d(input=conv, num_filters=64, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv1_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv1_' + str(i + 1) + '_offset'), data_format=data_format)
    conv1 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    conv = conv1
    for i in range(nums[1]):
        conv = fluid.layers.conv2d(input=conv, num_filters=128, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv2_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv2_' + str(i + 1) + '_offset'), data_format=data_format)
    conv2 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    conv = conv2
    for i in range(nums[2]):
        conv = fluid.layers.conv2d(input=conv, num_filters=256, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv3_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv3_' + str(i + 1) + '_offset'), data_format=data_format)
    conv3 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    conv = conv3
    for i in range(nums[3]):
        conv = fluid.layers.conv2d(input=conv, num_filters=512, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv4_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv4_' + str(i + 1) + '_offset'), data_format=data_format)
    conv4 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    conv = conv4
    for i in range(nums[4]):
        conv = fluid.layers.conv2d(input=conv, num_filters=512, filter_size=3, stride=1, padding=1, act='relu', param_attr=fluid.param_attr.ParamAttr(name='conv5_' + str(i + 1) + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name='conv5_' + str(i + 1) + '_offset'), data_format=data_format)
    conv5 = fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)
    fc_dim = 4096
    fc_name = ['fc6', 'fc7', 'fc8']
    fc1 = fluid.layers.fc(input=conv5, size=fc_dim, act='relu', param_attr=fluid.param_attr.ParamAttr(name=fc_name[0] + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name=fc_name[0] + '_offset'))
    fc1 = fluid.layers.dropout(x=fc1, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=fc1, size=fc_dim, act='relu', param_attr=fluid.param_attr.ParamAttr(name=fc_name[1] + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name=fc_name[1] + '_offset'))
    fc2 = fluid.layers.dropout(x=fc2, dropout_prob=0.5)
    out = fluid.layers.fc(input=fc2, size=class_dim, param_attr=fluid.param_attr.ParamAttr(name=fc_name[2] + '_weights'), bias_attr=fluid.param_attr.ParamAttr(name=fc_name[2] + '_offset'))
    return out
edl
positive
def parse(self, limit=None): if limit is not None: LOG.info('Only parsing first %d rows', limit) LOG.info('Parsing files...') if self.test_only: self.test_mode = True <DeepExtract> omimids = list(self.omim_type.keys() - self.omim_replaced.keys()) LOG.info('Have %i omim numbers to fetch records from their API', len(omimids)) LOG.info('Have %i omim types ', len(self.omim_type)) if self.test_mode: graph = self.testgraph else: graph = self.graph geno = Genotype(graph) model = Model(graph) tax_label = 'Homo sapiens' tax_id = self.globaltt[tax_label] geno.addGenome(tax_id, tax_label) model.addClassToGraph(tax_id, tax_label) includes = set() includes.add('all') self.process_entries(omimids, self._transform_entry, includes, graph, limit) for omim_id in self.omim_replaced: model.addDeprecatedClass('OMIM:' + omim_id, ['OMIM:' + o for o in self.omim_replaced[omim_id]]) </DeepExtract> <DeepExtract> if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 assoc_count = 0 src_key = 'morbidmap' col = self.files[src_key]['columns'] raw = '/'.join((self.rawdir, self.files[src_key]['file'])) with open(raw) as reader: line = reader.readline() line = reader.readline() line = reader.readline() line = reader.readline().strip() line_counter = 4 row = line.split('\t') if not self.check_fileheader(col, row): pass for line in reader: line_counter += 1 line = line.strip() if line[0] == '#': continue row = line.split('\t') if len(row) != len(col): LOG.warning('Unexpected input on line: %i got: %s', line_counter, row) continue disorder = row[col.index('# Phenotype')] gene_symbols = row[col.index('Gene Symbols')] gene_num = row[col.index('MIM Number')] disorder_match = self.disorder_regex.match(disorder) nogene_match = self.nogene_regex.match(disorder) if disorder_match is not None: disorder_parts = disorder_match.groups() (disorder_label, disorder_num, phene_key) = disorder_parts if self.test_mode and (int(disorder_num) not in self.test_ids or int(gene_num) not in self.test_ids): continue assoc_count += 1 gene_symbols = gene_symbols.split(', ') gene_id = 'OMIM:' + str(gene_num) self._make_pheno_assoc(graph, gene_id, disorder_num, disorder_label, phene_key) elif nogene_match is not None: (disorder_label, phene_key) = nogene_match.groups() disorder_num = gene_num disorder_id = 'OMIM:' + str(disorder_num) if self.test_mode and int(disorder_num) not in self.test_ids: continue if disorder_id in self.omim_ncbigene_idmap: gene_ids = self.omim_ncbigene_idmap[disorder_id] if gene_ids is None: continue for gene_num in gene_ids: gene_id = 'NCBIGene:' + str(gene_num).strip() assoc_count += 1 self._make_pheno_assoc(graph, gene_id, disorder_num, disorder_label, phene_key) else: feature_id = self._make_anonymous_feature(gene_num) assoc_count += 1 self._make_pheno_assoc(graph, feature_id, disorder_num, disorder_label, phene_key) LOG.info("We don't have an NCBIGene feature id to link %s with %s", disorder_id, disorder_label) if self.test_mode and gene_num not in self.test_ids: continue else: LOG.warning('There are misformatted rows %i:%s', line_counter, line) if not self.test_mode and limit is not None and (line_counter > limit): break LOG.info('Added %d G2P associations', assoc_count) </DeepExtract> <DeepExtract> if self.test_mode: graph = self.testgraph else: graph = self.graph LOG.info('getting phenotypic series titles') model = Model(graph) line_counter = 0 src_key = 'phenotypicSeries' col = self.files[src_key]['columns'] raw = '/'.join((self.rawdir, self.files[src_key]['file'])) with open(raw) as 
reader: line = reader.readline() line = reader.readline() line = reader.readline() line = reader.readline() line = reader.readline().strip() line_counter = 5 row = line.split('\t') if not self.check_fileheader(col, row): pass for line in reader: line_counter += 1 row = line.strip().split('\t') if row and len(row) != len(col): LOG.warning('Unexpected input on line: %i got: %s', line_counter, row) continue ps_label = row[col.index('Phenotypic Series Title')].strip() ps_num = row[col.index('Phenotypic Series number')].strip() omimps_curie = 'OMIMPS:' + ps_num model.addClassToGraph(omimps_curie, ps_label, class_category=blv.terms['Disease']) if not self.test_mode and limit is not None and (line_counter > limit): break </DeepExtract> LOG.info('Done parsing.')
def parse(self, limit=None): if limit is not None: LOG.info('Only parsing first %d rows', limit) LOG.info('Parsing files...') if self.test_only: self.test_mode = True omimids = list(self.omim_type.keys() - self.omim_replaced.keys()) LOG.info('Have %i omim numbers to fetch records from their API', len(omimids)) LOG.info('Have %i omim types ', len(self.omim_type)) if self.test_mode: graph = self.testgraph else: graph = self.graph geno = Genotype(graph) model = Model(graph) tax_label = 'Homo sapiens' tax_id = self.globaltt[tax_label] geno.addGenome(tax_id, tax_label) model.addClassToGraph(tax_id, tax_label) includes = set() includes.add('all') self.process_entries(omimids, self._transform_entry, includes, graph, limit) for omim_id in self.omim_replaced: model.addDeprecatedClass('OMIM:' + omim_id, ['OMIM:' + o for o in self.omim_replaced[omim_id]]) if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 assoc_count = 0 src_key = 'morbidmap' col = self.files[src_key]['columns'] raw = '/'.join((self.rawdir, self.files[src_key]['file'])) with open(raw) as reader: line = reader.readline() line = reader.readline() line = reader.readline() line = reader.readline().strip() line_counter = 4 row = line.split('\t') if not self.check_fileheader(col, row): pass for line in reader: line_counter += 1 line = line.strip() if line[0] == '#': continue row = line.split('\t') if len(row) != len(col): LOG.warning('Unexpected input on line: %i got: %s', line_counter, row) continue disorder = row[col.index('# Phenotype')] gene_symbols = row[col.index('Gene Symbols')] gene_num = row[col.index('MIM Number')] disorder_match = self.disorder_regex.match(disorder) nogene_match = self.nogene_regex.match(disorder) if disorder_match is not None: disorder_parts = disorder_match.groups() (disorder_label, disorder_num, phene_key) = disorder_parts if self.test_mode and (int(disorder_num) not in self.test_ids or int(gene_num) not in self.test_ids): continue assoc_count += 1 gene_symbols = gene_symbols.split(', ') gene_id = 'OMIM:' + str(gene_num) self._make_pheno_assoc(graph, gene_id, disorder_num, disorder_label, phene_key) elif nogene_match is not None: (disorder_label, phene_key) = nogene_match.groups() disorder_num = gene_num disorder_id = 'OMIM:' + str(disorder_num) if self.test_mode and int(disorder_num) not in self.test_ids: continue if disorder_id in self.omim_ncbigene_idmap: gene_ids = self.omim_ncbigene_idmap[disorder_id] if gene_ids is None: continue for gene_num in gene_ids: gene_id = 'NCBIGene:' + str(gene_num).strip() assoc_count += 1 self._make_pheno_assoc(graph, gene_id, disorder_num, disorder_label, phene_key) else: feature_id = self._make_anonymous_feature(gene_num) assoc_count += 1 self._make_pheno_assoc(graph, feature_id, disorder_num, disorder_label, phene_key) LOG.info("We don't have an NCBIGene feature id to link %s with %s", disorder_id, disorder_label) if self.test_mode and gene_num not in self.test_ids: continue else: LOG.warning('There are misformatted rows %i:%s', line_counter, line) if not self.test_mode and limit is not None and (line_counter > limit): break LOG.info('Added %d G2P associations', assoc_count) if self.test_mode: graph = self.testgraph else: graph = self.graph LOG.info('getting phenotypic series titles') model = Model(graph) line_counter = 0 src_key = 'phenotypicSeries' col = self.files[src_key]['columns'] raw = '/'.join((self.rawdir, self.files[src_key]['file'])) with open(raw) as reader: line = reader.readline() line = reader.readline() line = 
reader.readline() line = reader.readline() line = reader.readline().strip() line_counter = 5 row = line.split('\t') if not self.check_fileheader(col, row): pass for line in reader: line_counter += 1 row = line.strip().split('\t') if row and len(row) != len(col): LOG.warning('Unexpected input on line: %i got: %s', line_counter, row) continue ps_label = row[col.index('Phenotypic Series Title')].strip() ps_num = row[col.index('Phenotypic Series number')].strip() omimps_curie = 'OMIMPS:' + ps_num model.addClassToGraph(omimps_curie, ps_label, class_category=blv.terms['Disease']) if not self.test_mode and limit is not None and (line_counter > limit): break LOG.info('Done parsing.')
dipper
positive
def set_node_attrs_on_tree(data_json, node_attrs): """ Assign desired colorings, metadata etc to the tree structure Parameters ---------- data_json : dict node_attrs: dict keys: strain names. values: dict with keys -> all available metadata (even "excluded" keys), values -> data (string / numeric / bool) """ <DeepExtract> def node_to_author_tuple(data): author_data = (data.get('author', 'unknown'), data.get('title', 'unknown'), data.get('journal', 'unknown')) author_to_unique_tuples = defaultdict(list) node_author_info = {} for (node_name, node_info) in node_attrs.items(): author = node_info.get('author') if not author: author = node_info.get('authors') if not author: continue node_author_info[node_name] = {'author': author} if 'title' in node_info: title = node_info['title'].strip() if is_valid(title): node_author_info[node_name]['title'] = title if 'journal' in node_info: journal = node_info['journal'].strip() if is_valid(journal): node_author_info[node_name]['journal'] = journal if 'paper_url' in node_info: paper_url = node_info['paper_url'].strip() if is_valid(paper_url): node_author_info[node_name]['paper_url'] = paper_url author_tuple = node_to_author_tuple(node_author_info[node_name]) if author_tuple not in author_to_unique_tuples[author]: author_to_unique_tuples[author].append(author_tuple) for (node_name, node_info) in node_attrs.items(): if node_name not in node_author_info: continue author_tuple = node_to_author_tuple(node_author_info[node_name]) author = node_author_info[node_name]['author'] if len(author_to_unique_tuples[author]) > 1: index = author_to_unique_tuples[author].index(author_tuple) disambiguation_suffix = counter_to_disambiguation_suffix(index) node_author_info[node_name]['value'] = f'{author} {disambiguation_suffix}' else: node_author_info[node_name]['value'] = author author_data = node_author_info </DeepExtract> def _transfer_mutations(node, raw_data): if 'aa_muts' in raw_data or 'muts' in raw_data: node['branch_attrs']['mutations'] = {} if 'muts' in raw_data and len(raw_data['muts']): node['branch_attrs']['mutations']['nuc'] = raw_data['muts'] if 'aa_muts' in raw_data: aa = {gene: data for (gene, data) in raw_data['aa_muts'].items() if len(data)} node['branch_attrs']['mutations'].update(aa) if aa: aa_lab = '; '.join(('{!s}: {!s}'.format(key, ', '.join(val)) for (key, val) in aa.items())) if 'labels' in node['branch_attrs']: node['branch_attrs']['labels']['aa'] = aa_lab else: node['branch_attrs']['labels'] = {'aa': aa_lab} def _transfer_vaccine_info(node, raw_data): if raw_data.get('vaccine'): node['node_attrs']['vaccine'] = raw_data['vaccine'] def _transfer_labels(node, raw_data): if 'clade_annotation' in raw_data and is_valid(raw_data['clade_annotation']): if 'labels' in node['branch_attrs']: node['branch_attrs']['labels']['clade'] = raw_data['clade_annotation'] else: node['branch_attrs']['labels'] = {'clade': raw_data['clade_annotation']} def _transfer_hidden_flag(node, raw_data): hidden = raw_data.get('hidden', None) if hidden: if hidden in ['always', 'divtree', 'timetree']: node['node_attrs']['hidden'] = hidden elif hidden is True or str(hidden) == '1': node['node_attrs']['hidden'] = 'always' else: warn('Hidden node trait of {} is invalid. 
Ignoring.'.format(hidden)) def _transfer_num_date(node, raw_data): if raw_data.get('numdate', None) and (not raw_data.get('num_date', None)): raw_data['num_date'] = raw_data['numdate'] del raw_data['numdate'] if is_valid(raw_data.get('num_date', None)): node['node_attrs']['num_date'] = {'value': raw_data['num_date']} if is_valid(raw_data.get('num_date_confidence', None)): node['node_attrs']['num_date']['confidence'] = raw_data['num_date_confidence'] def _transfer_url_accession(node, raw_data): for prop in ['url', 'accession']: if is_valid(raw_data.get(prop, None)): node['node_attrs'][prop] = str(raw_data[prop]) def _transfer_colorings_filters(node, raw_data): trait_keys = set() if 'colorings' in data_json['meta']: trait_keys = trait_keys.union([t['key'] for t in data_json['meta']['colorings']]) if 'filters' in data_json['meta']: trait_keys = trait_keys.union(data_json['meta']['filters']) exclude_list = ['gt', 'num_date', 'author'] trait_keys = trait_keys.difference(exclude_list) for key in trait_keys: if is_valid(raw_data.get(key, None)): node['node_attrs'][key] = {'value': raw_data[key]} if is_valid(raw_data.get(key + '_confidence', None)): node['node_attrs'][key]['confidence'] = raw_data[key + '_confidence'] if is_valid(raw_data.get(key + '_entropy', None)): node['node_attrs'][key]['entropy'] = raw_data[key + '_entropy'] def _transfer_author_data(node): if node['name'] in author_data: node['node_attrs']['author'] = author_data[node['name']] def _recursively_set_data(node): raw_data = node_attrs[node['name']] <DeepExtract> if 'aa_muts' in raw_data or 'muts' in raw_data: node['branch_attrs']['mutations'] = {} if 'muts' in raw_data and len(raw_data['muts']): node['branch_attrs']['mutations']['nuc'] = raw_data['muts'] if 'aa_muts' in raw_data: aa = {gene: data for (gene, data) in raw_data['aa_muts'].items() if len(data)} node['branch_attrs']['mutations'].update(aa) if aa: aa_lab = '; '.join(('{!s}: {!s}'.format(key, ', '.join(val)) for (key, val) in aa.items())) if 'labels' in node['branch_attrs']: node['branch_attrs']['labels']['aa'] = aa_lab else: node['branch_attrs']['labels'] = {'aa': aa_lab} </DeepExtract> <DeepExtract> if raw_data.get('vaccine'): node['node_attrs']['vaccine'] = raw_data['vaccine'] </DeepExtract> <DeepExtract> if 'clade_annotation' in raw_data and is_valid(raw_data['clade_annotation']): if 'labels' in node['branch_attrs']: node['branch_attrs']['labels']['clade'] = raw_data['clade_annotation'] else: node['branch_attrs']['labels'] = {'clade': raw_data['clade_annotation']} </DeepExtract> <DeepExtract> hidden = raw_data.get('hidden', None) if hidden: if hidden in ['always', 'divtree', 'timetree']: node['node_attrs']['hidden'] = hidden elif hidden is True or str(hidden) == '1': node['node_attrs']['hidden'] = 'always' else: warn('Hidden node trait of {} is invalid. 
Ignoring.'.format(hidden)) </DeepExtract> <DeepExtract> if raw_data.get('numdate', None) and (not raw_data.get('num_date', None)): raw_data['num_date'] = raw_data['numdate'] del raw_data['numdate'] if is_valid(raw_data.get('num_date', None)): node['node_attrs']['num_date'] = {'value': raw_data['num_date']} if is_valid(raw_data.get('num_date_confidence', None)): node['node_attrs']['num_date']['confidence'] = raw_data['num_date_confidence'] </DeepExtract> <DeepExtract> for prop in ['url', 'accession']: if is_valid(raw_data.get(prop, None)): node['node_attrs'][prop] = str(raw_data[prop]) </DeepExtract> <DeepExtract> if node['name'] in author_data: node['node_attrs']['author'] = author_data[node['name']] </DeepExtract> <DeepExtract> trait_keys = set() if 'colorings' in data_json['meta']: trait_keys = trait_keys.union([t['key'] for t in data_json['meta']['colorings']]) if 'filters' in data_json['meta']: trait_keys = trait_keys.union(data_json['meta']['filters']) exclude_list = ['gt', 'num_date', 'author'] trait_keys = trait_keys.difference(exclude_list) for key in trait_keys: if is_valid(raw_data.get(key, None)): node['node_attrs'][key] = {'value': raw_data[key]} if is_valid(raw_data.get(key + '_confidence', None)): node['node_attrs'][key]['confidence'] = raw_data[key + '_confidence'] if is_valid(raw_data.get(key + '_entropy', None)): node['node_attrs'][key]['entropy'] = raw_data[key + '_entropy'] </DeepExtract> for child in node.get('children', []): <DeepExtract> raw_data = node_attrs[child['name']] _transfer_mutations(child, raw_data) _transfer_vaccine_info(child, raw_data) _transfer_labels(child, raw_data) _transfer_hidden_flag(child, raw_data) _transfer_num_date(child, raw_data) _transfer_url_accession(child, raw_data) _transfer_author_data(child) _transfer_colorings_filters(child, raw_data) for child in child.get('children', []): _recursively_set_data(child) </DeepExtract> <DeepExtract> raw_data = node_attrs[data_json['tree']['name']] _transfer_mutations(data_json['tree'], raw_data) _transfer_vaccine_info(data_json['tree'], raw_data) _transfer_labels(data_json['tree'], raw_data) _transfer_hidden_flag(data_json['tree'], raw_data) _transfer_num_date(data_json['tree'], raw_data) _transfer_url_accession(data_json['tree'], raw_data) _transfer_author_data(data_json['tree']) _transfer_colorings_filters(data_json['tree'], raw_data) for child in data_json['tree'].get('children', []): _recursively_set_data(child) </DeepExtract>
def set_node_attrs_on_tree(data_json, node_attrs): """ Assign desired colorings, metadata etc to the tree structure Parameters ---------- data_json : dict node_attrs: dict keys: strain names. values: dict with keys -> all available metadata (even "excluded" keys), values -> data (string / numeric / bool) """ def node_to_author_tuple(data): author_data = (data.get('author', 'unknown'), data.get('title', 'unknown'), data.get('journal', 'unknown')) author_to_unique_tuples = defaultdict(list) node_author_info = {} for (node_name, node_info) in node_attrs.items(): author = node_info.get('author') if not author: author = node_info.get('authors') if not author: continue node_author_info[node_name] = {'author': author} if 'title' in node_info: title = node_info['title'].strip() if is_valid(title): node_author_info[node_name]['title'] = title if 'journal' in node_info: journal = node_info['journal'].strip() if is_valid(journal): node_author_info[node_name]['journal'] = journal if 'paper_url' in node_info: paper_url = node_info['paper_url'].strip() if is_valid(paper_url): node_author_info[node_name]['paper_url'] = paper_url author_tuple = node_to_author_tuple(node_author_info[node_name]) if author_tuple not in author_to_unique_tuples[author]: author_to_unique_tuples[author].append(author_tuple) for (node_name, node_info) in node_attrs.items(): if node_name not in node_author_info: continue author_tuple = node_to_author_tuple(node_author_info[node_name]) author = node_author_info[node_name]['author'] if len(author_to_unique_tuples[author]) > 1: index = author_to_unique_tuples[author].index(author_tuple) disambiguation_suffix = counter_to_disambiguation_suffix(index) node_author_info[node_name]['value'] = f'{author} {disambiguation_suffix}' else: node_author_info[node_name]['value'] = author author_data = node_author_info def _transfer_mutations(node, raw_data): if 'aa_muts' in raw_data or 'muts' in raw_data: node['branch_attrs']['mutations'] = {} if 'muts' in raw_data and len(raw_data['muts']): node['branch_attrs']['mutations']['nuc'] = raw_data['muts'] if 'aa_muts' in raw_data: aa = {gene: data for (gene, data) in raw_data['aa_muts'].items() if len(data)} node['branch_attrs']['mutations'].update(aa) if aa: aa_lab = '; '.join(('{!s}: {!s}'.format(key, ', '.join(val)) for (key, val) in aa.items())) if 'labels' in node['branch_attrs']: node['branch_attrs']['labels']['aa'] = aa_lab else: node['branch_attrs']['labels'] = {'aa': aa_lab} def _transfer_vaccine_info(node, raw_data): if raw_data.get('vaccine'): node['node_attrs']['vaccine'] = raw_data['vaccine'] def _transfer_labels(node, raw_data): if 'clade_annotation' in raw_data and is_valid(raw_data['clade_annotation']): if 'labels' in node['branch_attrs']: node['branch_attrs']['labels']['clade'] = raw_data['clade_annotation'] else: node['branch_attrs']['labels'] = {'clade': raw_data['clade_annotation']} def _transfer_hidden_flag(node, raw_data): hidden = raw_data.get('hidden', None) if hidden: if hidden in ['always', 'divtree', 'timetree']: node['node_attrs']['hidden'] = hidden elif hidden is True or str(hidden) == '1': node['node_attrs']['hidden'] = 'always' else: warn('Hidden node trait of {} is invalid. 
Ignoring.'.format(hidden)) def _transfer_num_date(node, raw_data): if raw_data.get('numdate', None) and (not raw_data.get('num_date', None)): raw_data['num_date'] = raw_data['numdate'] del raw_data['numdate'] if is_valid(raw_data.get('num_date', None)): node['node_attrs']['num_date'] = {'value': raw_data['num_date']} if is_valid(raw_data.get('num_date_confidence', None)): node['node_attrs']['num_date']['confidence'] = raw_data['num_date_confidence'] def _transfer_url_accession(node, raw_data): for prop in ['url', 'accession']: if is_valid(raw_data.get(prop, None)): node['node_attrs'][prop] = str(raw_data[prop]) def _transfer_colorings_filters(node, raw_data): trait_keys = set() if 'colorings' in data_json['meta']: trait_keys = trait_keys.union([t['key'] for t in data_json['meta']['colorings']]) if 'filters' in data_json['meta']: trait_keys = trait_keys.union(data_json['meta']['filters']) exclude_list = ['gt', 'num_date', 'author'] trait_keys = trait_keys.difference(exclude_list) for key in trait_keys: if is_valid(raw_data.get(key, None)): node['node_attrs'][key] = {'value': raw_data[key]} if is_valid(raw_data.get(key + '_confidence', None)): node['node_attrs'][key]['confidence'] = raw_data[key + '_confidence'] if is_valid(raw_data.get(key + '_entropy', None)): node['node_attrs'][key]['entropy'] = raw_data[key + '_entropy'] def _transfer_author_data(node): if node['name'] in author_data: node['node_attrs']['author'] = author_data[node['name']] def _recursively_set_data(node): raw_data = node_attrs[node['name']] if 'aa_muts' in raw_data or 'muts' in raw_data: node['branch_attrs']['mutations'] = {} if 'muts' in raw_data and len(raw_data['muts']): node['branch_attrs']['mutations']['nuc'] = raw_data['muts'] if 'aa_muts' in raw_data: aa = {gene: data for (gene, data) in raw_data['aa_muts'].items() if len(data)} node['branch_attrs']['mutations'].update(aa) if aa: aa_lab = '; '.join(('{!s}: {!s}'.format(key, ', '.join(val)) for (key, val) in aa.items())) if 'labels' in node['branch_attrs']: node['branch_attrs']['labels']['aa'] = aa_lab else: node['branch_attrs']['labels'] = {'aa': aa_lab} if raw_data.get('vaccine'): node['node_attrs']['vaccine'] = raw_data['vaccine'] if 'clade_annotation' in raw_data and is_valid(raw_data['clade_annotation']): if 'labels' in node['branch_attrs']: node['branch_attrs']['labels']['clade'] = raw_data['clade_annotation'] else: node['branch_attrs']['labels'] = {'clade': raw_data['clade_annotation']} hidden = raw_data.get('hidden', None) if hidden: if hidden in ['always', 'divtree', 'timetree']: node['node_attrs']['hidden'] = hidden elif hidden is True or str(hidden) == '1': node['node_attrs']['hidden'] = 'always' else: warn('Hidden node trait of {} is invalid. 
Ignoring.'.format(hidden)) if raw_data.get('numdate', None) and (not raw_data.get('num_date', None)): raw_data['num_date'] = raw_data['numdate'] del raw_data['numdate'] if is_valid(raw_data.get('num_date', None)): node['node_attrs']['num_date'] = {'value': raw_data['num_date']} if is_valid(raw_data.get('num_date_confidence', None)): node['node_attrs']['num_date']['confidence'] = raw_data['num_date_confidence'] for prop in ['url', 'accession']: if is_valid(raw_data.get(prop, None)): node['node_attrs'][prop] = str(raw_data[prop]) if node['name'] in author_data: node['node_attrs']['author'] = author_data[node['name']] trait_keys = set() if 'colorings' in data_json['meta']: trait_keys = trait_keys.union([t['key'] for t in data_json['meta']['colorings']]) if 'filters' in data_json['meta']: trait_keys = trait_keys.union(data_json['meta']['filters']) exclude_list = ['gt', 'num_date', 'author'] trait_keys = trait_keys.difference(exclude_list) for key in trait_keys: if is_valid(raw_data.get(key, None)): node['node_attrs'][key] = {'value': raw_data[key]} if is_valid(raw_data.get(key + '_confidence', None)): node['node_attrs'][key]['confidence'] = raw_data[key + '_confidence'] if is_valid(raw_data.get(key + '_entropy', None)): node['node_attrs'][key]['entropy'] = raw_data[key + '_entropy'] for child in node.get('children', []): raw_data = node_attrs[child['name']] _transfer_mutations(child, raw_data) _transfer_vaccine_info(child, raw_data) _transfer_labels(child, raw_data) _transfer_hidden_flag(child, raw_data) _transfer_num_date(child, raw_data) _transfer_url_accession(child, raw_data) _transfer_author_data(child) _transfer_colorings_filters(child, raw_data) for child in child.get('children', []): _recursively_set_data(child) raw_data = node_attrs[data_json['tree']['name']] _transfer_mutations(data_json['tree'], raw_data) _transfer_vaccine_info(data_json['tree'], raw_data) _transfer_labels(data_json['tree'], raw_data) _transfer_hidden_flag(data_json['tree'], raw_data) _transfer_num_date(data_json['tree'], raw_data) _transfer_url_accession(data_json['tree'], raw_data) _transfer_author_data(data_json['tree']) _transfer_colorings_filters(data_json['tree'], raw_data) for child in data_json['tree'].get('children', []): _recursively_set_data(child) </DeepExtract>
augur
positive
def advance_to_end(arr, n):
    <DeepExtract>
    if n == 1:
        x = 0
    res = math.inf
    for i in range(n - 2, -1, -1):
        if i + arr[i] >= n - 1:
            sub_res = min_jumps(arr, i + 1)
            if sub_res != math.inf:
                res = min(res, sub_res + 1)
    x = res
    </DeepExtract>
    if x > 0 and x != math.inf:
        return 'true'
    elif x == 0 or x == math.inf:
        return 'false'
def advance_to_end(arr, n):
    if n == 1:
        x = 0
    res = math.inf
    for i in range(n - 2, -1, -1):
        if i + arr[i] >= n - 1:
            sub_res = min_jumps(arr, i + 1)
            if sub_res != math.inf:
                res = min(res, sub_res + 1)
    x = res
    if x > 0 and x != math.inf:
        return 'true'
    elif x == 0 or x == math.inf:
        return 'false'
Competitive-Coding-Platforms
positive
def statement(self): localctx = BasisParser.StatementContext(self, self._ctx, self.state) <DeepExtract> if hasattr(localctx, 'enterProgram'): localctx.enterProgram(self) </DeepExtract> try: self.state = 83 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input, 2, self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) self.state = 81 <DeepExtract> localctx = BasisParser.SimpleStatementContext(self, self._ctx, self.state) self.enterRule(localctx, 4, self.RULE_simpleStatement) self._la = 0 try: self.enterOuterAlt(localctx, 1) self.state = 85 self.shortStatement() self.state = 90 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input, 3, self._ctx) while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER: if _alt == 1: self.state = 86 self.match(BasisParser.Semicolon) self.state = 87 self.shortStatement() self.state = 92 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input, 3, self._ctx) self.state = 94 self._errHandler.sync(self) _la = self._input.LA(1) if _la == BasisParser.Semicolon: self.state = 93 self.match(BasisParser.Semicolon) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx </DeepExtract> pass elif la_ == 2: self.enterOuterAlt(localctx, 2) self.state = 82 <DeepExtract> localctx = BasisParser.CompoundStatementContext(self, self._ctx, self.state) self.enterRule(localctx, 8, self.RULE_compoundStatement) try: self.state = 103 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input, 5, self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) self.state = 98 self.declarePackage() pass elif la_ == 2: self.enterOuterAlt(localctx, 2) self.state = 99 self.declareImport() pass elif la_ == 3: self.enterOuterAlt(localctx, 3) self.state = 100 self.declareVariable() pass elif la_ == 4: self.enterOuterAlt(localctx, 4) self.state = 101 self.declareFunction() pass elif la_ == 5: self.enterOuterAlt(localctx, 5) self.state = 102 self.ifStatement() pass except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx </DeepExtract> pass except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: <DeepExtract> if hasattr(listener, 'exitProgram'): listener.exitProgram(self) </DeepExtract> return localctx
def statement(self): localctx = BasisParser.StatementContext(self, self._ctx, self.state) if hasattr(localctx, 'enterProgram'): localctx.enterProgram(self) try: self.state = 83 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input, 2, self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) self.state = 81 localctx = BasisParser.SimpleStatementContext(self, self._ctx, self.state) self.enterRule(localctx, 4, self.RULE_simpleStatement) self._la = 0 try: self.enterOuterAlt(localctx, 1) self.state = 85 self.shortStatement() self.state = 90 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input, 3, self._ctx) while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER: if _alt == 1: self.state = 86 self.match(BasisParser.Semicolon) self.state = 87 self.shortStatement() self.state = 92 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input, 3, self._ctx) self.state = 94 self._errHandler.sync(self) _la = self._input.LA(1) if _la == BasisParser.Semicolon: self.state = 93 self.match(BasisParser.Semicolon) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx pass elif la_ == 2: self.enterOuterAlt(localctx, 2) self.state = 82 localctx = BasisParser.CompoundStatementContext(self, self._ctx, self.state) self.enterRule(localctx, 8, self.RULE_compoundStatement) try: self.state = 103 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input, 5, self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) self.state = 98 self.declarePackage() pass elif la_ == 2: self.enterOuterAlt(localctx, 2) self.state = 99 self.declareImport() pass elif la_ == 3: self.enterOuterAlt(localctx, 3) self.state = 100 self.declareVariable() pass elif la_ == 4: self.enterOuterAlt(localctx, 4) self.state = 101 self.declareFunction() pass elif la_ == 5: self.enterOuterAlt(localctx, 5) self.state = 102 self.ifStatement() pass except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx pass except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: if hasattr(listener, 'exitProgram'): listener.exitProgram(self) return localctx
Basis
positive
def get_column_ARI(X_L, view_assignment_truth):
    view_assignments = X_L['column_partition']['assignments']
    <DeepExtract>
    from collections import defaultdict
    def make_set_dict(list):
        set_dict = defaultdict(set)
        add_element = lambda idx_group: set_dict[idx_group[1]].add(idx_group[0])
        map(add_element, enumerate(list))
        ARI = set_dict
    def check_short_circuit(set_dict_1, list_1, set_dict_2, list_2):
        both_all_apart = len(set_dict_1) == len(list_1) and len(set_dict_2) == len(list_2)
        both_all_together = len(set_dict_1) == 1 and len(set_dict_2) == 1
        ARI = both_all_apart or both_all_together
    def gen_contingency_data(set_dict_1, set_dict_2):
        array_dim = (len(set_dict_1), len(set_dict_2))
        Ns = numpy.ndarray(array_dim)
        for (idx_1, value1) in enumerate(set_dict_1.values()):
            for (idx_2, value2) in enumerate(set_dict_2.values()):
                Ns[idx_1, idx_2] = len(value1.intersection(value2))
        As = Ns.sum(axis=1)
        Bs = Ns.sum(axis=0)
        ARI = (Ns, As, Bs)
    def choose_2_sum(x):
        ARI = sum(x * (x - 1) / 2.0)
    group_idx_dict_1 = make_set_dict(view_assignments)
    group_idx_dict_2 = make_set_dict(view_assignment_truth)
    if check_short_circuit(group_idx_dict_1, view_assignments, group_idx_dict_2, view_assignment_truth):
        ARI = 1.0
    (Ns, As, Bs) = gen_contingency_data(group_idx_dict_1, group_idx_dict_2)
    n_choose_2 = choose_2_sum(numpy.array([len(view_assignments)]))
    cross_sums = choose_2_sum(Ns[Ns > 1])
    a_sums = choose_2_sum(As)
    b_sums = choose_2_sum(Bs)
    numerator = n_choose_2 * cross_sums - a_sums * b_sums
    denominator = 0.5 * n_choose_2 * (a_sums + b_sums) - a_sums * b_sums
    ARI = numerator / denominator
    </DeepExtract>
    return ARI
def get_column_ARI(X_L, view_assignment_truth): view_assignments = X_L['column_partition']['assignments'] from collections import defaultdict def make_set_dict(list): set_dict = defaultdict(set) add_element = lambda idx_group: set_dict[idx_group[1]].add(idx_group[0]) map(add_element, enumerate(list)) ARI = set_dict def check_short_circuit(set_dict_1, list_1, set_dict_2, list_2): both_all_apart = len(set_dict_1) == len(list_1) and len(set_dict_2) == len(list_2) both_all_together = len(set_dict_1) == 1 and len(set_dict_2) == 1 ARI = both_all_apart or both_all_together def gen_contingency_data(set_dict_1, set_dict_2): array_dim = (len(set_dict_1), len(set_dict_2)) Ns = numpy.ndarray(array_dim) for (idx_1, value1) in enumerate(set_dict_1.values()): for (idx_2, value2) in enumerate(set_dict_2.values()): Ns[idx_1, idx_2] = len(value1.intersection(value2)) As = Ns.sum(axis=1) Bs = Ns.sum(axis=0) ARI = (Ns, As, Bs) def choose_2_sum(x): ARI = sum(x * (x - 1) / 2.0) group_idx_dict_1 = make_set_dict(view_assignments) group_idx_dict_2 = make_set_dict(view_assignment_truth) if check_short_circuit(group_idx_dict_1, view_assignments, group_idx_dict_2, view_assignment_truth): ARI = 1.0 (Ns, As, Bs) = gen_contingency_data(group_idx_dict_1, group_idx_dict_2) n_choose_2 = choose_2_sum(numpy.array([len(view_assignments)])) cross_sums = choose_2_sum(Ns[Ns > 1]) a_sums = choose_2_sum(As) b_sums = choose_2_sum(Bs) numerator = n_choose_2 * cross_sums - a_sums * b_sums denominator = 0.5 * n_choose_2 * (a_sums + b_sums) - a_sums * b_sums ARI = numerator / denominator return ARI
crosscat
positive
def get_mnist_dataset(batch_size): mnist_folder = 'data/mnist' <DeepExtract> safe_mkdir(mnist_folder) url = 'http://yann.lecun.com/exdb/mnist' filenames = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'] expected_bytes = [9912422, 28881, 1648877, 4542] for (filename, byte) in zip(filenames, expected_bytes): download_url = os.path.join(url, filename) local_dest = os.path.join(mnist_folder, filename) download_one_file(download_url, local_dest, byte, True) </DeepExtract> <DeepExtract> (imgs, labels) = parse_data(mnist_folder, 'train', False) indices = np.random.permutation(labels.shape[0]) (train_idx, val_idx) = (indices[:num_train], indices[num_train:]) (train_img, train_labels) = (imgs[train_idx, :], labels[train_idx, :]) (val_img, val_labels) = (imgs[val_idx, :], labels[val_idx, :]) test = parse_data(mnist_folder, 't10k', False) (train, val, test) = ((train_img, train_labels), (val_img, val_labels), test) </DeepExtract> train_data = tf.data.Dataset.from_tensor_slices(train) train_data = train_data.shuffle(10000) train_data = train_data.batch(batch_size) test_data = tf.data.Dataset.from_tensor_slices(test) test_data = test_data.batch(batch_size) return (train_data, test_data)
def get_mnist_dataset(batch_size): mnist_folder = 'data/mnist' safe_mkdir(mnist_folder) url = 'http://yann.lecun.com/exdb/mnist' filenames = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'] expected_bytes = [9912422, 28881, 1648877, 4542] for (filename, byte) in zip(filenames, expected_bytes): download_url = os.path.join(url, filename) local_dest = os.path.join(mnist_folder, filename) download_one_file(download_url, local_dest, byte, True) (imgs, labels) = parse_data(mnist_folder, 'train', False) indices = np.random.permutation(labels.shape[0]) (train_idx, val_idx) = (indices[:num_train], indices[num_train:]) (train_img, train_labels) = (imgs[train_idx, :], labels[train_idx, :]) (val_img, val_labels) = (imgs[val_idx, :], labels[val_idx, :]) test = parse_data(mnist_folder, 't10k', False) (train, val, test) = ((train_img, train_labels), (val_img, val_labels), test) train_data = tf.data.Dataset.from_tensor_slices(train) train_data = train_data.shuffle(10000) train_data = train_data.batch(batch_size) test_data = tf.data.Dataset.from_tensor_slices(test) test_data = test_data.batch(batch_size) return (train_data, test_data)
deep-learning-note
positive
def evaluate(self, est_p, weights): """ Evaluates the squared loss of the estimated p with given weights Parameters ---------- est_p : :obj:`list` of :obj:`float` points at which to evaluate the objective """ <DeepExtract> pass </DeepExtract> return np.sum(weights * (self.true_p_ - est_p) ** 2) * (1.0 / np.sum(weights))
def evaluate(self, est_p, weights): """ Evaluates the squared loss of the estimated p with given weights Parameters ---------- est_p : :obj:`list` of :obj:`float` points at which to evaluate the objective """ pass return np.sum(weights * (self.true_p_ - est_p) ** 2) * (1.0 / np.sum(weights))
dex-net
positive
def fetch_exchange(zone_key1: str, zone_key2: str, session: Optional[Session]=None, target_datetime: Optional[datetime]=None, logger: Logger=getLogger(__name__)) -> dict: """Requests the last known power exchange (in MW) between two zones.""" sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2])) if sorted_zone_keys not in EXCHANGES: raise NotImplementedError('Exchange pair not supported: {}'.format(sorted_zone_keys)) s = session or Session() <DeepExtract> req = s.get(MX_EXCHANGE_URL) soup = BeautifulSoup(req.text, 'html.parser') exchange_div = soup.find('div', attrs={'id': EXCHANGES[sorted_zone_keys]}) val = exchange_div.text trantab = str.maketrans({chr(8208): chr(45), ',': ''}) val = val.translate(trantab) flow = float(val) if sorted_zone_keys in ['BZ->MX-PN', 'MX-CE->MX-OR', 'MX-CE->MX-OC']: flow = -1 * flow netflow = flow </DeepExtract> data = {'sortedZoneKeys': sorted_zone_keys, 'datetime': arrow.now('America/Tijuana').datetime, 'netFlow': netflow, 'source': 'cenace.gob.mx'} return data
def fetch_exchange(zone_key1: str, zone_key2: str, session: Optional[Session]=None, target_datetime: Optional[datetime]=None, logger: Logger=getLogger(__name__)) -> dict: """Requests the last known power exchange (in MW) between two zones.""" sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2])) if sorted_zone_keys not in EXCHANGES: raise NotImplementedError('Exchange pair not supported: {}'.format(sorted_zone_keys)) s = session or Session() req = s.get(MX_EXCHANGE_URL) soup = BeautifulSoup(req.text, 'html.parser') exchange_div = soup.find('div', attrs={'id': EXCHANGES[sorted_zone_keys]}) val = exchange_div.text trantab = str.maketrans({chr(8208): chr(45), ',': ''}) val = val.translate(trantab) flow = float(val) if sorted_zone_keys in ['BZ->MX-PN', 'MX-CE->MX-OR', 'MX-CE->MX-OC']: flow = -1 * flow netflow = flow data = {'sortedZoneKeys': sorted_zone_keys, 'datetime': arrow.now('America/Tijuana').datetime, 'netFlow': netflow, 'source': 'cenace.gob.mx'} return data
electricitymap-contrib
positive
def _getter_bag(namespace, name, converter): def get(self): cached = self.cache.get(namespace, {}).get(name) if cached: return cached retval = [] for element in self.getElement('', namespace, name): bags = element.getElementsByTagNameNS(RDF_NAMESPACE, 'Bag') if len(bags): for bag in bags: for item in bag.getElementsByTagNameNS(RDF_NAMESPACE, 'li'): <DeepExtract> text = '' for child in item.childNodes: if child.nodeType == child.TEXT_NODE: text += child.data value = text </DeepExtract> value = converter(value) retval.append(value) ns_cache = self.cache.setdefault(namespace, {}) ns_cache[name] = retval return retval return get
def _getter_bag(namespace, name, converter): def get(self): cached = self.cache.get(namespace, {}).get(name) if cached: return cached retval = [] for element in self.getElement('', namespace, name): bags = element.getElementsByTagNameNS(RDF_NAMESPACE, 'Bag') if len(bags): for bag in bags: for item in bag.getElementsByTagNameNS(RDF_NAMESPACE, 'li'): text = '' for child in item.childNodes: if child.nodeType == child.TEXT_NODE: text += child.data value = text value = converter(value) retval.append(value) ns_cache = self.cache.setdefault(namespace, {}) ns_cache[name] = retval return retval return get
endesive
positive
def __add__(self, array): <DeepExtract> fields = FieldList(*self._fields) </DeepExtract> fields += array return fields
def __add__(self, array): fields = FieldList(*self._fields) fields += array return fields
bubbles
positive
def create_instance(cls_or_collection, attributes, lazy=False, db_loader=None): if not isinstance(cls_or_collection, six.string_types): collection = self.get_collection_for_cls(cls_or_collection) else: collection = cls_or_collection obj = super(Backend, self).create_instance(cls_or_collection, {}, call_hook=False, lazy=lazy, deserialize=False, db_loader=db_loader) <DeepExtract> if attributes is None: attributes = obj.attributes collection = self.get_collection_for_cls(obj.__class__) for (key, params) in self._related_fields[collection].items(): if isinstance(params['field'], ManyToManyField): try: objects = get_value(attributes, key) except KeyError: objects = None if isinstance(objects, ManyToManyProxy): continue set_value(attributes, key, ManyToManyProxy(obj, key, params, objects=objects)) elif isinstance(params['field'], ForeignKeyField): try: foreign_key_data = get_value(attributes, key) except KeyError: continue if isinstance(foreign_key_data, Document): continue if foreign_key_data: if not isinstance(foreign_key_data, dict): foreign_key_data = {'pk': foreign_key_data, '__lazy__': True, '__collection__': collection} try: (d, lazy_foreign_obj) = self.deserialize_db_data(foreign_key_data) foreign_obj = self.create_instance(params['class'], d, lazy=lazy_foreign_obj) except: logger.warning('Found corrupted data in related field data for key {}'.format(key)) continue else: foreign_obj = None set_value(attributes, key, foreign_obj) elif isinstance(params['field'], OneToManyField): try: objects = get_value(attributes, key) except KeyError: objects = None if isinstance(objects, (Document, QuerySet)): continue if not 'pk' in attributes: set_value(attributes, key, None) continue table = self._collection_tables[params['collection']] related_table = self._collection_tables[params['backref']['collection']] qs = QuerySet(backend=self, table=table, cls=params['class'], condition=table.c[params['backref']['column']] == expression.cast(attributes['pk'], params['type']), objects=objects, raw=False) if params['field'].unique: if objects is not None: try: set_value(attributes, key, qs[0]) except IndexError: set_value(attributes, key, None) else: def db_loader(params=params, qs=qs): try: obj = qs[0] except IndexError: raise params['class'].DoesNotExist if len(qs) > 1: raise params['class'].MultipleDocumentsReturned return obj set_value(attributes, key, self.create_instance(params['class'], {}, lazy=True, db_loader=db_loader)) else: set_value(attributes, key, qs) obj.attributes = attributes </DeepExtract> obj.attributes = self.deserialize(attributes) self.call_hook('after_load', obj) return obj
def create_instance(cls_or_collection, attributes, lazy=False, db_loader=None): if not isinstance(cls_or_collection, six.string_types): collection = self.get_collection_for_cls(cls_or_collection) else: collection = cls_or_collection obj = super(Backend, self).create_instance(cls_or_collection, {}, call_hook=False, lazy=lazy, deserialize=False, db_loader=db_loader) if attributes is None: attributes = obj.attributes collection = self.get_collection_for_cls(obj.__class__) for (key, params) in self._related_fields[collection].items(): if isinstance(params['field'], ManyToManyField): try: objects = get_value(attributes, key) except KeyError: objects = None if isinstance(objects, ManyToManyProxy): continue set_value(attributes, key, ManyToManyProxy(obj, key, params, objects=objects)) elif isinstance(params['field'], ForeignKeyField): try: foreign_key_data = get_value(attributes, key) except KeyError: continue if isinstance(foreign_key_data, Document): continue if foreign_key_data: if not isinstance(foreign_key_data, dict): foreign_key_data = {'pk': foreign_key_data, '__lazy__': True, '__collection__': collection} try: (d, lazy_foreign_obj) = self.deserialize_db_data(foreign_key_data) foreign_obj = self.create_instance(params['class'], d, lazy=lazy_foreign_obj) except: logger.warning('Found corrupted data in related field data for key {}'.format(key)) continue else: foreign_obj = None set_value(attributes, key, foreign_obj) elif isinstance(params['field'], OneToManyField): try: objects = get_value(attributes, key) except KeyError: objects = None if isinstance(objects, (Document, QuerySet)): continue if not 'pk' in attributes: set_value(attributes, key, None) continue table = self._collection_tables[params['collection']] related_table = self._collection_tables[params['backref']['collection']] qs = QuerySet(backend=self, table=table, cls=params['class'], condition=table.c[params['backref']['column']] == expression.cast(attributes['pk'], params['type']), objects=objects, raw=False) if params['field'].unique: if objects is not None: try: set_value(attributes, key, qs[0]) except IndexError: set_value(attributes, key, None) else: def db_loader(params=params, qs=qs): try: obj = qs[0] except IndexError: raise params['class'].DoesNotExist if len(qs) > 1: raise params['class'].MultipleDocumentsReturned return obj set_value(attributes, key, self.create_instance(params['class'], {}, lazy=True, db_loader=db_loader)) else: set_value(attributes, key, qs) obj.attributes = attributes obj.attributes = self.deserialize(attributes) self.call_hook('after_load', obj) return obj
blitzdb
positive
def load_last_model(self): """Find the last checkpoint file of the model directory, and load the weights. """ checkpoints = next(os.walk(self.model_dir))[2] checkpoints = filter(lambda f: f.startswith('mask_rcnn'), checkpoints) checkpoints = sorted(checkpoints) if checkpoints: checkpoint = os.path.join(self.model_dir, checkpoints[-1]) <DeepExtract> if os.path.exists(checkpoint): load_dict = torch.load(checkpoint) if transfer: remove_list = ['classifier.linear_bbox.bias', 'classifier.linear_bbox.weight', 'classifier.linear_class.bias', 'classifier.linear_class.weight', 'mask.conv5.weight', 'mask.conv5.bias'] for remove_weight in remove_list: load_dict.pop(remove_weight) state_dict = self.state_dict() state_dict.update(load_dict) self.load_state_dict(state_dict) else: self.load_state_dict(load_dict) else: print('Weight file not found ...') self.set_log_dir(checkpoint) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) </DeepExtract> print(checkpoint)
def load_last_model(self): """Find the last checkpoint file of the model directory, and load the weights. """ checkpoints = next(os.walk(self.model_dir))[2] checkpoints = filter(lambda f: f.startswith('mask_rcnn'), checkpoints) checkpoints = sorted(checkpoints) if checkpoints: checkpoint = os.path.join(self.model_dir, checkpoints[-1]) if os.path.exists(checkpoint): load_dict = torch.load(checkpoint) if transfer: remove_list = ['classifier.linear_bbox.bias', 'classifier.linear_bbox.weight', 'classifier.linear_class.bias', 'classifier.linear_class.weight', 'mask.conv5.weight', 'mask.conv5.bias'] for remove_weight in remove_list: load_dict.pop(remove_weight) state_dict = self.state_dict() state_dict.update(load_dict) self.load_state_dict(state_dict) else: self.load_state_dict(load_dict) else: print('Weight file not found ...') self.set_log_dir(checkpoint) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) print(checkpoint)
3D-SDN
positive
def run(self, s): <DeepExtract> nodes = set() vertices = list() for line in s.splitlines(): (node1, node2) = (line[5], line[-12]) nodes.add(node1) nodes.add(node2) vertices.append((node1, node2)) (nodes, vertices) = (nodes, vertices) </DeepExtract> available = nodes - set((node for (_, node) in vertices)) completed = set() in_progress = dict() time = 0 while nodes: finished = [char for (char, done_time) in in_progress.items() if done_time <= time] for char in finished: in_progress.pop(char) nodes.remove(char) completed.add(char) next_nodes = (b for (a, b) in vertices if a == char) for next_n in next_nodes: if not set((a for (a, b) in vertices if b == next_n)) - completed: available.add(next_n) while available and len(in_progress) < 5: char = min(available) available.remove(char) in_progress[char] = time + self.get_duration(char) time += 1 return time - 1
def run(self, s): nodes = set() vertices = list() for line in s.splitlines(): (node1, node2) = (line[5], line[-12]) nodes.add(node1) nodes.add(node2) vertices.append((node1, node2)) (nodes, vertices) = (nodes, vertices) available = nodes - set((node for (_, node) in vertices)) completed = set() in_progress = dict() time = 0 while nodes: finished = [char for (char, done_time) in in_progress.items() if done_time <= time] for char in finished: in_progress.pop(char) nodes.remove(char) completed.add(char) next_nodes = (b for (a, b) in vertices if a == char) for next_n in next_nodes: if not set((a for (a, b) in vertices if b == next_n)) - completed: available.add(next_n) while available and len(in_progress) < 5: char = min(available) available.remove(char) in_progress[char] = time + self.get_duration(char) time += 1 return time - 1
advent-of-code-2018
positive
def __init__(self, trace, expression): <DeepExtract> try: self.expression = Constant(trace, expression.value(), expression.type_(), expression.signed()) except AttributeError: trace.error('Not a valid expression') except NotConstant: self.expression = expression </DeepExtract> self.trace = trace Expression.__init__(self, 'int', True)
def __init__(self, trace, expression): try: self.expression = Constant(trace, expression.value(), expression.type_(), expression.signed()) except AttributeError: trace.error('Not a valid expression') except NotConstant: self.expression = expression self.trace = trace Expression.__init__(self, 'int', True)
Chips-2.0
positive
def _paged_call(self, payload_func, max_items, **kwargs): if isinstance(self, EWSAccountService): log_prefix = 'EWS %s, account %s, service %s' % (self.protocol.service_endpoint, self.account, self.SERVICE_NAME) else: log_prefix = 'EWS %s, service %s' % (self.protocol.service_endpoint, self.SERVICE_NAME) if isinstance(self, EWSFolderService): expected_message_count = len(self.folders) else: expected_message_count = 1 paging_infos = [dict(item_count=0, next_offset=None) for _ in range(expected_message_count)] common_next_offset = 0 total_item_count = 0 while True: log.debug('%s: Getting items at offset %s (max_items %s)', log_prefix, common_next_offset, max_items) kwargs['offset'] = common_next_offset payload = payload_func(**kwargs) try: <DeepExtract> if not isinstance(payload, RestrictedElement): raise ValueError("'payload' %r must be an RestrictedElement" % payload) (account, hint) = self._get_account_and_version_hint() api_versions = self._get_versions_to_try(hint) global req_id for api_version in api_versions: log.debug('Trying API version %s for account %s', api_version, account) soap_payload = wrap(content=payload, version=api_version, account=account) http_headers = extra_headers(account=account) req_id += 1 local_req_id = req_id self._write_xml('request', local_req_id, soap_payload) (r, session) = post_ratelimited(protocol=self.protocol, session=self.protocol.get_session(), url=self.protocol.service_endpoint, headers=extra_headers(account=account), data=wrap(content=payload, version=api_version, account=account), allow_redirects=False, stream=False) self.protocol.release_session(session) self._write_xml('response', local_req_id, r.text) try: soap_response_payload = to_xml(r.content) except ParseError as e: raise SOAPError('Bad SOAP response: %s' % e) try: res = self._get_soap_payload(soap_response=soap_response_payload) except ErrorInvalidServerVersion: log.debug('API version %s was invalid', api_version) continue except ErrorInvalidSchemaVersionForMailboxVersion: if not account: raise ValueError("'account' should not be None") log.debug('API version %s was invalid for account %s', api_version, account) continue except ResponseMessageError as rme: try: self._update_api_version(hint=hint, api_version=api_version, response=r) except TransportError as te: log.debug('Failed to update version info (%s)', te) raise rme else: self._update_api_version(hint=hint, api_version=api_version, response=r) response = res if account: raise ErrorInvalidSchemaVersionForMailboxVersion('Tried versions %s but all were invalid for account %s' % (api_versions, account)) raise ErrorInvalidServerVersion('Tried versions %s but all were invalid' % api_versions) </DeepExtract> except ErrorServerBusy as e: log.debug('Got ErrorServerBusy (back off %s seconds)', e.back_off) if self.protocol.credentials.fail_fast: raise self.protocol.credentials.back_off(e.back_off) continue if len(response) != expected_message_count: raise MalformedResponseError("Expected %s items in 'response', got %s (%s)" % (expected_message_count, len(response), response)) parsed_pages = [self._get_page(message) for message in response] for ((rootfolder, next_offset), paging_info) in zip(parsed_pages, paging_infos): paging_info['next_offset'] = next_offset if rootfolder is not None: container = rootfolder.find(self.element_container_name) if container is None: raise MalformedResponseError('No %s elements in ResponseMessage (%s)' % (self.element_container_name, xml_to_str(rootfolder))) for elem in 
self._get_elements_in_container(container=container): paging_info['item_count'] += 1 yield elem total_item_count += paging_info['item_count'] if max_items and total_item_count >= max_items: log.debug("'max_items' count reached (inner)") break if not paging_info['next_offset']: continue if paging_info['next_offset'] != paging_info['item_count']: raise MalformedResponseError('Unexpected next offset: %s -> %s' % (paging_info['item_count'], paging_info['next_offset'])) if max_items and total_item_count >= max_items: log.debug("'max_items' count reached (outer)") break unique_item_counts = {p['next_offset'] for p in paging_infos if p['next_offset'] is not None} if not unique_item_counts: break if len(unique_item_counts) > 1: raise MalformedResponseError('Inconsistent next offsets: %s' % unique_item_counts) common_next_offset = unique_item_counts.pop()
def _paged_call(self, payload_func, max_items, **kwargs): if isinstance(self, EWSAccountService): log_prefix = 'EWS %s, account %s, service %s' % (self.protocol.service_endpoint, self.account, self.SERVICE_NAME) else: log_prefix = 'EWS %s, service %s' % (self.protocol.service_endpoint, self.SERVICE_NAME) if isinstance(self, EWSFolderService): expected_message_count = len(self.folders) else: expected_message_count = 1 paging_infos = [dict(item_count=0, next_offset=None) for _ in range(expected_message_count)] common_next_offset = 0 total_item_count = 0 while True: log.debug('%s: Getting items at offset %s (max_items %s)', log_prefix, common_next_offset, max_items) kwargs['offset'] = common_next_offset payload = payload_func(**kwargs) try: if not isinstance(payload, RestrictedElement): raise ValueError("'payload' %r must be an RestrictedElement" % payload) (account, hint) = self._get_account_and_version_hint() api_versions = self._get_versions_to_try(hint) global req_id for api_version in api_versions: log.debug('Trying API version %s for account %s', api_version, account) soap_payload = wrap(content=payload, version=api_version, account=account) http_headers = extra_headers(account=account) req_id += 1 local_req_id = req_id self._write_xml('request', local_req_id, soap_payload) (r, session) = post_ratelimited(protocol=self.protocol, session=self.protocol.get_session(), url=self.protocol.service_endpoint, headers=extra_headers(account=account), data=wrap(content=payload, version=api_version, account=account), allow_redirects=False, stream=False) self.protocol.release_session(session) self._write_xml('response', local_req_id, r.text) try: soap_response_payload = to_xml(r.content) except ParseError as e: raise SOAPError('Bad SOAP response: %s' % e) try: res = self._get_soap_payload(soap_response=soap_response_payload) except ErrorInvalidServerVersion: log.debug('API version %s was invalid', api_version) continue except ErrorInvalidSchemaVersionForMailboxVersion: if not account: raise ValueError("'account' should not be None") log.debug('API version %s was invalid for account %s', api_version, account) continue except ResponseMessageError as rme: try: self._update_api_version(hint=hint, api_version=api_version, response=r) except TransportError as te: log.debug('Failed to update version info (%s)', te) raise rme else: self._update_api_version(hint=hint, api_version=api_version, response=r) response = res if account: raise ErrorInvalidSchemaVersionForMailboxVersion('Tried versions %s but all were invalid for account %s' % (api_versions, account)) raise ErrorInvalidServerVersion('Tried versions %s but all were invalid' % api_versions) except ErrorServerBusy as e: log.debug('Got ErrorServerBusy (back off %s seconds)', e.back_off) if self.protocol.credentials.fail_fast: raise self.protocol.credentials.back_off(e.back_off) continue if len(response) != expected_message_count: raise MalformedResponseError("Expected %s items in 'response', got %s (%s)" % (expected_message_count, len(response), response)) parsed_pages = [self._get_page(message) for message in response] for ((rootfolder, next_offset), paging_info) in zip(parsed_pages, paging_infos): paging_info['next_offset'] = next_offset if rootfolder is not None: container = rootfolder.find(self.element_container_name) if container is None: raise MalformedResponseError('No %s elements in ResponseMessage (%s)' % (self.element_container_name, xml_to_str(rootfolder))) for elem in self._get_elements_in_container(container=container): 
paging_info['item_count'] += 1 yield elem total_item_count += paging_info['item_count'] if max_items and total_item_count >= max_items: log.debug("'max_items' count reached (inner)") break if not paging_info['next_offset']: continue if paging_info['next_offset'] != paging_info['item_count']: raise MalformedResponseError('Unexpected next offset: %s -> %s' % (paging_info['item_count'], paging_info['next_offset'])) if max_items and total_item_count >= max_items: log.debug("'max_items' count reached (outer)") break unique_item_counts = {p['next_offset'] for p in paging_infos if p['next_offset'] is not None} if not unique_item_counts: break if len(unique_item_counts) > 1: raise MalformedResponseError('Inconsistent next offsets: %s' % unique_item_counts) common_next_offset = unique_item_counts.pop()
exchangelib
positive
def find_element_by(self, ref): if ' ' in ref: <DeepExtract> if ' ' in ref: (search, pattern) = ref.split(' ', 1) result = [] for elem in self.find_elements_by(search): text = elem.text if fnmatch(text.strip(), pattern): result.append(elem) logg.info("found %s for '%s'", len(result), ref) result = result if ref.startswith('#'): result = [self.driver.find_element_by_name(ref[1:])] if ref.startswith('='): result = self.driver.find_elements_by_name(ref[1:]) if ref.startswith('.'): result = self.driver.find_elements_by_class_name(ref[1:]) if ref.startswith('>'): result = self.driver.find_elements_by_tag_name(ref[1:]) logg.error('bad ref %s', ref) raise Exception('bad ref %s', ref) </DeepExtract> if result: return result[0] logg.error('not matched: %s', ref) if ref.startswith('#'): return self.driver.find_element_by_id(ref[1:]) if ref.startswith('='): return self.driver.find_element_by_name(ref[1:]) if ref.startswith('.'): return self.driver.find_element_by_class_name(ref[1:]) if ref.startswith('>'): return self.driver.find_element_by_tag_name(ref[1:]) logg.error('bad ref %s', ref) raise Exception('bad ref %s', ref)
def find_element_by(self, ref): if ' ' in ref: if ' ' in ref: (search, pattern) = ref.split(' ', 1) result = [] for elem in self.find_elements_by(search): text = elem.text if fnmatch(text.strip(), pattern): result.append(elem) logg.info("found %s for '%s'", len(result), ref) result = result if ref.startswith('#'): result = [self.driver.find_element_by_name(ref[1:])] if ref.startswith('='): result = self.driver.find_elements_by_name(ref[1:]) if ref.startswith('.'): result = self.driver.find_elements_by_class_name(ref[1:]) if ref.startswith('>'): result = self.driver.find_elements_by_tag_name(ref[1:]) logg.error('bad ref %s', ref) raise Exception('bad ref %s', ref) if result: return result[0] logg.error('not matched: %s', ref) if ref.startswith('#'): return self.driver.find_element_by_id(ref[1:]) if ref.startswith('='): return self.driver.find_element_by_name(ref[1:]) if ref.startswith('.'): return self.driver.find_element_by_class_name(ref[1:]) if ref.startswith('>'): return self.driver.find_element_by_tag_name(ref[1:]) logg.error('bad ref %s', ref) raise Exception('bad ref %s', ref)
docker-systemctl-images
positive
@response_contains_primary_identifier @response_does_not_contain_write_only_properties def test_read_success(resource_client, current_resource_model): (_status, response, _error_code) = resource_client.call_and_assert(Action.READ, OperationStatus.SUCCESS, current_resource_model) <DeepExtract> pruned_input_model = prune_properties_from_model(copy.deepcopy(current_resource_model), set(list(resource_client.read_only_paths) + list(resource_client.write_only_paths))) pruned_output_model = prune_properties_from_model(copy.deepcopy(response['resourceModel']), resource_client.read_only_paths) pruned_output_model = prune_properties_if_not_exist_in_path(pruned_output_model, pruned_input_model, resource_client.create_only_paths) resource_client.compare(pruned_input_model, pruned_output_model) </DeepExtract> return response
@response_contains_primary_identifier @response_does_not_contain_write_only_properties def test_read_success(resource_client, current_resource_model): (_status, response, _error_code) = resource_client.call_and_assert(Action.READ, OperationStatus.SUCCESS, current_resource_model) pruned_input_model = prune_properties_from_model(copy.deepcopy(current_resource_model), set(list(resource_client.read_only_paths) + list(resource_client.write_only_paths))) pruned_output_model = prune_properties_from_model(copy.deepcopy(response['resourceModel']), resource_client.read_only_paths) pruned_output_model = prune_properties_if_not_exist_in_path(pruned_output_model, pruned_input_model, resource_client.create_only_paths) resource_client.compare(pruned_input_model, pruned_output_model) return response
cloudformation-cli
positive
def to_file(data: Any, target_file_name: str, target_path: str='') -> str: """Serialize object to JSON""" if not os.path.exists(target_path): os.makedirs(target_path) <DeepExtract> target_file = f'{target_file_name}.{target_file_extension}' path = os.path.join(target_path, target_file) </DeepExtract> with open(path, 'w', encoding=BYTE_ENCODING) as outfile: json.dump(data, outfile, default=pydantic_encoder) return path
def to_file(data: Any, target_file_name: str, target_path: str='') -> str: """Serialize object to JSON""" if not os.path.exists(target_path): os.makedirs(target_path) target_file = f'{target_file_name}.{target_file_extension}' path = os.path.join(target_path, target_file) with open(path, 'w', encoding=BYTE_ENCODING) as outfile: json.dump(data, outfile, default=pydantic_encoder) return path
electionguard-python
positive
def im_detect_bbox_aug(model, im, box_proposals=None): """Performs bbox detection with test-time augmentations. Function signature is the same as for im_detect_bbox. """ assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, 'Size dependent scaling not implemented' assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', 'Coord heuristic must be union whenever score heuristic is union' assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', 'Score heuristic must be union whenever coord heuristic is union' assert not cfg.MODEL.FASTER_RCNN or cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', 'Union heuristic must be used to combine Faster RCNN predictions' scores_ts = [] boxes_ts = [] def add_preds_t(scores_t, boxes_t): scores_ts.append(scores_t) boxes_ts.append(boxes_t) if cfg.TEST.BBOX_AUG.H_FLIP: <DeepExtract> im_hf = im[:, ::-1, :] im_width = im.shape[1] if not cfg.MODEL.FASTER_RCNN: box_proposals_hf = box_utils.flip_boxes(box_proposals, im_width) else: box_proposals_hf = None (scores_hf, boxes_hf, im_scale) = im_detect_bbox(model, im_hf, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals_hf) boxes_inv = box_utils.flip_boxes(boxes_hf, im_width) (scores_hf, boxes_hf, _) = (scores_hf, boxes_inv, im_scale) </DeepExtract> <DeepExtract> scores_ts.append(scores_hf) boxes_ts.append(boxes_hf) </DeepExtract> for scale in cfg.TEST.BBOX_AUG.SCALES: max_size = cfg.TEST.BBOX_AUG.MAX_SIZE <DeepExtract> if hflip: (scores_scl, boxes_scl, _) = im_detect_bbox_hflip(model, im, scale, max_size, box_proposals=box_proposals) else: (scores_scl, boxes_scl, _) = im_detect_bbox(model, im, scale, max_size, boxes=box_proposals) (scores_scl, boxes_scl) = (scores_scl, boxes_scl) </DeepExtract> <DeepExtract> scores_ts.append(scores_scl) boxes_ts.append(boxes_scl) </DeepExtract> if cfg.TEST.BBOX_AUG.SCALE_H_FLIP: <DeepExtract> if True: (scores_scl, boxes_scl, _) = im_detect_bbox_hflip(model, im, scale, max_size, box_proposals=box_proposals) else: (scores_scl, boxes_scl, _) = im_detect_bbox(model, im, scale, max_size, boxes=box_proposals) (scores_scl_hf, boxes_scl_hf) = (scores_scl, boxes_scl) </DeepExtract> <DeepExtract> scores_ts.append(scores_scl_hf) boxes_ts.append(boxes_scl_hf) </DeepExtract> for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS: <DeepExtract> im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio) if not cfg.MODEL.FASTER_RCNN: box_proposals_ar = box_utils.aspect_ratio(box_proposals, aspect_ratio) else: box_proposals_ar = None if hflip: (scores_ar, boxes_ar, _) = im_detect_bbox_hflip(model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, box_proposals=box_proposals_ar) else: (scores_ar, boxes_ar, _) = im_detect_bbox(model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals_ar) boxes_inv = box_utils.aspect_ratio(boxes_ar, 1.0 / aspect_ratio) (scores_ar, boxes_ar) = (scores_ar, boxes_inv) </DeepExtract> <DeepExtract> scores_ts.append(scores_ar) boxes_ts.append(boxes_ar) </DeepExtract> if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP: <DeepExtract> im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio) if not cfg.MODEL.FASTER_RCNN: box_proposals_ar = box_utils.aspect_ratio(box_proposals, aspect_ratio) else: box_proposals_ar = None if True: (scores_ar, boxes_ar, _) = im_detect_bbox_hflip(model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, box_proposals=box_proposals_ar) else: (scores_ar, boxes_ar, _) = im_detect_bbox(model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals_ar) boxes_inv = box_utils.aspect_ratio(boxes_ar, 1.0 / 
aspect_ratio) (scores_ar_hf, boxes_ar_hf) = (scores_ar, boxes_inv) </DeepExtract> <DeepExtract> scores_ts.append(scores_ar_hf) boxes_ts.append(boxes_ar_hf) </DeepExtract> <DeepExtract> (inputs, im_scale) = _get_blobs(im, box_proposals, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE) if cfg.DEDUP_BOXES > 0 and (not cfg.MODEL.FASTER_RCNN): v = np.array([1, 1000.0, 1000000.0, 1000000000.0, 1000000000000.0]) hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v) (_, index, inv_index) = np.unique(hashes, return_index=True, return_inverse=True) inputs['rois'] = inputs['rois'][index, :] box_proposals = box_proposals[index, :] if cfg.FPN.MULTILEVEL_ROIS and (not cfg.MODEL.FASTER_RCNN): _add_multilevel_rois_for_test(inputs, 'rois') for (k, v) in inputs.items(): workspace.FeedBlob(core.ScopedName(k), v) workspace.RunNet(model.net.Proto().name) if cfg.MODEL.FASTER_RCNN: rois = workspace.FetchBlob(core.ScopedName('rois')) box_proposals = rois[:, 1:5] / im_scale scores = workspace.FetchBlob(core.ScopedName('cls_prob')).squeeze() scores = scores.reshape([-1, scores.shape[-1]]) if cfg.TEST.BBOX_REG: box_deltas = workspace.FetchBlob(core.ScopedName('bbox_pred')).squeeze() box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]]) if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG: box_deltas = box_deltas[:, -4:] pred_boxes = box_utils.bbox_transform(box_proposals, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS) pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape) if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG: pred_boxes = np.tile(pred_boxes, (1, scores.shape[1])) else: pred_boxes = np.tile(box_proposals, (1, scores.shape[1])) if cfg.DEDUP_BOXES > 0 and (not cfg.MODEL.FASTER_RCNN): scores = scores[inv_index, :] pred_boxes = pred_boxes[inv_index, :] (scores_i, boxes_i, im_scale_i) = (scores, pred_boxes, im_scale) </DeepExtract> <DeepExtract> scores_ts.append(scores_i) boxes_ts.append(boxes_i) </DeepExtract> if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID': scores_c = scores_i elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG': scores_c = np.mean(scores_ts, axis=0) elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION': scores_c = np.vstack(scores_ts) else: raise NotImplementedError('Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)) if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID': boxes_c = boxes_i elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG': boxes_c = np.mean(boxes_ts, axis=0) elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION': boxes_c = np.vstack(boxes_ts) else: raise NotImplementedError('Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)) return (scores_c, boxes_c, im_scale_i)
def im_detect_bbox_aug(model, im, box_proposals=None): """Performs bbox detection with test-time augmentations. Function signature is the same as for im_detect_bbox. """ assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, 'Size dependent scaling not implemented' assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', 'Coord heuristic must be union whenever score heuristic is union' assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', 'Score heuristic must be union whenever coord heuristic is union' assert not cfg.MODEL.FASTER_RCNN or cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', 'Union heuristic must be used to combine Faster RCNN predictions' scores_ts = [] boxes_ts = [] def add_preds_t(scores_t, boxes_t): scores_ts.append(scores_t) boxes_ts.append(boxes_t) if cfg.TEST.BBOX_AUG.H_FLIP: im_hf = im[:, ::-1, :] im_width = im.shape[1] if not cfg.MODEL.FASTER_RCNN: box_proposals_hf = box_utils.flip_boxes(box_proposals, im_width) else: box_proposals_hf = None (scores_hf, boxes_hf, im_scale) = im_detect_bbox(model, im_hf, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals_hf) boxes_inv = box_utils.flip_boxes(boxes_hf, im_width) (scores_hf, boxes_hf, _) = (scores_hf, boxes_inv, im_scale) scores_ts.append(scores_hf) boxes_ts.append(boxes_hf) for scale in cfg.TEST.BBOX_AUG.SCALES: max_size = cfg.TEST.BBOX_AUG.MAX_SIZE if hflip: (scores_scl, boxes_scl, _) = im_detect_bbox_hflip(model, im, scale, max_size, box_proposals=box_proposals) else: (scores_scl, boxes_scl, _) = im_detect_bbox(model, im, scale, max_size, boxes=box_proposals) (scores_scl, boxes_scl) = (scores_scl, boxes_scl) scores_ts.append(scores_scl) boxes_ts.append(boxes_scl) if cfg.TEST.BBOX_AUG.SCALE_H_FLIP: if True: (scores_scl, boxes_scl, _) = im_detect_bbox_hflip(model, im, scale, max_size, box_proposals=box_proposals) else: (scores_scl, boxes_scl, _) = im_detect_bbox(model, im, scale, max_size, boxes=box_proposals) (scores_scl_hf, boxes_scl_hf) = (scores_scl, boxes_scl) scores_ts.append(scores_scl_hf) boxes_ts.append(boxes_scl_hf) for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS: im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio) if not cfg.MODEL.FASTER_RCNN: box_proposals_ar = box_utils.aspect_ratio(box_proposals, aspect_ratio) else: box_proposals_ar = None if hflip: (scores_ar, boxes_ar, _) = im_detect_bbox_hflip(model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, box_proposals=box_proposals_ar) else: (scores_ar, boxes_ar, _) = im_detect_bbox(model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals_ar) boxes_inv = box_utils.aspect_ratio(boxes_ar, 1.0 / aspect_ratio) (scores_ar, boxes_ar) = (scores_ar, boxes_inv) scores_ts.append(scores_ar) boxes_ts.append(boxes_ar) if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP: im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio) if not cfg.MODEL.FASTER_RCNN: box_proposals_ar = box_utils.aspect_ratio(box_proposals, aspect_ratio) else: box_proposals_ar = None if True: (scores_ar, boxes_ar, _) = im_detect_bbox_hflip(model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, box_proposals=box_proposals_ar) else: (scores_ar, boxes_ar, _) = im_detect_bbox(model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals_ar) boxes_inv = box_utils.aspect_ratio(boxes_ar, 1.0 / aspect_ratio) (scores_ar_hf, boxes_ar_hf) = (scores_ar, boxes_inv) scores_ts.append(scores_ar_hf) boxes_ts.append(boxes_ar_hf) (inputs, im_scale) = _get_blobs(im, box_proposals, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE) if cfg.DEDUP_BOXES > 0 and (not 
cfg.MODEL.FASTER_RCNN): v = np.array([1, 1000.0, 1000000.0, 1000000000.0, 1000000000000.0]) hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v) (_, index, inv_index) = np.unique(hashes, return_index=True, return_inverse=True) inputs['rois'] = inputs['rois'][index, :] box_proposals = box_proposals[index, :] if cfg.FPN.MULTILEVEL_ROIS and (not cfg.MODEL.FASTER_RCNN): _add_multilevel_rois_for_test(inputs, 'rois') for (k, v) in inputs.items(): workspace.FeedBlob(core.ScopedName(k), v) workspace.RunNet(model.net.Proto().name) if cfg.MODEL.FASTER_RCNN: rois = workspace.FetchBlob(core.ScopedName('rois')) box_proposals = rois[:, 1:5] / im_scale scores = workspace.FetchBlob(core.ScopedName('cls_prob')).squeeze() scores = scores.reshape([-1, scores.shape[-1]]) if cfg.TEST.BBOX_REG: box_deltas = workspace.FetchBlob(core.ScopedName('bbox_pred')).squeeze() box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]]) if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG: box_deltas = box_deltas[:, -4:] pred_boxes = box_utils.bbox_transform(box_proposals, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS) pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape) if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG: pred_boxes = np.tile(pred_boxes, (1, scores.shape[1])) else: pred_boxes = np.tile(box_proposals, (1, scores.shape[1])) if cfg.DEDUP_BOXES > 0 and (not cfg.MODEL.FASTER_RCNN): scores = scores[inv_index, :] pred_boxes = pred_boxes[inv_index, :] (scores_i, boxes_i, im_scale_i) = (scores, pred_boxes, im_scale) scores_ts.append(scores_i) boxes_ts.append(boxes_i) if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID': scores_c = scores_i elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG': scores_c = np.mean(scores_ts, axis=0) elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION': scores_c = np.vstack(scores_ts) else: raise NotImplementedError('Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)) if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID': boxes_c = boxes_i elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG': boxes_c = np.mean(boxes_ts, axis=0) elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION': boxes_c = np.vstack(boxes_ts) else: raise NotImplementedError('Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)) return (scores_c, boxes_c, im_scale_i)
Detectron
positive
def load_os_user_metadata(url, key): """ Load user meta data from OpenStack """ <DeepExtract> try: data = urllib2.urlopen(url).read().strip() except urllib2.HTTPError as e: if e.code == 404 and default != _NO_DEFAULT: data = default raise </DeepExtract> jsonData = json.loads(data) if jsonData[key]: return jsonData[key]
def load_os_user_metadata(url, key): """ Load user meta data from OpenStack """ try: data = urllib2.urlopen(url).read().strip() except urllib2.HTTPError as e: if e.code == 404 and default != _NO_DEFAULT: data = default raise jsonData = json.loads(data) if jsonData[key]: return jsonData[key]
atg-commerce-iaas
positive
def get_old_header(contents, header=''): <DeepExtract> contents = COQ_PROG_ARGS_REP.sub('', contents) </DeepExtract> if header[:2] == '(*' and header[-2:] == '*)' and ('*)' not in header[2:-2]): pre_header = header[:header.index('%')] if pre_header in contents and contents.index('*)') > contents.index(pre_header): return contents[contents.index(pre_header) + len(pre_header):contents.index('*)')].strip() return 'original input'
def get_old_header(contents, header=''): contents = COQ_PROG_ARGS_REP.sub('', contents) if header[:2] == '(*' and header[-2:] == '*)' and ('*)' not in header[2:-2]): pre_header = header[:header.index('%')] if pre_header in contents and contents.index('*)') > contents.index(pre_header): return contents[contents.index(pre_header) + len(pre_header):contents.index('*)')].strip() return 'original input'
coq-tools
positive
def for_contract(self, entity_slug: str, user_model, ce_model_uuid): <DeepExtract> qs = self.products(entity_slug=entity_slug, user_model=user_model) </DeepExtract> qs = qs.filter(itemtransactionmodel__ce_model_id=ce_model_uuid) return qs.distinct('uuid')
def for_contract(self, entity_slug: str, user_model, ce_model_uuid): qs = self.products(entity_slug=entity_slug, user_model=user_model) qs = qs.filter(itemtransactionmodel__ce_model_id=ce_model_uuid) return qs.distinct('uuid')
django-ledger
positive
def newCategory(self): ncn = widgets.QInputDialog.getText(self, 'DDT4All', _('Enter category name')) <DeepExtract> necatname = ncn[0] </DeepExtract> if necatname: self.paramview.createCategory(necatname) self.treeview_params.addTopLevelItem(widgets.QTreeWidgetItem([necatname]))
def newCategory(self): ncn = widgets.QInputDialog.getText(self, 'DDT4All', _('Enter category name')) necatname = ncn[0] if necatname: self.paramview.createCategory(necatname) self.treeview_params.addTopLevelItem(widgets.QTreeWidgetItem([necatname]))
ddt4all
positive
def test_build_spends(self): TX_DB = {} exponent = wif_to_secret_exponent('5JMys7YfK72cRVTrbwkq5paxU7vgkMypB55KyXEtN5uSnjV7K8Y') compressed = False public_key_sec = public_pair_to_sec(ecdsa.public_pair_for_secret_exponent(ecdsa.generator_secp256k1, exponent), compressed=compressed) the_coinbase_tx = Tx.coinbase_tx(public_key_sec, int(50 * 100000000.0), COINBASE_BYTES_FROM_80971) TX_DB[the_coinbase_tx.hash()] = the_coinbase_tx compressed = False exponent_2 = int('137f3276686959c82b454eea6eefc9ab1b9e45bd4636fb9320262e114e321da1', 16) bitcoin_address_2 = public_pair_to_bitcoin_address(ecdsa.public_pair_for_secret_exponent(ecdsa.generator_secp256k1, exponent_2), compressed=compressed) self.assertEqual('12WivmEn8AUth6x6U8HuJuXHaJzDw3gHNZ', bitcoin_address_2) coins_from = [(the_coinbase_tx.hash(), 0, the_coinbase_tx.txs_out[0])] coins_to = [(int(50 * 100000000.0), bitcoin_address_2)] <DeepExtract> txs_in = [] unspents = [] for (h, idx, tx_out) in coins_from: txs_in.append(TxIn(h, idx)) unspents.append(tx_out) txs_out = [] for (coin_value, bitcoin_address) in coins_to: txs_out.append(TxOut(coin_value, standard_tx_out_script(bitcoin_address))) (version, lock_time) = (1, 0) tx = Tx(version, txs_in, txs_out, lock_time) tx.set_unspents(unspents) unsigned_coinbase_spend_tx = tx </DeepExtract> solver = build_hash160_lookup([exponent]) coinbase_spend_tx = unsigned_coinbase_spend_tx.sign(solver) self.assertEqual(coinbase_spend_tx.bad_signature_count(), 0) TX_DB[coinbase_spend_tx.hash()] = coinbase_spend_tx compressed = True exponent_3 = int('f8d39b8ecd0e1b6fee5a340519f239097569d7a403a50bb14fb2f04eff8db0ff', 16) bitcoin_address_3 = public_pair_to_bitcoin_address(ecdsa.public_pair_for_secret_exponent(ecdsa.generator_secp256k1, exponent_3), compressed=compressed) self.assertEqual('13zzEHPCH2WUZJzANymow3ZrxcZ8iFBrY5', bitcoin_address_3) coins_from = [(coinbase_spend_tx.hash(), 0, coinbase_spend_tx.txs_out[0])] <DeepExtract> txs_in = [] unspents = [] for (h, idx, tx_out) in coins_from: txs_in.append(TxIn(h, idx)) unspents.append(tx_out) txs_out = [] for (coin_value, bitcoin_address) in [(int(50 * 100000000.0), bitcoin_address_3)]: txs_out.append(TxOut(coin_value, standard_tx_out_script(bitcoin_address))) (version, lock_time) = (1, 0) tx = Tx(version, txs_in, txs_out, lock_time) tx.set_unspents(unspents) unsigned_spend_tx = tx </DeepExtract> solver.update(build_hash160_lookup([exponent_2])) spend_tx = unsigned_spend_tx.sign(solver) self.assertEqual(spend_tx.bad_signature_count(), 0)
def test_build_spends(self): TX_DB = {} exponent = wif_to_secret_exponent('5JMys7YfK72cRVTrbwkq5paxU7vgkMypB55KyXEtN5uSnjV7K8Y') compressed = False public_key_sec = public_pair_to_sec(ecdsa.public_pair_for_secret_exponent(ecdsa.generator_secp256k1, exponent), compressed=compressed) the_coinbase_tx = Tx.coinbase_tx(public_key_sec, int(50 * 100000000.0), COINBASE_BYTES_FROM_80971) TX_DB[the_coinbase_tx.hash()] = the_coinbase_tx compressed = False exponent_2 = int('137f3276686959c82b454eea6eefc9ab1b9e45bd4636fb9320262e114e321da1', 16) bitcoin_address_2 = public_pair_to_bitcoin_address(ecdsa.public_pair_for_secret_exponent(ecdsa.generator_secp256k1, exponent_2), compressed=compressed) self.assertEqual('12WivmEn8AUth6x6U8HuJuXHaJzDw3gHNZ', bitcoin_address_2) coins_from = [(the_coinbase_tx.hash(), 0, the_coinbase_tx.txs_out[0])] coins_to = [(int(50 * 100000000.0), bitcoin_address_2)] txs_in = [] unspents = [] for (h, idx, tx_out) in coins_from: txs_in.append(TxIn(h, idx)) unspents.append(tx_out) txs_out = [] for (coin_value, bitcoin_address) in coins_to: txs_out.append(TxOut(coin_value, standard_tx_out_script(bitcoin_address))) (version, lock_time) = (1, 0) tx = Tx(version, txs_in, txs_out, lock_time) tx.set_unspents(unspents) unsigned_coinbase_spend_tx = tx solver = build_hash160_lookup([exponent]) coinbase_spend_tx = unsigned_coinbase_spend_tx.sign(solver) self.assertEqual(coinbase_spend_tx.bad_signature_count(), 0) TX_DB[coinbase_spend_tx.hash()] = coinbase_spend_tx compressed = True exponent_3 = int('f8d39b8ecd0e1b6fee5a340519f239097569d7a403a50bb14fb2f04eff8db0ff', 16) bitcoin_address_3 = public_pair_to_bitcoin_address(ecdsa.public_pair_for_secret_exponent(ecdsa.generator_secp256k1, exponent_3), compressed=compressed) self.assertEqual('13zzEHPCH2WUZJzANymow3ZrxcZ8iFBrY5', bitcoin_address_3) coins_from = [(coinbase_spend_tx.hash(), 0, coinbase_spend_tx.txs_out[0])] txs_in = [] unspents = [] for (h, idx, tx_out) in coins_from: txs_in.append(TxIn(h, idx)) unspents.append(tx_out) txs_out = [] for (coin_value, bitcoin_address) in [(int(50 * 100000000.0), bitcoin_address_3)]: txs_out.append(TxOut(coin_value, standard_tx_out_script(bitcoin_address))) (version, lock_time) = (1, 0) tx = Tx(version, txs_in, txs_out, lock_time) tx.set_unspents(unspents) unsigned_spend_tx = tx solver.update(build_hash160_lookup([exponent_2])) spend_tx = unsigned_spend_tx.sign(solver) self.assertEqual(spend_tx.bad_signature_count(), 0)
dashman
positive
def test_connection_reset_during_command(self, mocker: MockFixture, plain_controller, client): <DeepExtract> (code, mesg) = client.helo(domain) assert code == 250 return mesg </DeepExtract> smtpd: Server = plain_controller.smtpd spy = mocker.spy(smtpd._writer, 'close') client.send('MAIL FROM: <anne') reset_connection(client) with pytest.raises(SMTPServerDisconnected): client.noop() catchup_delay() assert spy.call_count > 0
def test_connection_reset_during_command(self, mocker: MockFixture, plain_controller, client): (code, mesg) = client.helo(domain) assert code == 250 return mesg smtpd: Server = plain_controller.smtpd spy = mocker.spy(smtpd._writer, 'close') client.send('MAIL FROM: <anne') reset_connection(client) with pytest.raises(SMTPServerDisconnected): client.noop() catchup_delay() assert spy.call_count > 0
aiosmtpd
positive
def _build_repr(self) -> Tuple[List[List[str]], List[int], List[int], List[int]]: columns: List[str] = self.columns num_rows: int = len(self) max_cols = options.options_dict['max_cols'] max_rows = options.options_dict['max_rows'] max_colwidth = options.options_dict['max_colwidth'] if options._head_method: max_rows = num_rows if len(columns) > max_cols: col_num: int = max_cols // 2 columns = columns[:col_num] + ['...'] + columns[-col_num:] if num_rows > max_rows: if options.options_dict['show_tail']: first: List[int] = list(range(max_rows // 2)) last: List[int] = list(range(num_rows - max_rows // 2, num_rows)) idx: List[int] = first + last else: idx = list(range(max_rows)) else: idx = list(range(num_rows)) data_list: List[List[str]] = [[''] + [str(i) for i in idx]] long_len: List[int] = [len(data_list[0][-1])] decimal_len: List[int] = [0] data: List cur_len: int dec_len: List whole_len: List dec_len_arr: ndarray whole_len_arr: ndarray for column in columns: if column != '...': vals = self._get_column_values(column)[idx] dtype = self._column_info[column].dtype if dtype == 'M': unit = utils.get_datetime_str(vals) vals = vals.astype(f'datetime64[{unit}]') data = [column] + [str(val).replace('T', ' ') if not np.isnat(val) else str(val) for val in vals] elif dtype == 'm': unit = utils.get_timedelta_str(vals) vals = vals.astype(f'timedelta64[{unit}]') data = [column] + [str(val).replace('T', ' ') if not np.isnat(val) else str(val) for val in vals] elif dtype == 'S': loc = self._column_info[column].loc rev_map = self._str_reverse_map[loc] data = [column] + ['NaN' if val == 0 else rev_map[val] for val in vals] elif dtype == 'i': data = [column] + ['NaN' if val == MIN_INT else val for val in vals] elif dtype == 'b': bool_dict = {-1: 'NaN', 0: 'False', 1: 'True'} data = [column] + [bool_dict[val] for val in vals] elif dtype == 'f': data = [column] + ['NaN' if np.isnan(val) else val for val in vals] else: data = ['...'] * (len(idx) + 1) long_len.append(3) decimal_len.append(0) data_list.append(data) continue if len(self) == 0: data_list.append(data) long_len.append(len(column)) decimal_len.append(0) continue if self._column_info[column].dtype == 'S': <DeepExtract> cur_len = self._stat_funcs('max', [len(str(x)) for x in data]) </DeepExtract> <DeepExtract> cur_len = self._stat_funcs('min', cur_len) </DeepExtract> long_len.append(cur_len) decimal_len.append(0) elif self._column_info[column].dtype == 'f': dec_len = [utils.get_decimal_len(x) for x in data[1:]] whole_len = [utils.get_whole_len(x) for x in data[1:]] dec_len_arr = np.array(dec_len).clip(0, 6) whole_len_arr = np.array(whole_len) lengths = [len(column), dec_len_arr.max() + whole_len_arr.max() + 1] max_decimal = dec_len_arr.max() long_len.append(max(lengths)) decimal_len.append(min(max_decimal, 6)) elif self._column_info[column].dtype == 'i': lengths = [len(column)] + [len(str(x)) for x in data[1:]] long_len.append(max(lengths)) decimal_len.append(0) elif self._column_info[column].dtype == 'b': long_len.append(max(len(column), 5)) decimal_len.append(0) elif self._column_info[column].dtype in 'Mm': long_len.append(max(len(column), len(data[1]))) decimal_len.append(0) data_list.append(data) return (data_list, long_len, decimal_len, idx)
def _build_repr(self) -> Tuple[List[List[str]], List[int], List[int], List[int]]: columns: List[str] = self.columns num_rows: int = len(self) max_cols = options.options_dict['max_cols'] max_rows = options.options_dict['max_rows'] max_colwidth = options.options_dict['max_colwidth'] if options._head_method: max_rows = num_rows if len(columns) > max_cols: col_num: int = max_cols // 2 columns = columns[:col_num] + ['...'] + columns[-col_num:] if num_rows > max_rows: if options.options_dict['show_tail']: first: List[int] = list(range(max_rows // 2)) last: List[int] = list(range(num_rows - max_rows // 2, num_rows)) idx: List[int] = first + last else: idx = list(range(max_rows)) else: idx = list(range(num_rows)) data_list: List[List[str]] = [[''] + [str(i) for i in idx]] long_len: List[int] = [len(data_list[0][-1])] decimal_len: List[int] = [0] data: List cur_len: int dec_len: List whole_len: List dec_len_arr: ndarray whole_len_arr: ndarray for column in columns: if column != '...': vals = self._get_column_values(column)[idx] dtype = self._column_info[column].dtype if dtype == 'M': unit = utils.get_datetime_str(vals) vals = vals.astype(f'datetime64[{unit}]') data = [column] + [str(val).replace('T', ' ') if not np.isnat(val) else str(val) for val in vals] elif dtype == 'm': unit = utils.get_timedelta_str(vals) vals = vals.astype(f'timedelta64[{unit}]') data = [column] + [str(val).replace('T', ' ') if not np.isnat(val) else str(val) for val in vals] elif dtype == 'S': loc = self._column_info[column].loc rev_map = self._str_reverse_map[loc] data = [column] + ['NaN' if val == 0 else rev_map[val] for val in vals] elif dtype == 'i': data = [column] + ['NaN' if val == MIN_INT else val for val in vals] elif dtype == 'b': bool_dict = {-1: 'NaN', 0: 'False', 1: 'True'} data = [column] + [bool_dict[val] for val in vals] elif dtype == 'f': data = [column] + ['NaN' if np.isnan(val) else val for val in vals] else: data = ['...'] * (len(idx) + 1) long_len.append(3) decimal_len.append(0) data_list.append(data) continue if len(self) == 0: data_list.append(data) long_len.append(len(column)) decimal_len.append(0) continue if self._column_info[column].dtype == 'S': cur_len = self._stat_funcs('max', [len(str(x)) for x in data]) cur_len = self._stat_funcs('min', cur_len) long_len.append(cur_len) decimal_len.append(0) elif self._column_info[column].dtype == 'f': dec_len = [utils.get_decimal_len(x) for x in data[1:]] whole_len = [utils.get_whole_len(x) for x in data[1:]] dec_len_arr = np.array(dec_len).clip(0, 6) whole_len_arr = np.array(whole_len) lengths = [len(column), dec_len_arr.max() + whole_len_arr.max() + 1] max_decimal = dec_len_arr.max() long_len.append(max(lengths)) decimal_len.append(min(max_decimal, 6)) elif self._column_info[column].dtype == 'i': lengths = [len(column)] + [len(str(x)) for x in data[1:]] long_len.append(max(lengths)) decimal_len.append(0) elif self._column_info[column].dtype == 'b': long_len.append(max(len(column), 5)) decimal_len.append(0) elif self._column_info[column].dtype in 'Mm': long_len.append(max(len(column), len(data[1]))) decimal_len.append(0) data_list.append(data) return (data_list, long_len, decimal_len, idx)
dexplo
positive
@contextmanager def bulk_commit(self): """ Context manager used to speedup insertion of big number of records :: >>> d1 = DbDict('test') >>> with d1.bulk_commit(): ... for i in range(1000): ... d1[i] = i * 2 """ self._bulk_commit = True self.can_commit = False try: yield <DeepExtract> if True or self.can_commit: if self._pending_connection is not None: self._pending_connection.commit() </DeepExtract> finally: self._bulk_commit = False self.can_commit = True self._pending_connection.close() self._pending_connection = None
@contextmanager def bulk_commit(self): """ Context manager used to speedup insertion of big number of records :: >>> d1 = DbDict('test') >>> with d1.bulk_commit(): ... for i in range(1000): ... d1[i] = i * 2 """ self._bulk_commit = True self.can_commit = False try: yield if True or self.can_commit: if self._pending_connection is not None: self._pending_connection.commit() finally: self._bulk_commit = False self.can_commit = True self._pending_connection.close() self._pending_connection = None
alp
positive
def __init__(self, alpha, geom, disc, x0): if disc.meshtype == 'static' and x0 is not None or (disc.meshtype == 'dynamic' and x0 is None): raise ValueError('disc.meshtype and x0 do not match.') (self.alpha, self.geom) = (alpha, geom) (self.disc, self.x0) = (disc, x0) if disc.meshtype == 'dynamic': <DeepExtract> h = 1 / self.disc.resolution[-1] min_h = self.geom.mindist2boundary(self.x0) if min_h < h: print('Warning: mesh step size %f is larger than the boundary distance %f.' % (h, min_h)) </DeepExtract> (self.x, self.xindex_start, self.w) = (None, None, None) <DeepExtract> n = self.disc.resolution[0] if self.disc.meshtype == 'static' else self.dynamic_dist2npts(self.geom.diam) + 1 w = [bkd.as_tensor(1.0, dtype=config.real(bkd.lib)) if bkd.is_tensor(self.alpha) else 1.0] for j in range(1, n): w.append(w[-1] * (j - 1 - self.alpha) / j) self._w_init = array_ops_compat.convert_to_array(w) </DeepExtract>
def __init__(self, alpha, geom, disc, x0): if disc.meshtype == 'static' and x0 is not None or (disc.meshtype == 'dynamic' and x0 is None): raise ValueError('disc.meshtype and x0 do not match.') (self.alpha, self.geom) = (alpha, geom) (self.disc, self.x0) = (disc, x0) if disc.meshtype == 'dynamic': h = 1 / self.disc.resolution[-1] min_h = self.geom.mindist2boundary(self.x0) if min_h < h: print('Warning: mesh step size %f is larger than the boundary distance %f.' % (h, min_h)) (self.x, self.xindex_start, self.w) = (None, None, None) n = self.disc.resolution[0] if self.disc.meshtype == 'static' else self.dynamic_dist2npts(self.geom.diam) + 1 w = [bkd.as_tensor(1.0, dtype=config.real(bkd.lib)) if bkd.is_tensor(self.alpha) else 1.0] for j in range(1, n): w.append(w[-1] * (j - 1 - self.alpha) / j) self._w_init = array_ops_compat.convert_to_array(w)
deepxde
positive
@mock.patch('requests.post', mock_status_ok) def test_release_summary(self): """Test the text summary of a release.""" <DeepExtract> url = '/v1/apps' response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 201) app_id = response.data['id'] url = '/v1/apps/{app_id}/config'.format(**locals()) body = {'values': json.dumps({'NEW_URL1': 'http://localhost:8080/'})} response = self.client.post(url, json.dumps(body), content_type='application/json', HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 201) self.assertIn('NEW_URL1', response.data['values']) url = '/v1/apps/{app_id}/releases'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) self.assertEqual(response.data['count'], 2) url = '/v1/apps/{app_id}/releases/v1'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) release1 = response.data self.assertIn('config', response.data) self.assertIn('build', response.data) self.assertEquals(release1['version'], 1) url = '/v1/apps/{app_id}/releases/v2'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) release2 = response.data self.assertNotEqual(release1['uuid'], release2['uuid']) self.assertNotEqual(release1['config'], release2['config']) self.assertEqual(release1['build'], release2['build']) self.assertEquals(release2['version'], 2) url = '/v1/apps/{app_id}/builds'.format(**locals()) build_config = json.dumps({'PATH': 'bin:/usr/local/bin:/usr/bin:/bin'}) body = {'image': 'autotest/example'} response = self.client.post(url, json.dumps(body), content_type='application/json', HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 201) self.assertEqual(response.data['image'], body['image']) url = '/v1/apps/{app_id}/releases/v3'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) release3 = response.data self.assertNotEqual(release2['uuid'], release3['uuid']) self.assertNotEqual(release2['build'], release3['build']) self.assertEquals(release3['version'], 3) url = '/v1/apps/{app_id}/releases/v2'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) release2 = response.data self.assertNotEqual(release2['uuid'], release3['uuid']) self.assertNotEqual(release2['build'], release3['build']) self.assertEquals(release2['version'], 2) url = '/v1/apps/{app_id}/releases'.format(**locals()) response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 405) response = self.client.put(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 405) response = self.client.patch(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 405) response = self.client.delete(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 405) release3 = release3 </DeepExtract> release = Release.objects.get(uuid=release3['uuid']) self.assertIn('autotest deployed ', release.summary)
@mock.patch('requests.post', mock_status_ok) def test_release_summary(self): """Test the text summary of a release.""" url = '/v1/apps' response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 201) app_id = response.data['id'] url = '/v1/apps/{app_id}/config'.format(**locals()) body = {'values': json.dumps({'NEW_URL1': 'http://localhost:8080/'})} response = self.client.post(url, json.dumps(body), content_type='application/json', HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 201) self.assertIn('NEW_URL1', response.data['values']) url = '/v1/apps/{app_id}/releases'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) self.assertEqual(response.data['count'], 2) url = '/v1/apps/{app_id}/releases/v1'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) release1 = response.data self.assertIn('config', response.data) self.assertIn('build', response.data) self.assertEquals(release1['version'], 1) url = '/v1/apps/{app_id}/releases/v2'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) release2 = response.data self.assertNotEqual(release1['uuid'], release2['uuid']) self.assertNotEqual(release1['config'], release2['config']) self.assertEqual(release1['build'], release2['build']) self.assertEquals(release2['version'], 2) url = '/v1/apps/{app_id}/builds'.format(**locals()) build_config = json.dumps({'PATH': 'bin:/usr/local/bin:/usr/bin:/bin'}) body = {'image': 'autotest/example'} response = self.client.post(url, json.dumps(body), content_type='application/json', HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 201) self.assertEqual(response.data['image'], body['image']) url = '/v1/apps/{app_id}/releases/v3'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) release3 = response.data self.assertNotEqual(release2['uuid'], release3['uuid']) self.assertNotEqual(release2['build'], release3['build']) self.assertEquals(release3['version'], 3) url = '/v1/apps/{app_id}/releases/v2'.format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) release2 = response.data self.assertNotEqual(release2['uuid'], release3['uuid']) self.assertNotEqual(release2['build'], release3['build']) self.assertEquals(release2['version'], 2) url = '/v1/apps/{app_id}/releases'.format(**locals()) response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 405) response = self.client.put(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 405) response = self.client.patch(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 405) response = self.client.delete(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 405) release3 = release3 release = Release.objects.get(uuid=release3['uuid']) self.assertIn('autotest deployed ', release.summary)
deis
positive
def grant_access_to_s3_buckets(self, bucket_names): """ Updates the IAM Role Policy to grant access to the specified S3 Bucket. Allowing target account access to the deployment resources. Args: bucket_name (list[str]): The bucket names to grant access to. """ LOGGER.debug('calling grant_s3_buckets_access for bucket_names %s', bucket_names) if len(bucket_names) == 0: return <DeepExtract> s3_statements = list(filter(lambda stmt: stmt['Sid'] == 'S3', self.policy_document.get('Statement', {}))) if len(s3_statements) == 1: statement = s3_statements[0] if len(s3_statements) > 1: raise Exception(f"Found multiple {'S3'} statements in Role {self.role_name} Policy {self.policy_name}.") statement = None </DeepExtract> if statement is None: return for bucket_name in bucket_names: if f'arn:{PARTITION}:s3:::{bucket_name}' in statement['Resource']: continue LOGGER.info('Updating Role %s policy %s to access S3://%s', self.role_name, self.policy_name, bucket_name) self.policy_changed = True if not isinstance(statement['Resource'], list): statement['Resource'] = [statement['Resource']] statement['Resource'].append(f'arn:{PARTITION}:s3:::{bucket_name}') statement['Resource'].append(f'arn:{PARTITION}:s3:::{bucket_name}/*')
def grant_access_to_s3_buckets(self, bucket_names): """ Updates the IAM Role Policy to grant access to the specified S3 Bucket. Allowing target account access to the deployment resources. Args: bucket_name (list[str]): The bucket names to grant access to. """ LOGGER.debug('calling grant_s3_buckets_access for bucket_names %s', bucket_names) if len(bucket_names) == 0: return s3_statements = list(filter(lambda stmt: stmt['Sid'] == 'S3', self.policy_document.get('Statement', {}))) if len(s3_statements) == 1: statement = s3_statements[0] if len(s3_statements) > 1: raise Exception(f"Found multiple {'S3'} statements in Role {self.role_name} Policy {self.policy_name}.") statement = None if statement is None: return for bucket_name in bucket_names: if f'arn:{PARTITION}:s3:::{bucket_name}' in statement['Resource']: continue LOGGER.info('Updating Role %s policy %s to access S3://%s', self.role_name, self.policy_name, bucket_name) self.policy_changed = True if not isinstance(statement['Resource'], list): statement['Resource'] = [statement['Resource']] statement['Resource'].append(f'arn:{PARTITION}:s3:::{bucket_name}') statement['Resource'].append(f'arn:{PARTITION}:s3:::{bucket_name}/*')
aws-deployment-framework
positive
def _expect_input_queue(signals, timeout, discard_other_messages): if timeout is not None: end_time = time.time() + timeout remaining_time = timeout else: remaining_time = None while True: try: message = self._input_queue.get(timeout=remaining_time) except queue.Empty: return <DeepExtract> if message.name == self.database.name: if all([message.signals[name] == signals[name] for name in signals]): decoded = message.signals </DeepExtract> if decoded is not None: return decoded if not discard_other_messages: self._input_list.append(message) if timeout is not None: remaining_time = end_time - time.time() if remaining_time <= 0: return
def _expect_input_queue(signals, timeout, discard_other_messages): if timeout is not None: end_time = time.time() + timeout remaining_time = timeout else: remaining_time = None while True: try: message = self._input_queue.get(timeout=remaining_time) except queue.Empty: return if message.name == self.database.name: if all([message.signals[name] == signals[name] for name in signals]): decoded = message.signals if decoded is not None: return decoded if not discard_other_messages: self._input_list.append(message) if timeout is not None: remaining_time = end_time - time.time() if remaining_time <= 0: return
cantools
positive
def output(self): <DeepExtract> pipeline_args = self.get_pipeline_args() consensus_args = Consensus.get_args(pipeline_args, **args) </DeepExtract> yield luigi.LocalTarget(consensus_args.metrics_json) yield luigi.LocalTarget(consensus_args.consensus_gp) yield luigi.LocalTarget(consensus_args.consensus_gp_info) yield luigi.LocalTarget(consensus_args.consensus_gff3) yield luigi.LocalTarget(consensus_args.consensus_fasta) yield luigi.LocalTarget(consensus_args.consensus_protein_fasta)
def output(self): pipeline_args = self.get_pipeline_args() consensus_args = Consensus.get_args(pipeline_args, **args) yield luigi.LocalTarget(consensus_args.metrics_json) yield luigi.LocalTarget(consensus_args.consensus_gp) yield luigi.LocalTarget(consensus_args.consensus_gp_info) yield luigi.LocalTarget(consensus_args.consensus_gff3) yield luigi.LocalTarget(consensus_args.consensus_fasta) yield luigi.LocalTarget(consensus_args.consensus_protein_fasta)
Comparative-Annotation-Toolkit
positive
def isdir(self, dirname): dirname = dirname.strip('/') <DeepExtract> try: if method == 'GET': response = urllib.request.urlopen(self.keysurl + dirname, timeout=10).read() else: request = urllib.request.Request(self.keysurl + dirname, data=data.encode('ascii'), method=method) response = urllib.request.urlopen(request, timeout=10).read() except urllib.error.HTTPError as e: response = e.fp.read() result = json.loads(str(response, encoding='utf-8')) out = result </DeepExtract> if 'action' not in out: return [False, dirname + ' not found'] if 'dir' not in out['node']: return [False, dirname + ' is a key'] return [True, dirname]
def isdir(self, dirname): dirname = dirname.strip('/') try: if method == 'GET': response = urllib.request.urlopen(self.keysurl + dirname, timeout=10).read() else: request = urllib.request.Request(self.keysurl + dirname, data=data.encode('ascii'), method=method) response = urllib.request.urlopen(request, timeout=10).read() except urllib.error.HTTPError as e: response = e.fp.read() result = json.loads(str(response, encoding='utf-8')) out = result if 'action' not in out: return [False, dirname + ' not found'] if 'dir' not in out['node']: return [False, dirname + ' is a key'] return [True, dirname]
docklet
positive
def put(self, key, value): if not key: return <DeepExtract> char = key[0] if not self._root: self._root = TNode() self._root.char = char if char < self._root.char: self._root.left = self._put(self._root.left, key, value, 0) elif char > self._root.char: self._root.right = self._put(self._root.right, key, value, 0) elif 0 < len(key) - 1: self._root.mid = self._put(self._root.mid, key, value, 0 + 1) else: self._root.val = value self._root = self._root </DeepExtract> self._size += 1
def put(self, key, value): if not key: return char = key[0] if not self._root: self._root = TNode() self._root.char = char if char < self._root.char: self._root.left = self._put(self._root.left, key, value, 0) elif char > self._root.char: self._root.right = self._put(self._root.right, key, value, 0) elif 0 < len(key) - 1: self._root.mid = self._put(self._root.mid, key, value, 0 + 1) else: self._root.val = value self._root = self._root self._size += 1
algorithms-sedgewick-python
positive
def check(self, hostname, key): """ Return True if the given key is associated with the given hostname in this dictionary. :param str hostname: hostname (or IP) of the SSH server :param .PKey key: the key to check :return: ``True`` if the key is associated with the hostname; else ``False`` """ <DeepExtract> class SubDict(MutableMapping): def __init__(self, hostname, entries, hostkeys): self._hostname = hostname self._entries = entries self._hostkeys = hostkeys def __iter__(self): for k in self.keys(): yield k def __len__(self): k = len(self.keys()) def __delitem__(self, key): for e in list(self._entries): if e.key.get_name() == key: self._entries.remove(e) else: raise KeyError(key) def __getitem__(self, key): for e in self._entries: if e.key.get_name() == key: k = e.key raise KeyError(key) def __setitem__(self, key, val): for e in self._entries: if e.key is None: continue if e.key.get_name() == key: e.key = val break else: e = HostKeyEntry([hostname], val) self._entries.append(e) self._hostkeys._entries.append(e) def keys(self): k = [e.key.get_name() for e in self._entries if e.key is not None] entries = [] for e in self._entries: if self._hostname_matches(hostname, e): entries.append(e) if len(entries) == 0: k = None k = SubDict(hostname, entries, self) </DeepExtract> if k is None: return False host_key = k.get(key.get_name(), None) if host_key is None: return False return host_key.asbytes() == key.asbytes()
def check(self, hostname, key): """ Return True if the given key is associated with the given hostname in this dictionary. :param str hostname: hostname (or IP) of the SSH server :param .PKey key: the key to check :return: ``True`` if the key is associated with the hostname; else ``False`` """ class SubDict(MutableMapping): def __init__(self, hostname, entries, hostkeys): self._hostname = hostname self._entries = entries self._hostkeys = hostkeys def __iter__(self): for k in self.keys(): yield k def __len__(self): k = len(self.keys()) def __delitem__(self, key): for e in list(self._entries): if e.key.get_name() == key: self._entries.remove(e) else: raise KeyError(key) def __getitem__(self, key): for e in self._entries: if e.key.get_name() == key: k = e.key raise KeyError(key) def __setitem__(self, key, val): for e in self._entries: if e.key is None: continue if e.key.get_name() == key: e.key = val break else: e = HostKeyEntry([hostname], val) self._entries.append(e) self._hostkeys._entries.append(e) def keys(self): k = [e.key.get_name() for e in self._entries if e.key is not None] entries = [] for e in self._entries: if self._hostname_matches(hostname, e): entries.append(e) if len(entries) == 0: k = None k = SubDict(hostname, entries, self) if k is None: return False host_key = k.get(key.get_name(), None) if host_key is None: return False return host_key.asbytes() == key.asbytes()
cerbrutus
positive
def _get_order_by(self): """Returns directional lookups to be unpacked in `QuerySet.order_by`.""" order_by = [] ordering = self.ordering if ordering: tree = trees[self.tree] (ids, directions) = zip(*ordering) <DeepExtract> from avocado.models import DataConceptField if not ids: groups = OrderedDict() cfields = list(DataConceptField.objects.filter(concept__pk__in=ids).select_related().order_by('concept', 'order')) cfields.sort(key=lambda o: ids.index(o.concept.pk)) groups = OrderedDict() for cf in cfields: pk = cf.concept.pk if pk not in groups: groups[pk] = [] groups[pk].append(cf.field) groups = groups </DeepExtract> for (pk, direction) in ordering: for f in groups[pk]: lookup = tree.query_string_for_field(f.order_field, model=f.model) if direction.lower() == 'desc': order_by.append('-' + lookup) else: order_by.append(lookup) return order_by
def _get_order_by(self): """Returns directional lookups to be unpacked in `QuerySet.order_by`.""" order_by = [] ordering = self.ordering if ordering: tree = trees[self.tree] (ids, directions) = zip(*ordering) from avocado.models import DataConceptField if not ids: groups = OrderedDict() cfields = list(DataConceptField.objects.filter(concept__pk__in=ids).select_related().order_by('concept', 'order')) cfields.sort(key=lambda o: ids.index(o.concept.pk)) groups = OrderedDict() for cf in cfields: pk = cf.concept.pk if pk not in groups: groups[pk] = [] groups[pk].append(cf.field) groups = groups for (pk, direction) in ordering: for f in groups[pk]: lookup = tree.query_string_for_field(f.order_field, model=f.model) if direction.lower() == 'desc': order_by.append('-' + lookup) else: order_by.append(lookup) return order_by
avocado
positive
def squeezenet(images, is_training=True, num_classes=1001): """Squeezenet 1.0 model.""" <DeepExtract> net = tf.layers.conv2d(images, 96, [7, 7], (2, 2), kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, activation=tf.nn.relu, name='conv1', padding='same') </DeepExtract> net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name='maxpool1') <DeepExtract> with tf.variable_scope('fire2', 'fire', [net]): squeezed = conv2d(net, 16, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 64, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 64, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) </DeepExtract> <DeepExtract> with tf.variable_scope('fire3', 'fire', [net]): squeezed = conv2d(net, 16, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 64, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 64, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) </DeepExtract> <DeepExtract> with tf.variable_scope('fire4', 'fire', [net]): squeezed = conv2d(net, 32, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 128, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 128, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) </DeepExtract> net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name='maxpool4') <DeepExtract> with tf.variable_scope('fire5', 'fire', [net]): squeezed = conv2d(net, 32, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 128, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 128, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) </DeepExtract> <DeepExtract> with tf.variable_scope('fire6', 'fire', [net]): squeezed = conv2d(net, 48, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 192, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 192, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) </DeepExtract> <DeepExtract> with tf.variable_scope('fire7', 'fire', [net]): squeezed = conv2d(net, 48, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 192, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 192, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) </DeepExtract> <DeepExtract> with tf.variable_scope('fire8', 'fire', [net]): squeezed = conv2d(net, 64, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 256, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 256, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) </DeepExtract> net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name='maxpool8') <DeepExtract> with tf.variable_scope('fire9', 'fire', [net]): squeezed = conv2d(net, 64, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 256, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 256, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) </DeepExtract> net = tf.layers.dropout(net, rate=0.5 if is_training else 0.0, name='drop9') <DeepExtract> net = tf.layers.conv2d(net, num_classes, [1, 1], (1, 1), kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, activation=tf.nn.relu, name='conv10', padding='same') </DeepExtract> net = tf.layers.average_pooling2d(net, pool_size=(13, 13), strides=(1, 1)) logits = tf.layers.flatten(net) return logits
def squeezenet(images, is_training=True, num_classes=1001): """Squeezenet 1.0 model.""" net = tf.layers.conv2d(images, 96, [7, 7], (2, 2), kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, activation=tf.nn.relu, name='conv1', padding='same') net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name='maxpool1') with tf.variable_scope('fire2', 'fire', [net]): squeezed = conv2d(net, 16, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 64, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 64, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) with tf.variable_scope('fire3', 'fire', [net]): squeezed = conv2d(net, 16, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 64, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 64, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) with tf.variable_scope('fire4', 'fire', [net]): squeezed = conv2d(net, 32, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 128, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 128, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name='maxpool4') with tf.variable_scope('fire5', 'fire', [net]): squeezed = conv2d(net, 32, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 128, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 128, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) with tf.variable_scope('fire6', 'fire', [net]): squeezed = conv2d(net, 48, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 192, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 192, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) with tf.variable_scope('fire7', 'fire', [net]): squeezed = conv2d(net, 48, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 192, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 192, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) with tf.variable_scope('fire8', 'fire', [net]): squeezed = conv2d(net, 64, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 256, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 256, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name='maxpool8') with tf.variable_scope('fire9', 'fire', [net]): squeezed = conv2d(net, 64, [1, 1], name='squeeze') e1x1 = conv2d(squeezed, 256, [1, 1], name='e1x1') e3x3 = conv2d(squeezed, 256, [3, 3], name='e3x3') net = tf.concat([e1x1, e3x3], axis=3) net = tf.layers.dropout(net, rate=0.5 if is_training else 0.0, name='drop9') net = tf.layers.conv2d(net, num_classes, [1, 1], (1, 1), kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, activation=tf.nn.relu, name='conv10', padding='same') net = tf.layers.average_pooling2d(net, pool_size=(13, 13), strides=(1, 1)) logits = tf.layers.flatten(net) return logits
class-balanced-loss
positive
def test_balance_2d_plasma_neutrality(self): """test matching of plasma neutrality for 2d profiles""" densities_1 = from_elementdensity(self.atomic_data, self.element, self.n_element_2d, self.n_e_2d, self.t_e_profile_2d, free_variable=(self.r, self.z)) densities_2 = from_elementdensity(self.atomic_data, self.element2, self.n_element2_2d, self.n_e_profile_2d, self.t_e_2d, free_variable=(self.r, self.z)) densities_3 = match_plasma_neutrality(self.atomic_data, self.element_bulk, [densities_1, densities_2], self.n_e_2d, self.t_e_profile_2d, free_variable=(self.r, self.z)) <DeepExtract> if isinstance(densities_1, dict): if isinstance(densities_1[0], Iterable): total = np.zeros_like(densities_1[0]) else: total = 0 for (index, values) in densities_1.items(): total += values * index elif isinstance(densities_1, np.ndarray): total = np.zeros_like(densities_1[0, ...]) for index in np.ndindex(densities_1.shape): total[index] += densities_1[index] * index[0] total = total </DeepExtract> total += self.sumup_electrons(densities_2) total += self.sumup_electrons(densities_3) self.assertTrue(np.allclose(total, self.n_e_profile_2d, rtol=self.TOLERANCE))
def test_balance_2d_plasma_neutrality(self): """test matching of plasma neutrality for 2d profiles""" densities_1 = from_elementdensity(self.atomic_data, self.element, self.n_element_2d, self.n_e_2d, self.t_e_profile_2d, free_variable=(self.r, self.z)) densities_2 = from_elementdensity(self.atomic_data, self.element2, self.n_element2_2d, self.n_e_profile_2d, self.t_e_2d, free_variable=(self.r, self.z)) densities_3 = match_plasma_neutrality(self.atomic_data, self.element_bulk, [densities_1, densities_2], self.n_e_2d, self.t_e_profile_2d, free_variable=(self.r, self.z)) if isinstance(densities_1, dict): if isinstance(densities_1[0], Iterable): total = np.zeros_like(densities_1[0]) else: total = 0 for (index, values) in densities_1.items(): total += values * index elif isinstance(densities_1, np.ndarray): total = np.zeros_like(densities_1[0, ...]) for index in np.ndindex(densities_1.shape): total[index] += densities_1[index] * index[0] total = total total += self.sumup_electrons(densities_2) total += self.sumup_electrons(densities_3) self.assertTrue(np.allclose(total, self.n_e_profile_2d, rtol=self.TOLERANCE))
core
positive
def IE_processor(df) -> list: """Creates quarter hour datapoints for IE exchange.""" datapoints = [] for (index, row) in df.iterrows(): snapshot = {} <DeepExtract> NIR = tz.gettz('Europe/Belfast') modified_timestamp = parser.parse(row['TimeStamp'], dayfirst=True).replace(tzinfo=parser.parse(row['TimeStamp'], dayfirst=True).tzinfo or NIR) snapshot['datetime'] = modified_timestamp </DeepExtract> netFlow = -1 * row['Tie_Lines_MW'] snapshot['netFlow'] = netFlow snapshot['source'] = 'soni.ltd.uk' snapshot['sortedZoneKeys'] = 'GB-NIR->IE' datapoints.append(snapshot) return datapoints
def IE_processor(df) -> list: """Creates quarter hour datapoints for IE exchange.""" datapoints = [] for (index, row) in df.iterrows(): snapshot = {} NIR = tz.gettz('Europe/Belfast') modified_timestamp = parser.parse(row['TimeStamp'], dayfirst=True).replace(tzinfo=parser.parse(row['TimeStamp'], dayfirst=True).tzinfo or NIR) snapshot['datetime'] = modified_timestamp netFlow = -1 * row['Tie_Lines_MW'] snapshot['netFlow'] = netFlow snapshot['source'] = 'soni.ltd.uk' snapshot['sortedZoneKeys'] = 'GB-NIR->IE' datapoints.append(snapshot) return datapoints
electricitymap-contrib
positive
def process_resoto_events(self, event: Event): graph = event.data log.info('Checking for outstanding Slack notifications') <DeepExtract> log.debug('Updating Users Groups and Channels') tmp_users = {} tmp_emails = {} tmp_usergroups = {} tmp_channels = {} for user in graph.search('kind', 'slack_user'): tmp_users[user.name] = user if user.email: tmp_emails[user.email] = user for usergroup in graph.search('kind', 'slack_usergroup'): if usergroup.is_usergroup: tmp_usergroups[usergroup.name] = usergroup for channel in graph.search('kind', 'slack_conversation'): if channel.is_channel: tmp_channels[channel.name] = channel self.users2id = tmp_users self.emails2id = tmp_emails self.usergroups2id = tmp_usergroups self.channels2id = tmp_channels </DeepExtract> for node in graph.nodes: if isinstance(node, BaseResource) and len(node.event_log) > 0 and ('resoto:owner' in node.tags): cloud = node.cloud(graph) account = node.account(graph) region = node.region(graph) owner_tag = str(node.tags['resoto:owner']) if not isinstance(cloud, BaseCloud) or not isinstance(account, BaseAccount) or (not isinstance(region, BaseRegion)): continue destination = None if owner_tag.startswith('slack:'): owner = owner_tag[6:] destination = self.users2id.get(owner) elif owner_tag.startswith('email:'): owner = owner_tag[6:] destination = self.emails2id.get(owner) else: log.error(f'Unknown owner tag format {owner_tag} for node {node.dname} in cloud {cloud.name} account {account.dname} region {region.name}') if not isinstance(destination, SlackUser): log.error(f'Unable to determine Slack destination based on resoto:owner tag value {owner_tag}') continue event_log_text = '' for event in node.event_log: event_log_text += f"{utc_str(event['timestamp'])} {event['msg']}" + '\n' slack_message = f'Hello {destination.first_name}, your cloud resource `{node.dname}` in cloud `{cloud.name}` account `{account.dname}` region `{region.name}` was modified during the current resoto run. Here is the event log:\n```\n{event_log_text}```' <DeepExtract> log.debug(f'Sending Slack message to ID {destination.id}') response = self.client.conversations_open(users=[destination.id]) if response.data.get('ok', False): channel = response.data.get('channel', {}).get('id') self.client.chat_postMessage(channel=channel, text=slack_message) </DeepExtract>
def process_resoto_events(self, event: Event): graph = event.data log.info('Checking for outstanding Slack notifications') log.debug('Updating Users Groups and Channels') tmp_users = {} tmp_emails = {} tmp_usergroups = {} tmp_channels = {} for user in graph.search('kind', 'slack_user'): tmp_users[user.name] = user if user.email: tmp_emails[user.email] = user for usergroup in graph.search('kind', 'slack_usergroup'): if usergroup.is_usergroup: tmp_usergroups[usergroup.name] = usergroup for channel in graph.search('kind', 'slack_conversation'): if channel.is_channel: tmp_channels[channel.name] = channel self.users2id = tmp_users self.emails2id = tmp_emails self.usergroups2id = tmp_usergroups self.channels2id = tmp_channels for node in graph.nodes: if isinstance(node, BaseResource) and len(node.event_log) > 0 and ('resoto:owner' in node.tags): cloud = node.cloud(graph) account = node.account(graph) region = node.region(graph) owner_tag = str(node.tags['resoto:owner']) if not isinstance(cloud, BaseCloud) or not isinstance(account, BaseAccount) or (not isinstance(region, BaseRegion)): continue destination = None if owner_tag.startswith('slack:'): owner = owner_tag[6:] destination = self.users2id.get(owner) elif owner_tag.startswith('email:'): owner = owner_tag[6:] destination = self.emails2id.get(owner) else: log.error(f'Unknown owner tag format {owner_tag} for node {node.dname} in cloud {cloud.name} account {account.dname} region {region.name}') if not isinstance(destination, SlackUser): log.error(f'Unable to determine Slack destination based on resoto:owner tag value {owner_tag}') continue event_log_text = '' for event in node.event_log: event_log_text += f"{utc_str(event['timestamp'])} {event['msg']}" + '\n' slack_message = f'Hello {destination.first_name}, your cloud resource `{node.dname}` in cloud `{cloud.name}` account `{account.dname}` region `{region.name}` was modified during the current resoto run. Here is the event log:\n```\n{event_log_text}```' log.debug(f'Sending Slack message to ID {destination.id}') response = self.client.conversations_open(users=[destination.id]) if response.data.get('ok', False): channel = response.data.get('channel', {}).get('id') self.client.chat_postMessage(channel=channel, text=slack_message)
cloudkeeper
positive
def fit(self, model, data): <DeepExtract> self.train_dataloader = data.train_dataloader() self.val_dataloader = data.val_dataloader() self.num_train_batches = len(self.train_dataloader) self.num_val_batches = len(self.val_dataloader) if self.val_dataloader is not None else 0 </DeepExtract> <DeepExtract> model.trainer = self model.board.xlim = [0, self.max_epochs] self.model = model </DeepExtract> self.optim = model.configure_optimizers() self.epoch = 0 self.train_batch_idx = 0 self.val_batch_idx = 0 for self.epoch in range(self.max_epochs): <DeepExtract> raise NotImplementedError </DeepExtract>
def fit(self, model, data): self.train_dataloader = data.train_dataloader() self.val_dataloader = data.val_dataloader() self.num_train_batches = len(self.train_dataloader) self.num_val_batches = len(self.val_dataloader) if self.val_dataloader is not None else 0 model.trainer = self model.board.xlim = [0, self.max_epochs] self.model = model self.optim = model.configure_optimizers() self.epoch = 0 self.train_batch_idx = 0 self.val_batch_idx = 0 for self.epoch in range(self.max_epochs): raise NotImplementedError
d2l-en
positive
def find_parent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" r = None <DeepExtract> l = self._find_all(name, attrs, None, 1, self.parents, **kwargs) </DeepExtract> if l: r = l[0] return r
def find_parent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" r = None l = self._find_all(name, attrs, None, 1, self.parents, **kwargs) if l: r = l[0] return r
BeautifulSoup4
positive
def _BuildInnerList(self): """ Build the list that will be used to populate the ListCtrl. This internal list is an amalgum of model objects, ListGroups and None (which are blank rows). """ self.objectToIndexMap = None if not self.showGroups: return ObjectListView._BuildInnerList(self) if not self.modelObjects: self.groups = list() self.innerList = list() return if self.groups is None: <DeepExtract> if modelObjects is None: modelObjects = self.modelObjects if self.filter: modelObjects = self.filter(modelObjects) groupingColumn = self.GetGroupByColumn() groupMap = {} for model in modelObjects: key = groupingColumn.GetGroupKey(model) group = groupMap.get(key) if group is None: groupMap[key] = group = ListGroup(key, groupingColumn.GetGroupKeyAsString(key)) group.Add(model) groups = groupMap.values() if self.GetShowItemCounts(): self._BuildGroupTitles(groups, groupingColumn) evt = OLVEvent.GroupCreationEvent(self, groups) self.GetEventHandler().ProcessEvent(evt) self.groups = evt.groups </DeepExtract> <DeepExtract> if groups is None: groups = self.groups if ascending is None: ascending = self.sortAscending if self.GetAlwaysGroupByColumn(): sortCol = self.GetSortColumn() else: sortCol = self.GetGroupByColumn() evt = OLVEvent.SortGroupsEvent(self, groups, sortCol, ascending) self.GetEventHandler().ProcessEvent(evt) if evt.wasHandled: return def _getLowerCaseKey(group): try: return group.key.lower() except: return group.key groups.sort(key=_getLowerCaseKey, reverse=not ascending) for x in groups: self._SortObjects(x.modelObjects, sortCol, self.GetPrimaryColumn()) </DeepExtract> self.innerList = list() for grp in self.groups: if len(self.innerList) and self.putBlankLineBetweenGroups: self.innerList.append(None) self.innerList.append(grp) if grp.isExpanded: self.innerList.extend(grp.modelObjects)
def _BuildInnerList(self): """ Build the list that will be used to populate the ListCtrl. This internal list is an amalgum of model objects, ListGroups and None (which are blank rows). """ self.objectToIndexMap = None if not self.showGroups: return ObjectListView._BuildInnerList(self) if not self.modelObjects: self.groups = list() self.innerList = list() return if self.groups is None: if modelObjects is None: modelObjects = self.modelObjects if self.filter: modelObjects = self.filter(modelObjects) groupingColumn = self.GetGroupByColumn() groupMap = {} for model in modelObjects: key = groupingColumn.GetGroupKey(model) group = groupMap.get(key) if group is None: groupMap[key] = group = ListGroup(key, groupingColumn.GetGroupKeyAsString(key)) group.Add(model) groups = groupMap.values() if self.GetShowItemCounts(): self._BuildGroupTitles(groups, groupingColumn) evt = OLVEvent.GroupCreationEvent(self, groups) self.GetEventHandler().ProcessEvent(evt) self.groups = evt.groups if groups is None: groups = self.groups if ascending is None: ascending = self.sortAscending if self.GetAlwaysGroupByColumn(): sortCol = self.GetSortColumn() else: sortCol = self.GetGroupByColumn() evt = OLVEvent.SortGroupsEvent(self, groups, sortCol, ascending) self.GetEventHandler().ProcessEvent(evt) if evt.wasHandled: return def _getLowerCaseKey(group): try: return group.key.lower() except: return group.key groups.sort(key=_getLowerCaseKey, reverse=not ascending) for x in groups: self._SortObjects(x.modelObjects, sortCol, self.GetPrimaryColumn()) self.innerList = list() for grp in self.groups: if len(self.innerList) and self.putBlankLineBetweenGroups: self.innerList.append(None) self.innerList.append(grp) if grp.isExpanded: self.innerList.extend(grp.modelObjects)
bookhub
positive
def main(unused_argv): try: tf_config_env = json.loads(os.environ.get('TF_CONFIG', '{}')) task_data = tf_config_env.get('task') or {'type': 'master', 'index': 0} trial = task_data.get('trial') running_on_mlengine = len(tf_config_env) > 0 print('Running {}'.format('on Google ML Engine' if running_on_mlengine else 'on a server/machine')) logging.getLogger('tensorflow').propagate = running_on_mlengine tf.logging.info('Starting training job') gcs_model_output_dir = FLAGS.model_dir if FLAGS.use_local_cache_model_dir: model_output_dir = tempfile.mkdtemp() tf.logging.info('Created local temp folder for models output: {}'.format(model_output_dir)) else: model_output_dir = gcs_model_output_dir if trial is not None: model_output_dir = os.path.join(model_output_dir, trial) gcs_model_output_dir = os.path.join(gcs_model_output_dir, trial) tf.logging.info('Hyperparameter Tuning - Trial {} - model_dir = {} - gcs_model_output_dir = {} '.format(trial, model_output_dir, gcs_model_output_dir)) tf.logging.info('Will save temporary model outputs to {}'.format(model_output_dir)) if FLAGS.warmup_model_dir != None: tf.logging.info('Copying model outputs from previous job ({}) for warm start'.format(FLAGS.warmup_model_dir)) dowload_model_output_from_gcs(model_output_dir, gcs_model_dir=FLAGS.warmup_model_dir, files_pattern=['graph.pb', 'model.ckpt-', 'checkpoint']) local_files_after_download_to_debug = list(glob.iglob('{}/**/*'.format(model_output_dir), recursive=True)) tf.logging.info('Files copied from GCS to warm start training: {}'.format(local_files_after_download_to_debug)) tf.logging.info('Loading ACR module assets') (acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix) = load_acr_module_resources(FLAGS.acr_module_resources_path) l2_normalizer_by_sample = Normalizer(norm='l2') content_article_embeddings_matrix = l2_normalizer_by_sample.fit_transform(content_article_embeddings_matrix) content_article_embeddings_matrix = content_article_embeddings_matrix * FLAGS.content_embedding_scale_factor <DeepExtract> articles_features_config = {'article_id': {'type': 'categorical', 'dtype': 'int'}, 'created_at_ts': {'type': 'numerical', 'dtype': 'int'}, 'category0': {'type': 'categorical', 'dtype': 'int'}, 'category1': {'type': 'categorical', 'dtype': 'int'}, 'author': {'type': 'categorical', 'dtype': 'int'}} feature_groups = {'category': ['category0', 'category1'], 'author': ['author']} if FLAGS.enabled_articles_input_features_groups != [ALL_FEATURES]: for feature_group in feature_groups: if feature_group not in FLAGS.enabled_articles_input_features_groups: for feature in feature_groups[feature_group]: del articles_features_config[feature] for feature_name in articles_features_config: if feature_name in acr_label_encoders and articles_features_config[feature_name]['type'] == 'categorical': articles_features_config[feature_name]['cardinality'] = len(acr_label_encoders[feature_name]) tf.logging.info('Article Features: {}'.format(articles_features_config)) articles_features_config = articles_features_config </DeepExtract> <DeepExtract> articles_metadata = {} for feature_name in articles_features_config: articles_metadata[feature_name] = articles_metadata_df[feature_name].values articles_metadata[feature_name] = np.hstack([[0], articles_metadata[feature_name]]) articles_metadata = articles_metadata </DeepExtract> tf.logging.info('Loading NAR module preprocesing assets') nar_label_encoders = load_nar_module_preprocessing_resources(FLAGS.nar_module_preprocessing_resources_path) <DeepExtract> 
session_features_config = {'single_features': {'user_id': {'type': 'categorical', 'dtype': 'bytes'}, 'session_id': {'type': 'numerical', 'dtype': 'int'}, 'session_size': {'type': 'numerical', 'dtype': 'int'}, 'session_start': {'type': 'numerical', 'dtype': 'int'}}, 'sequence_features': {'event_timestamp': {'type': 'numerical', 'dtype': 'int'}, 'item_clicked': {'type': 'categorical', 'dtype': 'int'}, 'city': {'type': 'categorical', 'dtype': 'int'}, 'region': {'type': 'categorical', 'dtype': 'int'}, 'country': {'type': 'categorical', 'dtype': 'int'}, 'device': {'type': 'categorical', 'dtype': 'int'}, 'os': {'type': 'categorical', 'dtype': 'int'}, 'local_hour_sin': {'type': 'numerical', 'dtype': 'float'}, 'local_hour_cos': {'type': 'numerical', 'dtype': 'float'}, 'weekday': {'type': 'numerical', 'dtype': 'float'}, 'referrer_class': {'type': 'categorical', 'dtype': 'int'}}} feature_groups = {'time': ['local_hour_sin', 'local_hour_cos', 'weekday'], 'device': ['device', 'os'], 'location': ['country', 'region', 'city'], 'referrer': ['referrer_class']} if FLAGS.enabled_clicks_input_features_groups != [ALL_FEATURES]: for feature_group in feature_groups: if feature_group not in FLAGS.enabled_clicks_input_features_groups: for feature in feature_groups[feature_group]: del session_features_config['sequence_features'][feature] for feature_groups_key in session_features_config: features_group_config = session_features_config[feature_groups_key] for feature_name in features_group_config: if feature_name in nar_label_encoders and features_group_config[feature_name]['type'] == 'categorical': features_group_config[feature_name]['cardinality'] = len(nar_label_encoders[feature_name]) tf.logging.info('Session Features: {}'.format(session_features_config)) session_features_config = session_features_config </DeepExtract> tf.logging.info('Building NAR model') global eval_sessions_metrics_log, clicked_items_state, sessions_negative_items_log, sessions_chameleon_recommendations_log, global_eval_hour_id eval_sessions_metrics_log = [] clicked_items_state = ClickedItemsState(FLAGS.recent_clicks_buffer_hours, FLAGS.recent_clicks_buffer_max_size, FLAGS.recent_clicks_for_normalization, content_article_embeddings_matrix.shape[0]) <DeepExtract> run_config = tf.estimator.RunConfig(tf_random_seed=RANDOM_SEED, keep_checkpoint_max=1, save_checkpoints_secs=1200, save_summary_steps=100, log_step_count_steps=100) estimator = tf.estimator.Estimator(config=run_config, model_dir=model_output_dir, model_fn=nar_module_model_fn, params={'batch_size': FLAGS.batch_size, 'lr': FLAGS.learning_rate, 'dropout_keep_prob': FLAGS.dropout_keep_prob, 'reg_weight_decay': FLAGS.reg_l2, 'recent_clicks_buffer_hours': FLAGS.recent_clicks_buffer_hours, 'recent_clicks_buffer_max_size': FLAGS.recent_clicks_buffer_max_size, 'recent_clicks_for_normalization': FLAGS.recent_clicks_for_normalization, 'eval_metrics_top_n': FLAGS.eval_metrics_top_n, 'CAR_embedding_size': FLAGS.CAR_embedding_size, 'rnn_units': FLAGS.rnn_units, 'train_total_negative_samples': FLAGS.train_total_negative_samples, 'train_negative_samples_from_buffer': FLAGS.train_negative_samples_from_buffer, 'eval_total_negative_samples': FLAGS.eval_total_negative_samples, 'eval_negative_samples_from_buffer': FLAGS.eval_negative_samples_from_buffer, 'softmax_temperature': FLAGS.softmax_temperature, 'save_histograms': FLAGS.save_histograms, 'eval_metrics_by_session_position': FLAGS.eval_metrics_by_session_position, 'novelty_reg_factor': FLAGS.novelty_reg_factor, 'diversity_reg_factor': 
FLAGS.diversity_reg_factor, 'eval_negative_sample_relevance': FLAGS.eval_negative_sample_relevance, 'eval_cold_start': FLAGS.eval_cold_start, 'session_features_config': session_features_config, 'articles_features_config': articles_features_config, 'articles_metadata': articles_metadata, 'content_article_embeddings_matrix': content_article_embeddings_matrix}) model = estimator </DeepExtract> tf.logging.info('Getting training file names') train_files = resolve_files(FLAGS.train_set_path_regex) if FLAGS.train_files_from > FLAGS.train_files_up_to: raise Exception('Final training file cannot be lower than Starting training file') train_files = train_files[FLAGS.train_files_from:FLAGS.train_files_up_to + 1] tf.logging.info('{} files where the network will be trained and evaluated on, from {} to {}'.format(len(train_files), train_files[0], train_files[-1])) start_train = time() tf.logging.info('Starting Training Loop') training_files_chunks = list(chunks(train_files, FLAGS.training_hours_for_each_eval)) for chunk_id in range(0, len(training_files_chunks) - 1): training_files_chunk = training_files_chunks[chunk_id] tf.logging.info('Training files from {} to {}'.format(training_files_chunk[0], training_files_chunk[-1])) model.train(input_fn=lambda : prepare_dataset_iterator(training_files_chunk, session_features_config, batch_size=FLAGS.batch_size, truncate_session_length=FLAGS.truncate_session_length)) if chunk_id < len(training_files_chunks) - 1: eval_file = training_files_chunks[chunk_id + 1][0] tf.logging.info('Evaluating file {}'.format(eval_file)) model.evaluate(input_fn=lambda : prepare_dataset_iterator(eval_file, session_features_config, batch_size=FLAGS.batch_size, truncate_session_length=FLAGS.truncate_session_length)) if chunk_id % FLAGS.save_results_each_n_evals == 0: tf.logging.info('Saving eval metrics') save_eval_benchmark_metrics_csv(eval_sessions_metrics_log, model_output_dir, training_hours_for_each_eval=FLAGS.training_hours_for_each_eval) if FLAGS.save_eval_sessions_negative_samples: <DeepExtract> append_lines_to_text_file(os.path.join(model_output_dir, output_file), map(lambda x: json.dumps({'session_id': x['session_id'], 'negative_items': x['negative_items']}), sessions_negative_items_log)) </DeepExtract> sessions_negative_items_log = [] if FLAGS.save_eval_sessions_recommendations: <DeepExtract> append_lines_to_text_file(os.path.join(model_output_dir, output_file), map(lambda x: json.dumps({'eval_hour_id': global_eval_hour_id, 'session_id': x['session_id'], 'next_click_labels': x['next_click_labels'], 'predicted_item_ids': x['predicted_item_ids'], 'predicted_item_probs': x['predicted_item_probs'], 'predicted_item_norm_pop': x['predicted_item_norm_pop']}), sessions_chameleon_recommendations_log)) </DeepExtract> sessions_chameleon_recommendations_log = [] global_eval_hour_id += 1 if FLAGS.use_local_cache_model_dir: tf.logging.info('Uploading cached results to GCS') upload_model_output_to_gcs(model_output_dir, gcs_model_dir=gcs_model_output_dir, files_pattern=['.csv', '.json']) tf.logging.info('Finalized Training') save_eval_benchmark_metrics_csv(eval_sessions_metrics_log, model_output_dir, training_hours_for_each_eval=FLAGS.training_hours_for_each_eval) if FLAGS.save_eval_sessions_negative_samples: <DeepExtract> append_lines_to_text_file(os.path.join(model_output_dir, output_file), map(lambda x: json.dumps({'session_id': x['session_id'], 'negative_items': x['negative_items']}), sessions_negative_items_log)) </DeepExtract> if FLAGS.save_eval_sessions_recommendations: <DeepExtract> 
append_lines_to_text_file(os.path.join(model_output_dir, output_file), map(lambda x: json.dumps({'eval_hour_id': global_eval_hour_id, 'session_id': x['session_id'], 'next_click_labels': x['next_click_labels'], 'predicted_item_ids': x['predicted_item_ids'], 'predicted_item_probs': x['predicted_item_probs'], 'predicted_item_norm_pop': x['predicted_item_norm_pop']}), sessions_chameleon_recommendations_log)) </DeepExtract> tf.logging.info('Saved eval metrics') if FLAGS.use_local_cache_model_dir: upload_model_output_to_gcs(model_output_dir, gcs_model_dir=gcs_model_output_dir, files_pattern=None) log_elapsed_time(start_train, 'Finalized TRAINING Loop') except Exception as ex: tf.logging.error('ERROR: {}'.format(ex)) raise
def main(unused_argv): try: tf_config_env = json.loads(os.environ.get('TF_CONFIG', '{}')) task_data = tf_config_env.get('task') or {'type': 'master', 'index': 0} trial = task_data.get('trial') running_on_mlengine = len(tf_config_env) > 0 print('Running {}'.format('on Google ML Engine' if running_on_mlengine else 'on a server/machine')) logging.getLogger('tensorflow').propagate = running_on_mlengine tf.logging.info('Starting training job') gcs_model_output_dir = FLAGS.model_dir if FLAGS.use_local_cache_model_dir: model_output_dir = tempfile.mkdtemp() tf.logging.info('Created local temp folder for models output: {}'.format(model_output_dir)) else: model_output_dir = gcs_model_output_dir if trial is not None: model_output_dir = os.path.join(model_output_dir, trial) gcs_model_output_dir = os.path.join(gcs_model_output_dir, trial) tf.logging.info('Hyperparameter Tuning - Trial {} - model_dir = {} - gcs_model_output_dir = {} '.format(trial, model_output_dir, gcs_model_output_dir)) tf.logging.info('Will save temporary model outputs to {}'.format(model_output_dir)) if FLAGS.warmup_model_dir != None: tf.logging.info('Copying model outputs from previous job ({}) for warm start'.format(FLAGS.warmup_model_dir)) dowload_model_output_from_gcs(model_output_dir, gcs_model_dir=FLAGS.warmup_model_dir, files_pattern=['graph.pb', 'model.ckpt-', 'checkpoint']) local_files_after_download_to_debug = list(glob.iglob('{}/**/*'.format(model_output_dir), recursive=True)) tf.logging.info('Files copied from GCS to warm start training: {}'.format(local_files_after_download_to_debug)) tf.logging.info('Loading ACR module assets') (acr_label_encoders, articles_metadata_df, content_article_embeddings_matrix) = load_acr_module_resources(FLAGS.acr_module_resources_path) l2_normalizer_by_sample = Normalizer(norm='l2') content_article_embeddings_matrix = l2_normalizer_by_sample.fit_transform(content_article_embeddings_matrix) content_article_embeddings_matrix = content_article_embeddings_matrix * FLAGS.content_embedding_scale_factor articles_features_config = {'article_id': {'type': 'categorical', 'dtype': 'int'}, 'created_at_ts': {'type': 'numerical', 'dtype': 'int'}, 'category0': {'type': 'categorical', 'dtype': 'int'}, 'category1': {'type': 'categorical', 'dtype': 'int'}, 'author': {'type': 'categorical', 'dtype': 'int'}} feature_groups = {'category': ['category0', 'category1'], 'author': ['author']} if FLAGS.enabled_articles_input_features_groups != [ALL_FEATURES]: for feature_group in feature_groups: if feature_group not in FLAGS.enabled_articles_input_features_groups: for feature in feature_groups[feature_group]: del articles_features_config[feature] for feature_name in articles_features_config: if feature_name in acr_label_encoders and articles_features_config[feature_name]['type'] == 'categorical': articles_features_config[feature_name]['cardinality'] = len(acr_label_encoders[feature_name]) tf.logging.info('Article Features: {}'.format(articles_features_config)) articles_features_config = articles_features_config articles_metadata = {} for feature_name in articles_features_config: articles_metadata[feature_name] = articles_metadata_df[feature_name].values articles_metadata[feature_name] = np.hstack([[0], articles_metadata[feature_name]]) articles_metadata = articles_metadata tf.logging.info('Loading NAR module preprocesing assets') nar_label_encoders = load_nar_module_preprocessing_resources(FLAGS.nar_module_preprocessing_resources_path) session_features_config = {'single_features': {'user_id': {'type': 'categorical', 
'dtype': 'bytes'}, 'session_id': {'type': 'numerical', 'dtype': 'int'}, 'session_size': {'type': 'numerical', 'dtype': 'int'}, 'session_start': {'type': 'numerical', 'dtype': 'int'}}, 'sequence_features': {'event_timestamp': {'type': 'numerical', 'dtype': 'int'}, 'item_clicked': {'type': 'categorical', 'dtype': 'int'}, 'city': {'type': 'categorical', 'dtype': 'int'}, 'region': {'type': 'categorical', 'dtype': 'int'}, 'country': {'type': 'categorical', 'dtype': 'int'}, 'device': {'type': 'categorical', 'dtype': 'int'}, 'os': {'type': 'categorical', 'dtype': 'int'}, 'local_hour_sin': {'type': 'numerical', 'dtype': 'float'}, 'local_hour_cos': {'type': 'numerical', 'dtype': 'float'}, 'weekday': {'type': 'numerical', 'dtype': 'float'}, 'referrer_class': {'type': 'categorical', 'dtype': 'int'}}} feature_groups = {'time': ['local_hour_sin', 'local_hour_cos', 'weekday'], 'device': ['device', 'os'], 'location': ['country', 'region', 'city'], 'referrer': ['referrer_class']} if FLAGS.enabled_clicks_input_features_groups != [ALL_FEATURES]: for feature_group in feature_groups: if feature_group not in FLAGS.enabled_clicks_input_features_groups: for feature in feature_groups[feature_group]: del session_features_config['sequence_features'][feature] for feature_groups_key in session_features_config: features_group_config = session_features_config[feature_groups_key] for feature_name in features_group_config: if feature_name in nar_label_encoders and features_group_config[feature_name]['type'] == 'categorical': features_group_config[feature_name]['cardinality'] = len(nar_label_encoders[feature_name]) tf.logging.info('Session Features: {}'.format(session_features_config)) session_features_config = session_features_config tf.logging.info('Building NAR model') global eval_sessions_metrics_log, clicked_items_state, sessions_negative_items_log, sessions_chameleon_recommendations_log, global_eval_hour_id eval_sessions_metrics_log = [] clicked_items_state = ClickedItemsState(FLAGS.recent_clicks_buffer_hours, FLAGS.recent_clicks_buffer_max_size, FLAGS.recent_clicks_for_normalization, content_article_embeddings_matrix.shape[0]) run_config = tf.estimator.RunConfig(tf_random_seed=RANDOM_SEED, keep_checkpoint_max=1, save_checkpoints_secs=1200, save_summary_steps=100, log_step_count_steps=100) estimator = tf.estimator.Estimator(config=run_config, model_dir=model_output_dir, model_fn=nar_module_model_fn, params={'batch_size': FLAGS.batch_size, 'lr': FLAGS.learning_rate, 'dropout_keep_prob': FLAGS.dropout_keep_prob, 'reg_weight_decay': FLAGS.reg_l2, 'recent_clicks_buffer_hours': FLAGS.recent_clicks_buffer_hours, 'recent_clicks_buffer_max_size': FLAGS.recent_clicks_buffer_max_size, 'recent_clicks_for_normalization': FLAGS.recent_clicks_for_normalization, 'eval_metrics_top_n': FLAGS.eval_metrics_top_n, 'CAR_embedding_size': FLAGS.CAR_embedding_size, 'rnn_units': FLAGS.rnn_units, 'train_total_negative_samples': FLAGS.train_total_negative_samples, 'train_negative_samples_from_buffer': FLAGS.train_negative_samples_from_buffer, 'eval_total_negative_samples': FLAGS.eval_total_negative_samples, 'eval_negative_samples_from_buffer': FLAGS.eval_negative_samples_from_buffer, 'softmax_temperature': FLAGS.softmax_temperature, 'save_histograms': FLAGS.save_histograms, 'eval_metrics_by_session_position': FLAGS.eval_metrics_by_session_position, 'novelty_reg_factor': FLAGS.novelty_reg_factor, 'diversity_reg_factor': FLAGS.diversity_reg_factor, 'eval_negative_sample_relevance': FLAGS.eval_negative_sample_relevance, 'eval_cold_start': 
FLAGS.eval_cold_start, 'session_features_config': session_features_config, 'articles_features_config': articles_features_config, 'articles_metadata': articles_metadata, 'content_article_embeddings_matrix': content_article_embeddings_matrix}) model = estimator tf.logging.info('Getting training file names') train_files = resolve_files(FLAGS.train_set_path_regex) if FLAGS.train_files_from > FLAGS.train_files_up_to: raise Exception('Final training file cannot be lower than Starting training file') train_files = train_files[FLAGS.train_files_from:FLAGS.train_files_up_to + 1] tf.logging.info('{} files where the network will be trained and evaluated on, from {} to {}'.format(len(train_files), train_files[0], train_files[-1])) start_train = time() tf.logging.info('Starting Training Loop') training_files_chunks = list(chunks(train_files, FLAGS.training_hours_for_each_eval)) for chunk_id in range(0, len(training_files_chunks) - 1): training_files_chunk = training_files_chunks[chunk_id] tf.logging.info('Training files from {} to {}'.format(training_files_chunk[0], training_files_chunk[-1])) model.train(input_fn=lambda : prepare_dataset_iterator(training_files_chunk, session_features_config, batch_size=FLAGS.batch_size, truncate_session_length=FLAGS.truncate_session_length)) if chunk_id < len(training_files_chunks) - 1: eval_file = training_files_chunks[chunk_id + 1][0] tf.logging.info('Evaluating file {}'.format(eval_file)) model.evaluate(input_fn=lambda : prepare_dataset_iterator(eval_file, session_features_config, batch_size=FLAGS.batch_size, truncate_session_length=FLAGS.truncate_session_length)) if chunk_id % FLAGS.save_results_each_n_evals == 0: tf.logging.info('Saving eval metrics') save_eval_benchmark_metrics_csv(eval_sessions_metrics_log, model_output_dir, training_hours_for_each_eval=FLAGS.training_hours_for_each_eval) if FLAGS.save_eval_sessions_negative_samples: append_lines_to_text_file(os.path.join(model_output_dir, output_file), map(lambda x: json.dumps({'session_id': x['session_id'], 'negative_items': x['negative_items']}), sessions_negative_items_log)) sessions_negative_items_log = [] if FLAGS.save_eval_sessions_recommendations: append_lines_to_text_file(os.path.join(model_output_dir, output_file), map(lambda x: json.dumps({'eval_hour_id': global_eval_hour_id, 'session_id': x['session_id'], 'next_click_labels': x['next_click_labels'], 'predicted_item_ids': x['predicted_item_ids'], 'predicted_item_probs': x['predicted_item_probs'], 'predicted_item_norm_pop': x['predicted_item_norm_pop']}), sessions_chameleon_recommendations_log)) sessions_chameleon_recommendations_log = [] global_eval_hour_id += 1 if FLAGS.use_local_cache_model_dir: tf.logging.info('Uploading cached results to GCS') upload_model_output_to_gcs(model_output_dir, gcs_model_dir=gcs_model_output_dir, files_pattern=['.csv', '.json']) tf.logging.info('Finalized Training') save_eval_benchmark_metrics_csv(eval_sessions_metrics_log, model_output_dir, training_hours_for_each_eval=FLAGS.training_hours_for_each_eval) if FLAGS.save_eval_sessions_negative_samples: append_lines_to_text_file(os.path.join(model_output_dir, output_file), map(lambda x: json.dumps({'session_id': x['session_id'], 'negative_items': x['negative_items']}), sessions_negative_items_log)) if FLAGS.save_eval_sessions_recommendations: append_lines_to_text_file(os.path.join(model_output_dir, output_file), map(lambda x: json.dumps({'eval_hour_id': global_eval_hour_id, 'session_id': x['session_id'], 'next_click_labels': x['next_click_labels'], 'predicted_item_ids': 
x['predicted_item_ids'], 'predicted_item_probs': x['predicted_item_probs'], 'predicted_item_norm_pop': x['predicted_item_norm_pop']}), sessions_chameleon_recommendations_log)) tf.logging.info('Saved eval metrics') if FLAGS.use_local_cache_model_dir: upload_model_output_to_gcs(model_output_dir, gcs_model_dir=gcs_model_output_dir, files_pattern=None) log_elapsed_time(start_train, 'Finalized TRAINING Loop') except Exception as ex: tf.logging.error('ERROR: {}'.format(ex)) raise
chameleon_recsys
positive
def get_constant_lr(self, lr_value, init_step=0):
    <DeepExtract>
    with self.graph.as_default():
        self.global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(init_step), trainable=False)
        print('global step variable prepared')
        return self.global_step
    </DeepExtract>
    with self.graph.as_default():
        print('constant lr, values {}'.format(lr_value))
        self.lr = tf.Variable(lr_value, trainable=False, dtype=tf.float32, name='lr')
        return self.lr
def get_constant_lr(self, lr_value, init_step=0):
    with self.graph.as_default():
        self.global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(init_step), trainable=False)
        print('global step variable prepared')
        return self.global_step
    with self.graph.as_default():
        print('constant lr, values {}'.format(lr_value))
        self.lr = tf.Variable(lr_value, trainable=False, dtype=tf.float32, name='lr')
        return self.lr
Centripetal-SGD
positive
def findHeight(node):
    if node is None:
        return 0
    <DeepExtract>
    if node.left is None:
        lheight = 0
    lheight = findHeight(node.left.left)
    rheight = findHeight(node.left.right)
    lheight = max(lheight, rheight) + 1
    </DeepExtract>
    <DeepExtract>
    if node.right is None:
        rheight = 0
    lheight = findHeight(node.right.left)
    rheight = findHeight(node.right.right)
    rheight = max(lheight, rheight) + 1
    </DeepExtract>
    return max(lheight, rheight) + 1
def findHeight(node):
    if node is None:
        return 0
    if node.left is None:
        lheight = 0
    lheight = findHeight(node.left.left)
    rheight = findHeight(node.left.right)
    lheight = max(lheight, rheight) + 1
    if node.right is None:
        rheight = 0
    lheight = findHeight(node.right.left)
    rheight = findHeight(node.right.right)
    rheight = max(lheight, rheight) + 1
    return max(lheight, rheight) + 1
challenges
positive
def testInstanceValidator_DetectDuplicateEntityKeys(self):
    with self.assertRaises(SystemExit):
        <DeepExtract>
        parser = _ParserHelper([path.join(_TESTCASE_PATH, 'BAD', 'duplicate_key.yaml')]).GetEntities()
        </DeepExtract>
        del parser
def testInstanceValidator_DetectDuplicateEntityKeys(self):
    with self.assertRaises(SystemExit):
        parser = _ParserHelper([path.join(_TESTCASE_PATH, 'BAD', 'duplicate_key.yaml')]).GetEntities()
        del parser
digitalbuildings
positive
def score(self, X, y=None): """Compute the log probability under the model. Parameters ---------- X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X """ <DeepExtract> check_is_fitted(self, 'means_') X = check_array(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: (logprob, _) = (np.array([]), np.empty((0, self.n_components))) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) (logprob, _) = (logprob, responsibilities) </DeepExtract> return logprob
def score(self, X, y=None): """Compute the log probability under the model. Parameters ---------- X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X """ check_is_fitted(self, 'means_') X = check_array(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: (logprob, _) = (np.array([]), np.empty((0, self.n_components))) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) (logprob, _) = (logprob, responsibilities) return logprob
bhmm
positive
def test_send_notification_to_followers(self):
    Follower.objects.follow('any@test.com', 'test_user', self.comment_obj.content_object)
    followers = Follower.objects.filter_for_model_object(self.comment_obj.content_object).exclude(email=self.comment_obj.email)
    self.assertEqual(followers.count(), 1)
    self.email_service.send_notification_to_followers()
    self.email_service._email_thread.join()
    self.assertEqual(len(mail.outbox), 1)
    sent_email = mail.outbox[0]
    self.assertIsInstance(sent_email, EmailMultiAlternatives)
    username = self.comment_obj.get_username()
    thread_name = str(self.comment_obj.content_object)
    <DeepExtract>
    for arg in args:
        self.assertIs(arg in sent_email.body, True, msg=f'{arg} not present in email contents')
    self.assertTrue(self.site.name in sent_email.body)
    self.assertTrue(self.content_object_url in sent_email.body)
    </DeepExtract>
    self.assertEqual(sent_email.subject, EmailInfo.NOTIFICATION_SUBJECT.format(username=username, thread_name=thread_name))
    self.receivers = [followers.first().email]
    <DeepExtract>
    self.assertEqual(sent_email.from_email, self.sender)
    self.assertEqual(sent_email.to, self.receivers)
    if self.email_service.is_html:
        self.assertEqual(sent_email.alternatives[0][1], 'text/html')
    else:
        self.assertEqual(sent_email.alternatives, [])
    </DeepExtract>
def test_send_notification_to_followers(self):
    Follower.objects.follow('any@test.com', 'test_user', self.comment_obj.content_object)
    followers = Follower.objects.filter_for_model_object(self.comment_obj.content_object).exclude(email=self.comment_obj.email)
    self.assertEqual(followers.count(), 1)
    self.email_service.send_notification_to_followers()
    self.email_service._email_thread.join()
    self.assertEqual(len(mail.outbox), 1)
    sent_email = mail.outbox[0]
    self.assertIsInstance(sent_email, EmailMultiAlternatives)
    username = self.comment_obj.get_username()
    thread_name = str(self.comment_obj.content_object)
    for arg in args:
        self.assertIs(arg in sent_email.body, True, msg=f'{arg} not present in email contents')
    self.assertTrue(self.site.name in sent_email.body)
    self.assertTrue(self.content_object_url in sent_email.body)
    self.assertEqual(sent_email.subject, EmailInfo.NOTIFICATION_SUBJECT.format(username=username, thread_name=thread_name))
    self.receivers = [followers.first().email]
    self.assertEqual(sent_email.from_email, self.sender)
    self.assertEqual(sent_email.to, self.receivers)
    if self.email_service.is_html:
        self.assertEqual(sent_email.alternatives[0][1], 'text/html')
    else:
        self.assertEqual(sent_email.alternatives, [])
Comment
positive
def retrieve_task(self, uuid): """Retrieve a :class:`qarnot.task.Task` from its uuid :param str uuid: Desired task uuid :rtype: :class:`~qarnot.task.Task` :returns: Existing task defined by the given uuid :raises qarnot.exceptions.MissingTaskException: task does not exist :raises qarnot.exceptions.UnauthorizedException: invalid credentials :raises qarnot.exceptions.QarnotGenericException: API general error, see message for details """ <DeepExtract> while True: try: ret = self._http.get(self.cluster + get_url('task update', uuid=uuid), timeout=self.timeout, **kwargs) if ret.status_code == 401: raise UnauthorizedException() response = ret except ConnectionError as exception: if str(exception) == '(\'Connection aborted.\', BadStatusLine("\'\'",))': pass else: raise </DeepExtract> if response.status_code == 404: raise MissingTaskException(response.json()['message']) raise_on_error(response) return Task.from_json(self, response.json())
def retrieve_task(self, uuid): """Retrieve a :class:`qarnot.task.Task` from its uuid :param str uuid: Desired task uuid :rtype: :class:`~qarnot.task.Task` :returns: Existing task defined by the given uuid :raises qarnot.exceptions.MissingTaskException: task does not exist :raises qarnot.exceptions.UnauthorizedException: invalid credentials :raises qarnot.exceptions.QarnotGenericException: API general error, see message for details """ while True: try: ret = self._http.get(self.cluster + get_url('task update', uuid=uuid), timeout=self.timeout, **kwargs) if ret.status_code == 401: raise UnauthorizedException() response = ret except ConnectionError as exception: if str(exception) == '(\'Connection aborted.\', BadStatusLine("\'\'",))': pass else: raise if response.status_code == 404: raise MissingTaskException(response.json()['message']) raise_on_error(response) return Task.from_json(self, response.json())
coalition
positive
def get_uploaded_hashes_path(self, uploaded_hashfile):
    if not self.is_valid_uploaded_hashfile(uploaded_hashfile):
        return ''
    <DeepExtract>
    files = self.filesystem.get_files(self.uploaded_hashes_path)
    </DeepExtract>
    uploaded_hashfile = files[uploaded_hashfile]
    return uploaded_hashfile['path']
def get_uploaded_hashes_path(self, uploaded_hashfile):
    if not self.is_valid_uploaded_hashfile(uploaded_hashfile):
        return ''
    files = self.filesystem.get_files(self.uploaded_hashes_path)
    uploaded_hashfile = files[uploaded_hashfile]
    return uploaded_hashfile['path']
crackerjack
positive
def init_next_workchain(self): """Initialize the next workchain calculation.""" try: self.ctx.inputs except AttributeError as no_inputs: raise ValueError('no input dictionary was defined in self.ctx.inputs') from no_inputs self.ctx.inputs.update(self.exposed_inputs(self._next_workchain)) if self.ctx.inputs.parameters.converge.testing: self.report('TESTING') settings = self.ctx.converge.settings param_dict = self.ctx.inputs.parameters if not self.ctx.running_kpoints and (not self.ctx.running_pw): if settings.pwcutoff_org is None and settings.supplied_kmesh: location = 'test-case:test_converge_wc/pw' elif settings.pwcutoff_org is not None and (not settings.supplied_kmesh): location = 'test-case:test_converge_wc/kgrid' else: location = 'test-case:test_converge_wc/both' elif settings.pwcutoff_org is None and settings.supplied_kmesh: location = 'test-case:test_converge_wc/pw/' + str(int(settings.pwcutoff)) elif settings.pwcutoff_org is not None and (not settings.supplied_kmesh): location = 'test-case:test_converge_wc/kgrid/' + str(settings.kgrid[0]) + '_' + str(settings.kgrid[1]) + '_' + str(settings.kgrid[2]) else: location = 'test-case:test_converge_wc/both/' + str(int(settings.pwcutoff)) + '_' + str(settings.kgrid[0]) + '_' + str(settings.kgrid[1]) + '_' + str(settings.kgrid[2]) param_dict['incar'] = {'system': location} self.ctx.converge.parameters = param_dict if self.ctx.set_input_nodes: <DeepExtract> if self.ctx.converge.settings.pwcutoff_org is None or self.ctx.inputs.parameters.converge.testing: self.ctx.inputs.parameters = self.ctx.converge.parameters if self.ctx.inputs.parameters.relax.perform and (not self.ctx.inputs.parameters.converge.relax): self.ctx.inputs.parameters.relax.perform = False if self.ctx.inputs.parameters.converge.relax: self.ctx.inputs.parameters.relax.perform = True self.ctx.inputs.structure = self.ctx.converge.structure.clone() if not self.ctx.converge.settings.supplied_kmesh: self.ctx.inputs.kpoints = self.ctx.converge.kpoints.clone() else: self.ctx.inputs.kpoints = self.inputs.kpoints </DeepExtract> self.ctx.inputs_ready = prepare_process_inputs(self.ctx.inputs, namespaces=['verify', 'dynamics'], exclude_parameters=['converge'])
def init_next_workchain(self): """Initialize the next workchain calculation.""" try: self.ctx.inputs except AttributeError as no_inputs: raise ValueError('no input dictionary was defined in self.ctx.inputs') from no_inputs self.ctx.inputs.update(self.exposed_inputs(self._next_workchain)) if self.ctx.inputs.parameters.converge.testing: self.report('TESTING') settings = self.ctx.converge.settings param_dict = self.ctx.inputs.parameters if not self.ctx.running_kpoints and (not self.ctx.running_pw): if settings.pwcutoff_org is None and settings.supplied_kmesh: location = 'test-case:test_converge_wc/pw' elif settings.pwcutoff_org is not None and (not settings.supplied_kmesh): location = 'test-case:test_converge_wc/kgrid' else: location = 'test-case:test_converge_wc/both' elif settings.pwcutoff_org is None and settings.supplied_kmesh: location = 'test-case:test_converge_wc/pw/' + str(int(settings.pwcutoff)) elif settings.pwcutoff_org is not None and (not settings.supplied_kmesh): location = 'test-case:test_converge_wc/kgrid/' + str(settings.kgrid[0]) + '_' + str(settings.kgrid[1]) + '_' + str(settings.kgrid[2]) else: location = 'test-case:test_converge_wc/both/' + str(int(settings.pwcutoff)) + '_' + str(settings.kgrid[0]) + '_' + str(settings.kgrid[1]) + '_' + str(settings.kgrid[2]) param_dict['incar'] = {'system': location} self.ctx.converge.parameters = param_dict if self.ctx.set_input_nodes: if self.ctx.converge.settings.pwcutoff_org is None or self.ctx.inputs.parameters.converge.testing: self.ctx.inputs.parameters = self.ctx.converge.parameters if self.ctx.inputs.parameters.relax.perform and (not self.ctx.inputs.parameters.converge.relax): self.ctx.inputs.parameters.relax.perform = False if self.ctx.inputs.parameters.converge.relax: self.ctx.inputs.parameters.relax.perform = True self.ctx.inputs.structure = self.ctx.converge.structure.clone() if not self.ctx.converge.settings.supplied_kmesh: self.ctx.inputs.kpoints = self.ctx.converge.kpoints.clone() else: self.ctx.inputs.kpoints = self.inputs.kpoints self.ctx.inputs_ready = prepare_process_inputs(self.ctx.inputs, namespaces=['verify', 'dynamics'], exclude_parameters=['converge'])
aiida-vasp
positive
def excutor(host, outpath, args):
    <DeepExtract>
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(host, username='root', password='root', allow_agent=True)
        conn = ssh
    except:
        conn = None
    </DeepExtract>
    if not conn:
        return [host, None]
    <DeepExtract>
    cmd = '%s %s' % (outpath, args)
    cmd = cmd
    </DeepExtract>
    <DeepExtract>
    (stdin, stdout, stderr) = conn.exec_command(cmd)
    results = stdout.read()
    result = results
    </DeepExtract>
    result = json.dumps(result)
    return [host, result]
def excutor(host, outpath, args):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(host, username='root', password='root', allow_agent=True)
        conn = ssh
    except:
        conn = None
    if not conn:
        return [host, None]
    cmd = '%s %s' % (outpath, args)
    cmd = cmd
    (stdin, stdout, stderr) = conn.exec_command(cmd)
    results = stdout.read()
    result = results
    result = json.dumps(result)
    return [host, result]
AWD_FrameWork
positive
def __init__(self, image_set, devkit_path=None): imdb.__init__(self, 'cityscape_foggy_' + image_set) self._year = 2007 self._image_set = image_set self._devkit_path = cfg_d.FOGGYCITY self._data_path = os.path.join(self._devkit_path) self._classes = ('__background__', 'bus', 'bicycle', 'car', 'motorcycle', 'person', 'rider', 'train', 'truck') self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes))) self._image_ext = '.jpg' <DeepExtract> image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main', self._image_set + '.txt') print(image_set_file) assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file) with open(image_set_file) as f: image_index = [x.strip() for x in f.readlines()] new = [] for image in image_index: if not 'source' in image: new.append(image) self._image_index = new </DeepExtract> self._roidb_handler = self.gt_roidb self._salt = str(uuid.uuid4()) self._comp_id = 'comp4' self.config = {'cleanup': True, 'use_salt': True, 'use_diff': False, 'matlab_eval': False, 'rpn_file': None, 'min_size': 2} assert os.path.exists(self._devkit_path), 'VOCdevkit path does not exist: {}'.format(self._devkit_path) assert os.path.exists(self._data_path), 'Path does not exist: {}'.format(self._data_path)
def __init__(self, image_set, devkit_path=None): imdb.__init__(self, 'cityscape_foggy_' + image_set) self._year = 2007 self._image_set = image_set self._devkit_path = cfg_d.FOGGYCITY self._data_path = os.path.join(self._devkit_path) self._classes = ('__background__', 'bus', 'bicycle', 'car', 'motorcycle', 'person', 'rider', 'train', 'truck') self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes))) self._image_ext = '.jpg' image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main', self._image_set + '.txt') print(image_set_file) assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file) with open(image_set_file) as f: image_index = [x.strip() for x in f.readlines()] new = [] for image in image_index: if not 'source' in image: new.append(image) self._image_index = new self._roidb_handler = self.gt_roidb self._salt = str(uuid.uuid4()) self._comp_id = 'comp4' self.config = {'cleanup': True, 'use_salt': True, 'use_diff': False, 'matlab_eval': False, 'rpn_file': None, 'min_size': 2} assert os.path.exists(self._devkit_path), 'VOCdevkit path does not exist: {}'.format(self._devkit_path) assert os.path.exists(self._data_path), 'Path does not exist: {}'.format(self._data_path)
DA_Detection
positive
def _find_longest_flat_free_ends(points, threshold): """Returns `line, N` such that each of the first N points are no further than threshold from the line.""" assert len(points) > 1 N = 2 line = points[:N].copy() while N < len(points): p = points[N:N + 1] try: hull.add_points(p) except NameError: if points_are_on_line(points[:N + 1], line, threshold): N += 1 <DeepExtract> if len(np.append(line, p, axis=0)) == 1: line = np.append(line, p, axis=0)[:1].repeat(2, axis=0) points = np.append(line, p, axis=0) - np.append(line, p, axis=0)[0] for i in range(1, len(points)): l = points[i] if np.any(l != 0): break if np.all(l == 0): line = np.append(line, p, axis=0)[:2] disps = points.dot(l) line = np.append(line, p, axis=0)[[disps.argmin(), disps.argmax()]] </DeepExtract> continue else: hull = ConvexHull(points[:N + 1], incremental=True) vertices = points[hull.vertices] edge_directions = np.roll(vertices, -1, axis=0) - vertices edge_directions = edge_directions / np.linalg.norm(edge_directions, axis=-1, keepdims=True) edge_normals = np.roll(edge_directions, 1, axis=-1) edge_normals[..., 0] *= -1 widths = np.einsum('ijk,ik->ij', vertices[None] - vertices[:, None], edge_normals).max(axis=-1) best_width_id = widths.argmin() best_width = widths[best_width_id] if best_width > threshold * 2: break N += 1 p0_best = vertices[best_width_id] l_best = edge_directions[best_width_id] projections = (vertices - p0_best).dot(l_best) line[0] = p0_best + projections.min() * l_best line[1] = p0_best + projections.max() * l_best line = line + edge_normals[best_width_id] * best_width / 2 return (line, N)
def _find_longest_flat_free_ends(points, threshold): """Returns `line, N` such that each of the first N points are no further than threshold from the line.""" assert len(points) > 1 N = 2 line = points[:N].copy() while N < len(points): p = points[N:N + 1] try: hull.add_points(p) except NameError: if points_are_on_line(points[:N + 1], line, threshold): N += 1 if len(np.append(line, p, axis=0)) == 1: line = np.append(line, p, axis=0)[:1].repeat(2, axis=0) points = np.append(line, p, axis=0) - np.append(line, p, axis=0)[0] for i in range(1, len(points)): l = points[i] if np.any(l != 0): break if np.all(l == 0): line = np.append(line, p, axis=0)[:2] disps = points.dot(l) line = np.append(line, p, axis=0)[[disps.argmin(), disps.argmax()]] continue else: hull = ConvexHull(points[:N + 1], incremental=True) vertices = points[hull.vertices] edge_directions = np.roll(vertices, -1, axis=0) - vertices edge_directions = edge_directions / np.linalg.norm(edge_directions, axis=-1, keepdims=True) edge_normals = np.roll(edge_directions, 1, axis=-1) edge_normals[..., 0] *= -1 widths = np.einsum('ijk,ik->ij', vertices[None] - vertices[:, None], edge_normals).max(axis=-1) best_width_id = widths.argmin() best_width = widths[best_width_id] if best_width > threshold * 2: break N += 1 p0_best = vertices[best_width_id] l_best = edge_directions[best_width_id] projections = (vertices - p0_best).dot(l_best) line[0] = p0_best + projections.min() * l_best line[1] = p0_best + projections.max() * l_best line = line + edge_normals[best_width_id] * best_width / 2 return (line, N)
Deep-Vectorization-of-Technical-Drawings
positive
def ltl_generic(self, prop, k, k_min=0): lemmas = self.hts.lemmas self._init_at_time(self.hts.vars, k) <DeepExtract> self.vars_time = [] for t in range(k + 1): vars_at_t = [] for v in self.hts.vars: vars_at_t.append((v, TS.get_timed_name(v, t))) self.vars_time.append((t, dict(vars_at_t))) self.vars_time = dict(self.vars_time) </DeepExtract> <DeepExtract> if lemmas is not None: (self.hts, res) = self.add_lemmas(self.hts, prop, lemmas) if res: Logger.log('Lemmas imply the property', 1) Logger.log('', 0, not Logger.level(1)) (t, model) = (0, True) self.hts.reset_formulae() (t, model) = self.solve_inc(self.hts, prop, k) </DeepExtract> if model == True: return (VerificationStatus.TRUE, None, t) elif model is not None: model = self._remap_model(self.hts.vars, model, t) trace = self.generate_trace(model, t, get_free_variables(prop), find_loop=True) return (VerificationStatus.FALSE, trace, t) else: return (VerificationStatus.UNK, None, t)
def ltl_generic(self, prop, k, k_min=0): lemmas = self.hts.lemmas self._init_at_time(self.hts.vars, k) self.vars_time = [] for t in range(k + 1): vars_at_t = [] for v in self.hts.vars: vars_at_t.append((v, TS.get_timed_name(v, t))) self.vars_time.append((t, dict(vars_at_t))) self.vars_time = dict(self.vars_time) if lemmas is not None: (self.hts, res) = self.add_lemmas(self.hts, prop, lemmas) if res: Logger.log('Lemmas imply the property', 1) Logger.log('', 0, not Logger.level(1)) (t, model) = (0, True) self.hts.reset_formulae() (t, model) = self.solve_inc(self.hts, prop, k) if model == True: return (VerificationStatus.TRUE, None, t) elif model is not None: model = self._remap_model(self.hts.vars, model, t) trace = self.generate_trace(model, t, get_free_variables(prop), find_loop=True) return (VerificationStatus.FALSE, trace, t) else: return (VerificationStatus.UNK, None, t)
CoSA
positive
def _validate_link_reconstruction(self, samples, lbs): <DeepExtract> feat = [] emb_cache = utils.KeyDefaultDict(lambda x: self.embeddings_at(x)) for s in samples: feat.append(emb_cache[s[0]][s[1:]]) feat = np.array(feat) if gconf.debug: print('features shape: {}'.format(feat.shape)) feat = feat </DeepExtract> feat = np.abs(feat[:, 0] - feat[:, 1]) clf = LogisticRegression() try: cv = StratifiedKFold(lbs, n_folds=2, shuffle=True) parts = cv except TypeError: cv = StratifiedKFold(n_splits=2, shuffle=True) parts = cv.split(feat, lbs) val_score = [] for (tr, te) in parts: model = clf.fit(feat[tr], lbs[tr]) p = model.predict(feat[te]) val_score.append(f1_score(lbs[te], p)) return np.mean(val_score)
def _validate_link_reconstruction(self, samples, lbs): feat = [] emb_cache = utils.KeyDefaultDict(lambda x: self.embeddings_at(x)) for s in samples: feat.append(emb_cache[s[0]][s[1:]]) feat = np.array(feat) if gconf.debug: print('features shape: {}'.format(feat.shape)) feat = feat feat = np.abs(feat[:, 0] - feat[:, 1]) clf = LogisticRegression() try: cv = StratifiedKFold(lbs, n_folds=2, shuffle=True) parts = cv except TypeError: cv = StratifiedKFold(n_splits=2, shuffle=True) parts = cv.split(feat, lbs) val_score = [] for (tr, te) in parts: model = clf.fit(feat[tr], lbs[tr]) p = model.predict(feat[te]) val_score.append(f1_score(lbs[te], p)) return np.mean(val_score)
DynamicGEM
positive
def create_default_context():
    """Creates a ExecutionContext with default operations."""
    context = OperationContext()
    for modname in _default_op_modules:
        <DeepExtract>
        mod = __import__(modname)
        path = []
        for token in modname.split('.')[1:]:
            path.append(token)
            try:
                mod = getattr(mod, token)
            except AttributeError:
                raise BubblesError('Unable to get %s' % (path,))
        mod = mod
        </DeepExtract>
        context.add_operations_from(mod)
    return context
def create_default_context():
    """Creates a ExecutionContext with default operations."""
    context = OperationContext()
    for modname in _default_op_modules:
        mod = __import__(modname)
        path = []
        for token in modname.split('.')[1:]:
            path.append(token)
            try:
                mod = getattr(mod, token)
            except AttributeError:
                raise BubblesError('Unable to get %s' % (path,))
        mod = mod
        context.add_operations_from(mod)
    return context
bubbles
positive
def test_pcc_10207(): <DeepExtract> filepath = os.path.join(RS3TREE_DIR, 'maz-10207-excerpt.rs3') produced = RSTTree(filepath, debug=debug) </DeepExtract> prep_2_3 = ('preparation', [s(['2']), n(['3'])]) inter_2_4 = ('interpretation', [s([prep_2_3]), n(['4'])]) inter_2_5 = ('interpretation', [n([inter_2_4]), s(['5'])]) inter_2_6 = ('interpretation', [n([inter_2_5]), s(['6'])]) elab_7_8 = ('e-elaboration', [n(['7']), s(['8'])]) list_9_11 = ('list', [n(['9']), n(['10']), n(['11'])]) concession_7_11 = ('concession', [s([elab_7_8]), n([list_9_11])]) concession_14_15 = ('concession', [s(['14']), n(['15'])]) inter_16_17 = ('interpretation', [n(['16']), s(['17'])]) joint_14_17 = ('joint', [n([concession_14_15]), n([inter_16_17])]) inter_13_17 = ('interpretation', [n(['13']), s([joint_14_17])]) justify_12_17 = ('justify', [s(['12']), n([inter_13_17])]) list_7_17 = ('list', [n([concession_7_11]), n([justify_12_17])]) back_2_17 = ('background', [s([inter_2_6]), n([list_7_17])]) inter_2_18 = ('interpretation', [n([back_2_17]), s(['18'])]) expected = t(VIRTUAL_ROOT, [n(['1']), n([inter_2_18])]) assert produced.edu_strings == produced.tree.leaves() == ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18'] assert expected == produced.tree
def test_pcc_10207(): filepath = os.path.join(RS3TREE_DIR, 'maz-10207-excerpt.rs3') produced = RSTTree(filepath, debug=debug) prep_2_3 = ('preparation', [s(['2']), n(['3'])]) inter_2_4 = ('interpretation', [s([prep_2_3]), n(['4'])]) inter_2_5 = ('interpretation', [n([inter_2_4]), s(['5'])]) inter_2_6 = ('interpretation', [n([inter_2_5]), s(['6'])]) elab_7_8 = ('e-elaboration', [n(['7']), s(['8'])]) list_9_11 = ('list', [n(['9']), n(['10']), n(['11'])]) concession_7_11 = ('concession', [s([elab_7_8]), n([list_9_11])]) concession_14_15 = ('concession', [s(['14']), n(['15'])]) inter_16_17 = ('interpretation', [n(['16']), s(['17'])]) joint_14_17 = ('joint', [n([concession_14_15]), n([inter_16_17])]) inter_13_17 = ('interpretation', [n(['13']), s([joint_14_17])]) justify_12_17 = ('justify', [s(['12']), n([inter_13_17])]) list_7_17 = ('list', [n([concession_7_11]), n([justify_12_17])]) back_2_17 = ('background', [s([inter_2_6]), n([list_7_17])]) inter_2_18 = ('interpretation', [n([back_2_17]), s(['18'])]) expected = t(VIRTUAL_ROOT, [n(['1']), n([inter_2_18])]) assert produced.edu_strings == produced.tree.leaves() == ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18'] assert expected == produced.tree
discoursegraphs
positive
def append_flipped_images(self):
    num_images = self.num_images
    <DeepExtract>
    widths = [PIL.Image.open(self.image_path_at(i)).size[0] for i in range(self.num_images)]
    </DeepExtract>
    for i in range(num_images):
        boxes = self.roidb[i]['boxes'].copy()
        oldx1 = boxes[:, 0].copy()
        oldx2 = boxes[:, 2].copy()
        boxes[:, 0] = widths[i] - oldx2 - 1
        boxes[:, 2] = widths[i] - oldx1 - 1
        assert (boxes[:, 2] >= boxes[:, 0]).all()
        entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i]['gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'], 'flipped': True}
        self.roidb.append(entry)
    self._image_index = self._image_index * 2
def append_flipped_images(self):
    num_images = self.num_images
    widths = [PIL.Image.open(self.image_path_at(i)).size[0] for i in range(self.num_images)]
    for i in range(num_images):
        boxes = self.roidb[i]['boxes'].copy()
        oldx1 = boxes[:, 0].copy()
        oldx2 = boxes[:, 2].copy()
        boxes[:, 0] = widths[i] - oldx2 - 1
        boxes[:, 2] = widths[i] - oldx1 - 1
        assert (boxes[:, 2] >= boxes[:, 0]).all()
        entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i]['gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'], 'flipped': True}
        self.roidb.append(entry)
    self._image_index = self._image_index * 2
CenterNet
positive
def parse_sentinel_masters(response):
    result = {}
    for item in response:
        <DeepExtract>
        result = pairs_to_dict_typed(imap(nativestr, item), SENTINEL_STATE_TYPES)
        flags = set(result['flags'].split(','))
        for (name, flag) in (('is_master', 'master'), ('is_slave', 'slave'), ('is_sdown', 's_down'), ('is_odown', 'o_down'), ('is_sentinel', 'sentinel'), ('is_disconnected', 'disconnected'), ('is_master_down', 'master_down')):
            result[name] = flag in flags
        state = result
        </DeepExtract>
        result[state['name']] = state
    return result
def parse_sentinel_masters(response):
    result = {}
    for item in response:
        result = pairs_to_dict_typed(imap(nativestr, item), SENTINEL_STATE_TYPES)
        flags = set(result['flags'].split(','))
        for (name, flag) in (('is_master', 'master'), ('is_slave', 'slave'), ('is_sdown', 's_down'), ('is_odown', 'o_down'), ('is_sentinel', 'sentinel'), ('is_disconnected', 'disconnected'), ('is_master_down', 'master_down')):
            result[name] = flag in flags
        state = result
        result[state['name']] = state
    return result
AuditdPy
positive
def benchmark_logistic_regression(train_feats, train_targets, test_feats): t0 = time.time() <DeepExtract> Weights = af.constant(0, train_feats.dims()[1], train_targets.dims()[1]) for i in range(1000): (J, dJ) = cost(Weights, train_feats, train_targets, 1.0) err = af.max(af.abs(J)) if err < 0.01: print('Iteration {0:4d} Err: {1:4f}'.format(i + 1, err)) print('Training converged') Weights = Weights if verbose and (i + 1) % 10 == 0: print('Iteration {0:4d} Err: {1:4f}'.format(i + 1, err)) Weights = Weights - 0.1 * dJ if verbose: print('Training stopped after {0:d} iterations'.format(1000)) Weights = Weights </DeepExtract> af.eval(Weights) sync() t1 = time.time() dt = t1 - t0 print('Training time: {0:4.4f} s'.format(dt)) t0 = time.time() iters = 100 for i in range(iters): <DeepExtract> Z = af.matmul(test_feats, Weights) test_outputs = af.sigmoid(Z) </DeepExtract> af.eval(test_outputs) sync() t1 = time.time() dt = t1 - t0 print('Prediction time: {0:4.4f} s'.format(dt / iters))
def benchmark_logistic_regression(train_feats, train_targets, test_feats): t0 = time.time() Weights = af.constant(0, train_feats.dims()[1], train_targets.dims()[1]) for i in range(1000): (J, dJ) = cost(Weights, train_feats, train_targets, 1.0) err = af.max(af.abs(J)) if err < 0.01: print('Iteration {0:4d} Err: {1:4f}'.format(i + 1, err)) print('Training converged') Weights = Weights if verbose and (i + 1) % 10 == 0: print('Iteration {0:4d} Err: {1:4f}'.format(i + 1, err)) Weights = Weights - 0.1 * dJ if verbose: print('Training stopped after {0:d} iterations'.format(1000)) Weights = Weights af.eval(Weights) sync() t1 = time.time() dt = t1 - t0 print('Training time: {0:4.4f} s'.format(dt)) t0 = time.time() iters = 100 for i in range(iters): Z = af.matmul(test_feats, Weights) test_outputs = af.sigmoid(Z) af.eval(test_outputs) sync() t1 = time.time() dt = t1 - t0 print('Prediction time: {0:4.4f} s'.format(dt / iters))
arrayfire-python
positive
def stop_server(self): if self._server_started: if self._ready_to_serve and self._serve_in_loop: self._httpd.shutdown() self._serve_in_loop = False elif self._ready_to_serve: urllib2.urlopen(self.get_url()).read() self._ready_to_serve = False self._httpd.server_close() self._server_started = False <DeepExtract> try: os.remove(FakeServer.RESPONSE_FILE) os.remove(FakeServer.RESPONSE_CODE_FILE) os.remove(FakeServer.REQUEST_FILE) except: pass </DeepExtract> else: raise self.ServerStateException({'message': 'The server is already stopped.', 'state': 'off'})
def stop_server(self): if self._server_started: if self._ready_to_serve and self._serve_in_loop: self._httpd.shutdown() self._serve_in_loop = False elif self._ready_to_serve: urllib2.urlopen(self.get_url()).read() self._ready_to_serve = False self._httpd.server_close() self._server_started = False try: os.remove(FakeServer.RESPONSE_FILE) os.remove(FakeServer.RESPONSE_CODE_FILE) os.remove(FakeServer.REQUEST_FILE) except: pass else: raise self.ServerStateException({'message': 'The server is already stopped.', 'state': 'off'})
collectd-cloudwatch
positive
def orient_graph(self, df_data, graph, printout=None, **kwargs): """Orient an undirected graph using Jarfo, function modified for optimization. Args: df_data (pandas.DataFrame): Data umg (networkx.Graph): Graph to orient nruns (int): number of times to rerun for each pair (bootstrap) printout (str): (optional) Path to file where to save temporary results Returns: networkx.DiGraph: a directed graph, which might contain cycles .. warning: Requirement : Name of the nodes in the graph correspond to name of the variables in df_data """ if type(graph) == nx.DiGraph: edges = [a for a in list(graph.edges()) if (a[1], a[0]) in list(graph.edges())] oriented_edges = [a for a in list(graph.edges()) if (a[1], a[0]) not in list(graph.edges())] for a in edges: if (a[1], a[0]) in list(graph.edges()): edges.remove(a) output = nx.DiGraph() for i in oriented_edges: output.add_edge(*i) elif type(graph) == nx.Graph: edges = list(graph.edges()) output = nx.DiGraph() else: raise TypeError('Data type not understood.') res = [] df_task = DataFrame() for (idx, (a, b)) in enumerate(edges): df_task = df_task.append({'A': df_data[a].values.reshape((-1, 1)), 'B': df_data[b].values.reshape((-1, 1))}, ignore_index=True) <DeepExtract> def predict(df, model): df_task.columns = ['A', 'B'] df2 = model.extract(df_task) weights = model.predict(df2) if len(list(df_task.columns)) == 2: df_task.columns = ['A', 'B'] if self.model is None: raise AssertionError('Model has not been trained before predictions') df2 = DataFrame() for (idx, row) in df_task.iterrows(): df2 = df2.append(row, ignore_index=True) df2 = df2.append({'A': row['B'], 'B': row['A']}, ignore_index=True) weights = predict(deepcopy(df2), deepcopy(self.model))[::2] </DeepExtract> for (weight, (a, b)) in zip(weights, edges): if weight > 0: output.add_edge(a, b, weight=weight) else: output.add_edge(b, a, weight=abs(weight)) if printout is not None: res.append([str(a) + '-' + str(b), weight]) DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(printout, index=False) for node in list(df_data.columns.values): if node not in output.nodes(): output.add_node(node) return output
def orient_graph(self, df_data, graph, printout=None, **kwargs): """Orient an undirected graph using Jarfo, function modified for optimization. Args: df_data (pandas.DataFrame): Data umg (networkx.Graph): Graph to orient nruns (int): number of times to rerun for each pair (bootstrap) printout (str): (optional) Path to file where to save temporary results Returns: networkx.DiGraph: a directed graph, which might contain cycles .. warning: Requirement : Name of the nodes in the graph correspond to name of the variables in df_data """ if type(graph) == nx.DiGraph: edges = [a for a in list(graph.edges()) if (a[1], a[0]) in list(graph.edges())] oriented_edges = [a for a in list(graph.edges()) if (a[1], a[0]) not in list(graph.edges())] for a in edges: if (a[1], a[0]) in list(graph.edges()): edges.remove(a) output = nx.DiGraph() for i in oriented_edges: output.add_edge(*i) elif type(graph) == nx.Graph: edges = list(graph.edges()) output = nx.DiGraph() else: raise TypeError('Data type not understood.') res = [] df_task = DataFrame() for (idx, (a, b)) in enumerate(edges): df_task = df_task.append({'A': df_data[a].values.reshape((-1, 1)), 'B': df_data[b].values.reshape((-1, 1))}, ignore_index=True) def predict(df, model): df_task.columns = ['A', 'B'] df2 = model.extract(df_task) weights = model.predict(df2) if len(list(df_task.columns)) == 2: df_task.columns = ['A', 'B'] if self.model is None: raise AssertionError('Model has not been trained before predictions') df2 = DataFrame() for (idx, row) in df_task.iterrows(): df2 = df2.append(row, ignore_index=True) df2 = df2.append({'A': row['B'], 'B': row['A']}, ignore_index=True) weights = predict(deepcopy(df2), deepcopy(self.model))[::2] for (weight, (a, b)) in zip(weights, edges): if weight > 0: output.add_edge(a, b, weight=weight) else: output.add_edge(b, a, weight=abs(weight)) if printout is not None: res.append([str(a) + '-' + str(b), weight]) DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(printout, index=False) for node in list(df_data.columns.values): if node not in output.nodes(): output.add_node(node) return output
CausalDiscoveryToolbox
positive
def check_valid(self, obj: JsonElement, **kwargs: bool) -> ValidationResult: if obj is None: return None elif should_skip_env_var_str(obj, kwargs): return None elif isinstance(obj, (int, float)): return self.valid_fn(obj) <DeepExtract> </DeepExtract> if coerced is not None: self.valid_fn(coerced) return coerced else: raise AttributeError(f'Expected type {self.runtime_kind} but got {type(obj).__name__}')
def check_valid(self, obj: JsonElement, **kwargs: bool) -> ValidationResult: if obj is None: return None elif should_skip_env_var_str(obj, kwargs): return None elif isinstance(obj, (int, float)): return self.valid_fn(obj) if coerced is not None: self.valid_fn(coerced) return coerced else: raise AttributeError(f'Expected type {self.runtime_kind} but got {type(obj).__name__}')
cloudkeeper
positive
@classmethod
def Create(cls, os_path, tree='SOURCE_ROOT', ignore_unknown_type=False):
    if tree not in cls.trees:
        <DeepExtract>
        if IS_DEBUG:
            print('Not a valid sourceTree type: %s' % tree)
        </DeepExtract>
        return None
    fr = cls()
    fr.id = cls.GenerateId()
    fr['path'] = os_path
    fr['name'] = os.path.split(os_path)[1]
    fr['sourceTree'] = '<absolute>' if os.path.isabs(os_path) else tree
    fr.guess_file_type(ignore_unknown_type=ignore_unknown_type)
    return fr
@classmethod
def Create(cls, os_path, tree='SOURCE_ROOT', ignore_unknown_type=False):
    if tree not in cls.trees:
        if IS_DEBUG:
            print('Not a valid sourceTree type: %s' % tree)
        return None
    fr = cls()
    fr.id = cls.GenerateId()
    fr['path'] = os_path
    fr['name'] = os.path.split(os_path)[1]
    fr['sourceTree'] = '<absolute>' if os.path.isabs(os_path) else tree
    fr.guess_file_type(ignore_unknown_type=ignore_unknown_type)
    return fr
cocos2d-console
positive
def __init__(self): <DeepExtract> emulator_binaries = dict() emulator_dir = clean_path(add_ending_slash(str(GlobalConfig.SDK_DIR)) + 'emulator/') try: for the_file in list_files_in_dir(emulator_dir): file_path = os.path.join(emulator_dir, the_file) if os.path.isfile(file_path) and 'emulator' in file_path: binary_name = re.findall('emulator\\/(emulator*.+)', file_path) if binary_name: emulator_binaries[str(binary_name[0])] = file_path finally: if len(emulator_binaries) == 0: message = "Unable to find emulator binary files in direction '{}' of Android SDK." message = message.format(str(emulator_dir)) raise LauncherFlowInterruptedException(self.TAG, message) else: Printer.system_message(self.TAG, 'Emulator related binary files found in Android SDK:') for path in emulator_binaries.values(): Printer.system_message(self.TAG, ' * ' + Color.GREEN + path + Color.BLUE) self.emulator_bin_dict = emulator_binaries </DeepExtract> self.emulator_command_assembler = EmulatorCommandAssembler()
def __init__(self): emulator_binaries = dict() emulator_dir = clean_path(add_ending_slash(str(GlobalConfig.SDK_DIR)) + 'emulator/') try: for the_file in list_files_in_dir(emulator_dir): file_path = os.path.join(emulator_dir, the_file) if os.path.isfile(file_path) and 'emulator' in file_path: binary_name = re.findall('emulator\\/(emulator*.+)', file_path) if binary_name: emulator_binaries[str(binary_name[0])] = file_path finally: if len(emulator_binaries) == 0: message = "Unable to find emulator binary files in direction '{}' of Android SDK." message = message.format(str(emulator_dir)) raise LauncherFlowInterruptedException(self.TAG, message) else: Printer.system_message(self.TAG, 'Emulator related binary files found in Android SDK:') for path in emulator_binaries.values(): Printer.system_message(self.TAG, ' * ' + Color.GREEN + path + Color.BLUE) self.emulator_bin_dict = emulator_binaries self.emulator_command_assembler = EmulatorCommandAssembler()
AutomationTestSupervisor
positive
@patch('backend.lambdas.jobs.handlers.table')
def test_it_lists_jobs_events(self, table):
    <DeepExtract>
    now = round(datetime.datetime.utcnow().timestamp())
    if not sk:
        sk = '{}#{}'.format(str(now), '12345')
    stub = {'Id': job_id, 'Sk': sk, 'Type': 'JobEvent', 'CreatedAt': now, 'EventName': 'QuerySucceeded', **kwargs}
    </DeepExtract>
    table.get_item.return_value = {'Item': job_stub()}
    table.query.return_value = {'Items': [stub]}
    response = handlers.list_job_events_handler({'queryStringParameters': None, 'pathParameters': {'job_id': 'test'}, 'multiValueQueryStringParameters': None}, SimpleNamespace())
    resp_body = json.loads(response['body'])
    assert 200 == response['statusCode']
    assert 1 == len(resp_body['JobEvents'])
    assert stub == resp_body['JobEvents'][0]
@patch('backend.lambdas.jobs.handlers.table')
def test_it_lists_jobs_events(self, table):
    now = round(datetime.datetime.utcnow().timestamp())
    if not sk:
        sk = '{}#{}'.format(str(now), '12345')
    stub = {'Id': job_id, 'Sk': sk, 'Type': 'JobEvent', 'CreatedAt': now, 'EventName': 'QuerySucceeded', **kwargs}
    table.get_item.return_value = {'Item': job_stub()}
    table.query.return_value = {'Items': [stub]}
    response = handlers.list_job_events_handler({'queryStringParameters': None, 'pathParameters': {'job_id': 'test'}, 'multiValueQueryStringParameters': None}, SimpleNamespace())
    resp_body = json.loads(response['body'])
    assert 200 == response['statusCode']
    assert 1 == len(resp_body['JobEvents'])
    assert stub == resp_body['JobEvents'][0]
amazon-s3-find-and-forget
positive
def create_kwargs(cls: Type[T], params: Params, **extras) -> Dict[str, Any]: """ Given some class, a `Params` object, and potentially other keyword arguments, create a dict of keyword args suitable for passing to the class's constructor. The function does this by finding the class's constructor, matching the constructor arguments to entries in the `params` object, and instantiating values for the parameters using the type annotation and possibly a from_params method. Any values that are provided in the `extras` will just be used as is. For instance, you might provide an existing `Vocabulary` this way. """ signature = inspect.signature(cls.__init__) kwargs: Dict[str, Any] = {} for (name, param) in signature.parameters.items(): if name == 'self': continue <DeepExtract> origin = getattr(param.annotation, '__origin__', None) args = getattr(param.annotation, '__args__', ()) if origin == Union and len(args) == 2 and (args[1] == type(None)): param.annotation = args[0] else: param.annotation = param.annotation </DeepExtract> origin = getattr(annotation, '__origin__', None) args = getattr(annotation, '__args__', []) default = param.default optional = default != _NO_DEFAULT if name in extras: kwargs[name] = extras[name] elif hasattr(annotation, 'from_params'): if name in params: subparams = params.pop(name) if takes_arg(annotation.from_params, 'extras'): subextras = extras else: subextras = {k: v for (k, v) in extras.items() if takes_arg(annotation.from_params, k)} if isinstance(subparams, str): kwargs[name] = annotation.by_name(subparams)() else: kwargs[name] = annotation.from_params(params=subparams, **subextras) elif not optional: raise ConfigurationError(f'expected key {name} for {cls.__name__}') else: kwargs[name] = default elif annotation == str: kwargs[name] = params.pop(name, default) if optional else params.pop(name) elif annotation == int: kwargs[name] = params.pop_int(name, default) if optional else params.pop_int(name) elif annotation == bool: kwargs[name] = params.pop_bool(name, default) if optional else params.pop_bool(name) elif annotation == float: kwargs[name] = params.pop_float(name, default) if optional else params.pop_float(name) elif origin in (Dict, dict) and len(args) == 2 and hasattr(args[-1], 'from_params'): value_cls = annotation.__args__[-1] value_dict = {} for (key, value_params) in params.pop(name, Params({})).items(): value_dict[key] = value_cls.from_params(params=value_params, **extras) kwargs[name] = value_dict elif optional: kwargs[name] = params.pop(name, default) else: kwargs[name] = params.pop(name) params.assert_empty(cls.__name__) return kwargs
def create_kwargs(cls: Type[T], params: Params, **extras) -> Dict[str, Any]: """ Given some class, a `Params` object, and potentially other keyword arguments, create a dict of keyword args suitable for passing to the class's constructor. The function does this by finding the class's constructor, matching the constructor arguments to entries in the `params` object, and instantiating values for the parameters using the type annotation and possibly a from_params method. Any values that are provided in the `extras` will just be used as is. For instance, you might provide an existing `Vocabulary` this way. """ signature = inspect.signature(cls.__init__) kwargs: Dict[str, Any] = {} for (name, param) in signature.parameters.items(): if name == 'self': continue origin = getattr(param.annotation, '__origin__', None) args = getattr(param.annotation, '__args__', ()) if origin == Union and len(args) == 2 and (args[1] == type(None)): param.annotation = args[0] else: param.annotation = param.annotation origin = getattr(annotation, '__origin__', None) args = getattr(annotation, '__args__', []) default = param.default optional = default != _NO_DEFAULT if name in extras: kwargs[name] = extras[name] elif hasattr(annotation, 'from_params'): if name in params: subparams = params.pop(name) if takes_arg(annotation.from_params, 'extras'): subextras = extras else: subextras = {k: v for (k, v) in extras.items() if takes_arg(annotation.from_params, k)} if isinstance(subparams, str): kwargs[name] = annotation.by_name(subparams)() else: kwargs[name] = annotation.from_params(params=subparams, **subextras) elif not optional: raise ConfigurationError(f'expected key {name} for {cls.__name__}') else: kwargs[name] = default elif annotation == str: kwargs[name] = params.pop(name, default) if optional else params.pop(name) elif annotation == int: kwargs[name] = params.pop_int(name, default) if optional else params.pop_int(name) elif annotation == bool: kwargs[name] = params.pop_bool(name, default) if optional else params.pop_bool(name) elif annotation == float: kwargs[name] = params.pop_float(name, default) if optional else params.pop_float(name) elif origin in (Dict, dict) and len(args) == 2 and hasattr(args[-1], 'from_params'): value_cls = annotation.__args__[-1] value_dict = {} for (key, value_params) in params.pop(name, Params({})).items(): value_dict[key] = value_cls.from_params(params=value_params, **extras) kwargs[name] = value_dict elif optional: kwargs[name] = params.pop(name, default) else: kwargs[name] = params.pop(name) params.assert_empty(cls.__name__) return kwargs
ACE
positive
def test_unconfirmed_and_non_collaborative(self): <DeepExtract> self.project = prepare_project(DOCUMENT_CLASSIFICATION, collaborative_annotation=collaborative) self.example1 = mommy.make('ExportedExample', project=self.project.item, text='example1') self.example2 = mommy.make('ExportedExample', project=self.project.item, text='example2') self.category1 = mommy.make('ExportedCategory', example=self.example1, user=self.project.admin) self.category2 = mommy.make('ExportedCategory', example=self.example1, user=self.project.annotator) self.comment1 = mommy.make('ExportedComment', example=self.example1, user=self.project.admin) self.comment2 = mommy.make('ExportedComment', example=self.example1, user=self.project.annotator) mommy.make('ExampleState', example=self.example1, confirmed_by=self.project.admin) self.data1 = self.data_to_text(self.example1) self.data2 = self.data_to_text(self.example2) </DeepExtract> <DeepExtract> file = export_dataset(self.project.id, 'JSONL', confirmed_only) if self.project.item.collaborative_annotation: dataset = pd.read_json(file, lines=True).to_dict(orient='records') else: dataset = read_zip_content(file) os.remove(file) datasets = dataset </DeepExtract> expected_datasets = {self.project.admin.username: [{**self.data1, 'label': [self.category1.to_string()], 'Comments': [self.comment1.to_string()]}, {**self.data2, 'label': [], 'Comments': []}], self.project.approver.username: [{**self.data1, 'label': [], 'Comments': []}, {**self.data2, 'label': [], 'Comments': []}], self.project.annotator.username: [{**self.data1, 'label': [self.category2.to_string()], 'Comments': [self.comment2.to_string()]}, {**self.data2, 'label': [], 'Comments': []}]} for (username, dataset) in expected_datasets.items(): self.assertEqual(datasets[username], dataset)
def test_unconfirmed_and_non_collaborative(self): self.project = prepare_project(DOCUMENT_CLASSIFICATION, collaborative_annotation=collaborative) self.example1 = mommy.make('ExportedExample', project=self.project.item, text='example1') self.example2 = mommy.make('ExportedExample', project=self.project.item, text='example2') self.category1 = mommy.make('ExportedCategory', example=self.example1, user=self.project.admin) self.category2 = mommy.make('ExportedCategory', example=self.example1, user=self.project.annotator) self.comment1 = mommy.make('ExportedComment', example=self.example1, user=self.project.admin) self.comment2 = mommy.make('ExportedComment', example=self.example1, user=self.project.annotator) mommy.make('ExampleState', example=self.example1, confirmed_by=self.project.admin) self.data1 = self.data_to_text(self.example1) self.data2 = self.data_to_text(self.example2) file = export_dataset(self.project.id, 'JSONL', confirmed_only) if self.project.item.collaborative_annotation: dataset = pd.read_json(file, lines=True).to_dict(orient='records') else: dataset = read_zip_content(file) os.remove(file) datasets = dataset expected_datasets = {self.project.admin.username: [{**self.data1, 'label': [self.category1.to_string()], 'Comments': [self.comment1.to_string()]}, {**self.data2, 'label': [], 'Comments': []}], self.project.approver.username: [{**self.data1, 'label': [], 'Comments': []}, {**self.data2, 'label': [], 'Comments': []}], self.project.annotator.username: [{**self.data1, 'label': [self.category2.to_string()], 'Comments': [self.comment2.to_string()]}, {**self.data2, 'label': [], 'Comments': []}]} for (username, dataset) in expected_datasets.items(): self.assertEqual(datasets[username], dataset)
doccano
positive
def path_condition(self, n, path): """Reaction condition recommendation for a multiple-reaction path. Args: n (int): Number of conditions to recommend for each step. path (list of str): Reaction SMILES for each step. Returns: list: Reaction context with n options for each step. """ rxn_fps = [] for rxn in path: rxn_fps.append(fp.create_rxn_Morgan2FP(rxn, fpsize=256, useFeatures=True)) rxn_fps = np.array(rxn_fps) (dists, ids) = self.nnModel.kneighbors(rxn_fps) contexts = [] for (i, dist) in enumerate(dists): context = [] try: <DeepExtract> if n > int(ids[i].shape[0]): print('More rxn condition options requested than the number of NN, n is set to {}'.format(ids[i].shape[1])) if dist[0] > self.dist_limit: print('No neighbor is found within a cosine distance of {}'.format(self.dist_limit)) contexts = [] for (i, rid) in enumerate(ids[i]): if i >= n: break context = self.reaction_condition(self.rxn_ids[rid]) for c in context: if not c == []: contexts.append(c) if len(contexts) >= self.max_total_context: contexts = contexts[:self.max_total_context] break context = contexts </DeepExtract> contexts.append(context) except Exception as e: print('Step {} with an exception: {}'.format(i, e)) return contexts
def path_condition(self, n, path): """Reaction condition recommendation for a multiple-reaction path. Args: n (int): Number of conditions to recommend for each step. path (list of str): Reaction SMILES for each step. Returns: list: Reaction context with n options for each step. """ rxn_fps = [] for rxn in path: rxn_fps.append(fp.create_rxn_Morgan2FP(rxn, fpsize=256, useFeatures=True)) rxn_fps = np.array(rxn_fps) (dists, ids) = self.nnModel.kneighbors(rxn_fps) contexts = [] for (i, dist) in enumerate(dists): context = [] try: if n > int(ids[i].shape[0]): print('More rxn condition options requested than the number of NN, n is set to {}'.format(ids[i].shape[1])) if dist[0] > self.dist_limit: print('No neighbor is found within a cosine distance of {}'.format(self.dist_limit)) contexts = [] for (i, rid) in enumerate(ids[i]): if i >= n: break context = self.reaction_condition(self.rxn_ids[rid]) for c in context: if not c == []: contexts.append(c) if len(contexts) >= self.max_total_context: contexts = contexts[:self.max_total_context] break context = contexts contexts.append(context) except Exception as e: print('Step {} with an exception: {}'.format(i, e)) return contexts
ASKCOS
positive
def update(self, using=None, index=None, detect_noop=True, doc_as_upsert=False, refresh=False, retry_on_conflict=None, script=None, script_id=None, scripted_upsert=False, upsert=None, return_doc_meta=False, **fields): """ Partial update of the document, specify fields you wish to update and both the instance and the document in elasticsearch will be updated:: doc = MyDocument(title='Document Title!') doc.save() doc.update(title='New Document Title!') :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` :arg detect_noop: Set to ``False`` to disable noop detection. :arg refresh: Control when the changes made by this request are visible to search. Set to ``True`` for immediate effect. :arg retry_on_conflict: In between the get and indexing phases of the update, it is possible that another process might have already updated the same document. By default, the update will fail with a version conflict exception. The retry_on_conflict parameter controls how many times to retry the update before finally throwing an exception. :arg doc_as_upsert: Instead of sending a partial doc plus an upsert doc, setting doc_as_upsert to true will use the contents of doc as the upsert value :arg return_doc_meta: set to ``True`` to return all metadata from the index API call instead of only the operation result :return operation result noop/updated """ body = {'doc_as_upsert': doc_as_upsert, 'detect_noop': detect_noop} if script or script_id: if upsert is not None: body['upsert'] = upsert if script: script = {'source': script} else: script = {'id': script_id} script['params'] = fields body['script'] = script body['scripted_upsert'] = scripted_upsert else: if not fields: raise IllegalOperation('You cannot call update() without updating individual fields or a script. If you wish to update the entire object use save().') merge(self, fields) <DeepExtract> d = super().to_dict(skip_empty=skip_empty) if not include_meta: values = d meta = {'_' + k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} index = self._get_index(required=False) if index is not None: meta['_index'] = index meta['_source'] = d values = meta </DeepExtract> body['doc'] = {k: values.get(k) for k in fields.keys()} doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} if retry_on_conflict is not None: doc_meta['retry_on_conflict'] = retry_on_conflict if retry_on_conflict in (None, 0) and 'seq_no' in self.meta and ('primary_term' in self.meta): doc_meta['if_seq_no'] = self.meta['seq_no'] doc_meta['if_primary_term'] = self.meta['primary_term'] meta = self._get_connection(using).update(index=self._get_index(index), body=body, refresh=refresh, **doc_meta) for k in META_FIELDS: if '_' + k in meta: setattr(self.meta, k, meta['_' + k]) return meta if return_doc_meta else meta['result']
def update(self, using=None, index=None, detect_noop=True, doc_as_upsert=False, refresh=False, retry_on_conflict=None, script=None, script_id=None, scripted_upsert=False, upsert=None, return_doc_meta=False, **fields): """ Partial update of the document, specify fields you wish to update and both the instance and the document in elasticsearch will be updated:: doc = MyDocument(title='Document Title!') doc.save() doc.update(title='New Document Title!') :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` :arg detect_noop: Set to ``False`` to disable noop detection. :arg refresh: Control when the changes made by this request are visible to search. Set to ``True`` for immediate effect. :arg retry_on_conflict: In between the get and indexing phases of the update, it is possible that another process might have already updated the same document. By default, the update will fail with a version conflict exception. The retry_on_conflict parameter controls how many times to retry the update before finally throwing an exception. :arg doc_as_upsert: Instead of sending a partial doc plus an upsert doc, setting doc_as_upsert to true will use the contents of doc as the upsert value :arg return_doc_meta: set to ``True`` to return all metadata from the index API call instead of only the operation result :return operation result noop/updated """ body = {'doc_as_upsert': doc_as_upsert, 'detect_noop': detect_noop} if script or script_id: if upsert is not None: body['upsert'] = upsert if script: script = {'source': script} else: script = {'id': script_id} script['params'] = fields body['script'] = script body['scripted_upsert'] = scripted_upsert else: if not fields: raise IllegalOperation('You cannot call update() without updating individual fields or a script. If you wish to update the entire object use save().') merge(self, fields) d = super().to_dict(skip_empty=skip_empty) if not include_meta: values = d meta = {'_' + k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} index = self._get_index(required=False) if index is not None: meta['_index'] = index meta['_source'] = d values = meta body['doc'] = {k: values.get(k) for k in fields.keys()} doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} if retry_on_conflict is not None: doc_meta['retry_on_conflict'] = retry_on_conflict if retry_on_conflict in (None, 0) and 'seq_no' in self.meta and ('primary_term' in self.meta): doc_meta['if_seq_no'] = self.meta['seq_no'] doc_meta['if_primary_term'] = self.meta['primary_term'] meta = self._get_connection(using).update(index=self._get_index(index), body=body, refresh=refresh, **doc_meta) for k in META_FIELDS: if '_' + k in meta: setattr(self.meta, k, meta['_' + k]) return meta if return_doc_meta else meta['result']
elasticsearch-dsl-py
positive
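For context, a hedged usage sketch of the `update()` method in the elasticsearch-dsl-py record above; the document class, index name, and field names are invented, and it assumes a reachable local Elasticsearch cluster:

```python
# Illustrative only: BlogPost, 'blog-posts', and the field names are made up.
from elasticsearch_dsl import Document, Text, connections

connections.create_connection(hosts=['localhost'])   # assumes a local cluster

class BlogPost(Document):
    title = Text()
    body = Text()

    class Index:
        name = 'blog-posts'

post = BlogPost(title='Draft')
post.save()

# Partial update: only the changed field is sent and merged server-side.
result = post.update(title='Published title', retry_on_conflict=3)
print(result)   # 'updated', or 'noop' when detect_noop finds no change
```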
def conv1d(inputs, num_output_channels, kernel_size, scope, stride=1, padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ 1D convolution with non-linear operation. Args: inputs: 3-D tensor variable BxLxC num_output_channels: int kernel_size: int scope: string stride: int padding: 'SAME' or 'VALID' data_format: 'NHWC' or 'NCHW' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with tf.variable_scope(scope) as sc: assert data_format == 'NHWC' or data_format == 'NCHW' if data_format == 'NHWC': num_in_channels = inputs.get_shape()[-1].value elif data_format == 'NCHW': num_in_channels = inputs.get_shape()[1].value kernel_shape = [kernel_size, num_in_channels, num_output_channels] <DeepExtract> if use_xavier: initializer = tf.contrib.layers.xavier_initializer() else: initializer = tf.truncated_normal_initializer(stddev=stddev) var = _variable_on_cpu('weights', kernel_shape, initializer) if weight_decay is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss') tf.add_to_collection('losses', weight_decay) kernel = var </DeepExtract> outputs = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding, data_format=data_format) <DeepExtract> with tf.device('/cpu:0'): dtype = tf.float16 if use_fp16 else tf.float32 var = tf.get_variable('biases', [num_output_channels], initializer=tf.constant_initializer(0.0), dtype=dtype) biases = var </DeepExtract> outputs = tf.nn.bias_add(outputs, biases, data_format=data_format) if bn: <DeepExtract> outputs = batch_norm_template(outputs, is_training, 'bn', [0, 1], bn_decay, data_format) </DeepExtract> if activation_fn is not None: outputs = activation_fn(outputs) return outputs
def conv1d(inputs, num_output_channels, kernel_size, scope, stride=1, padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ 1D convolution with non-linear operation. Args: inputs: 3-D tensor variable BxLxC num_output_channels: int kernel_size: int scope: string stride: int padding: 'SAME' or 'VALID' data_format: 'NHWC' or 'NCHW' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with tf.variable_scope(scope) as sc: assert data_format == 'NHWC' or data_format == 'NCHW' if data_format == 'NHWC': num_in_channels = inputs.get_shape()[-1].value elif data_format == 'NCHW': num_in_channels = inputs.get_shape()[1].value kernel_shape = [kernel_size, num_in_channels, num_output_channels] if use_xavier: initializer = tf.contrib.layers.xavier_initializer() else: initializer = tf.truncated_normal_initializer(stddev=stddev) var = _variable_on_cpu('weights', kernel_shape, initializer) if weight_decay is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss') tf.add_to_collection('losses', weight_decay) kernel = var outputs = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding, data_format=data_format) with tf.device('/cpu:0'): dtype = tf.float16 if use_fp16 else tf.float32 var = tf.get_variable('biases', [num_output_channels], initializer=tf.constant_initializer(0.0), dtype=dtype) biases = var outputs = tf.nn.bias_add(outputs, biases, data_format=data_format) if bn: outputs = batch_norm_template(outputs, is_training, 'bn', [0, 1], bn_decay, data_format) if activation_fn is not None: outputs = activation_fn(outputs) return outputs
dfc2019
positive
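A brief usage sketch for the `conv1d` helper in the dfc2019 record above, written in the same TF1 graph style; the module name `tf_util`, the shapes, and the hyperparameters are assumptions:

```python
# Sketch only: assumes the helper above lives in a module named tf_util
# and that TensorFlow 1.x graph-mode APIs are available.
import tensorflow as tf
import tf_util  # hypothetical module containing conv1d

points = tf.placeholder(tf.float32, shape=(32, 1024, 3))  # B x L x C point features
is_training = tf.placeholder(tf.bool, shape=())

# Kernel size 1 lifts 3 input channels to 64 output channels, with batch norm.
features = tf_util.conv1d(points, 64, 1,
                          scope='feat1',
                          bn=True,
                          bn_decay=0.9,
                          is_training=is_training)
# features has shape (32, 1024, 64) in the default NHWC layout.
```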
def cumulative_sum(t): """Mutates t where each node's root becomes the sum of all entries in the corresponding subtree rooted at t. >>> t = Tree(1, [Tree(3, [Tree(5)]), Tree(7)]) >>> cumulative_sum(t) >>> t Tree(16, [Tree(8, [Tree(5)]), Tree(7)]) """ for b in t.branches: <DeepExtract> for b in b.branches: cumulative_sum(b) b.label += sum([b.label for b in b.branches]) </DeepExtract> t.label += sum([b.label for b in t.branches])
def cumulative_sum(t): """Mutates t where each node's root becomes the sum of all entries in the corresponding subtree rooted at t. >>> t = Tree(1, [Tree(3, [Tree(5)]), Tree(7)]) >>> cumulative_sum(t) >>> t Tree(16, [Tree(8, [Tree(5)]), Tree(7)]) """ for b in t.branches: for b in b.branches: cumulative_sum(b) b.label += sum([b.label for b in b.branches]) t.label += sum([b.label for b in t.branches])
cs61a-2018-spring
positive
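The `cumulative_sum` record above is easiest to follow as a runnable whole; the `Tree` class below is a minimal stand-in for the course's own, needing only `label` and `branches` attributes:

```python
# Minimal standalone sketch of the cumulative_sum pattern above.
class Tree:
    def __init__(self, label, branches=None):
        self.label = label
        self.branches = list(branches) if branches else []

    def __repr__(self):
        if self.branches:
            return 'Tree({0}, {1})'.format(self.label, self.branches)
        return 'Tree({0})'.format(self.label)


def cumulative_sum(t):
    """Mutate t so each node's label becomes the sum of its subtree."""
    for b in t.branches:
        cumulative_sum(b)                             # fold each child subtree first
    t.label += sum(b.label for b in t.branches)       # then add the children's totals


if __name__ == '__main__':
    t = Tree(1, [Tree(3, [Tree(5)]), Tree(7)])
    cumulative_sum(t)
    print(t)   # Tree(16, [Tree(8, [Tree(5)]), Tree(7)])
```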
def test_show_both_borders_for_items_then_disable_borders(self): <DeepExtract> msg = '{title:{fill}^{width}}'.format(title='simulate screen edges', fill='-', width=width - 2) print('{edge}{msg}{edge}'.format(edge='|', msg=msg)) </DeepExtract> sect = MenuItemsSection(MenuStyle(border_style=LightBorderStyle()), items=self.large_list) sect.show_item_top_border('menu_item_4', True) sect.show_item_bottom_border('menu_item_4', True) sect.show_item_top_border('menu_item_8', True) sect.show_item_bottom_border('menu_item_8', True) sect.show_item_top_border('menu_item_12', True) sect.show_item_bottom_border('menu_item_12', True) print('This should show top and bottom borders on items 4, 8, and 12') for line in sect.generate(): print(line) sect.show_item_top_border('menu_item_4', False) sect.show_item_bottom_border('menu_item_4', False) sect.show_item_top_border('menu_item_8', False) sect.show_item_bottom_border('menu_item_8', False) sect.show_item_top_border('menu_item_12', False) sect.show_item_bottom_border('menu_item_12', False) print('This should not show any borders on any item') for line in sect.generate(): print(line)
def test_show_both_borders_for_items_then_disable_borders(self): msg = '{title:{fill}^{width}}'.format(title='simulate screen edges', fill='-', width=width - 2) print('{edge}{msg}{edge}'.format(edge='|', msg=msg)) sect = MenuItemsSection(MenuStyle(border_style=LightBorderStyle()), items=self.large_list) sect.show_item_top_border('menu_item_4', True) sect.show_item_bottom_border('menu_item_4', True) sect.show_item_top_border('menu_item_8', True) sect.show_item_bottom_border('menu_item_8', True) sect.show_item_top_border('menu_item_12', True) sect.show_item_bottom_border('menu_item_12', True) print('This should show top and bottom borders on items 4, 8, and 12') for line in sect.generate(): print(line) sect.show_item_top_border('menu_item_4', False) sect.show_item_bottom_border('menu_item_4', False) sect.show_item_top_border('menu_item_8', False) sect.show_item_bottom_border('menu_item_8', False) sect.show_item_top_border('menu_item_12', False) sect.show_item_bottom_border('menu_item_12', False) print('This should not show any borders on any item') for line in sect.generate(): print(line)
console-menu
positive
def remove(self, node): """Remove a `node` from the stream. Also all connections will be removed.""" if isinstance(node, basestring): name = node node = self.nodes[name] else: <DeepExtract> if not node: raise ValueError('No node provided') names = [key for (key, value) in self.nodes.items() if value == node] if len(names) == 1: name = names[0] elif len(names) > 1: raise Exception('There are more references to the same node') else: raise Exception("Can not find node '%s'" % node) </DeepExtract> del self.nodes[name] remove = [c for c in self.connections if c[0] == node or c[1] == node] for connection in remove: self.connections.remove(connection)
def remove(self, node): """Remove a `node` from the stream. Also all connections will be removed.""" if isinstance(node, basestring): name = node node = self.nodes[name] else: if not node: raise ValueError('No node provided') names = [key for (key, value) in self.nodes.items() if value == node] if len(names) == 1: name = names[0] elif len(names) > 1: raise Exception('There are more references to the same node') else: raise Exception("Can not find node '%s'" % node) del self.nodes[name] remove = [c for c in self.connections if c[0] == node or c[1] == node] for connection in remove: self.connections.remove(connection)
brewery
positive
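Finally, a minimal standalone sketch of the node-removal bookkeeping in the brewery record above, using plain dicts and tuples in place of the real stream classes (all names here are invented):

```python
# Illustrative only: MiniStream mimics the nodes/connections bookkeeping,
# not the real brewery Stream API.
class MiniStream:
    def __init__(self):
        self.nodes = {}           # name -> node object
        self.connections = set()  # (source_node, target_node) pairs

    def _coalesce(self, node):
        """Resolve a node object or its name to a (name, node) pair."""
        if isinstance(node, str):
            return node, self.nodes[node]
        names = [k for k, v in self.nodes.items() if v is node]
        if len(names) != 1:
            raise ValueError('node is not registered exactly once')
        return names[0], node

    def remove(self, node):
        """Drop a node and every connection that touches it."""
        name, node = self._coalesce(node)
        del self.nodes[name]
        self.connections = {c for c in self.connections
                            if c[0] is not node and c[1] is not node}


stream = MiniStream()
src, sink = object(), object()
stream.nodes = {'src': src, 'sink': sink}
stream.connections = {(src, sink)}
stream.remove('src')
print(sorted(stream.nodes), stream.connections)   # ['sink'] set()
```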