column   dtype         stats
before   string        lengths 0 to 955k
after    string        lengths 0 to 877k
repo     string        lengths 1 to 74
type     class label   1 distinct value ("positive" in all rows shown)
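Each row pairs two versions of the same function: `before` contains the source with inlined helper bodies wrapped in `<DeepExtract>` / `</DeepExtract>` markers, `after` contains the identical code with those markers stripped, `repo` names the originating repository, and `type` carries the single label. A minimal sketch of iterating such rows, assuming a hypothetical JSON Lines export of the dataset (the filename and field access are assumptions, not part of the original data):

```python
import json

# Hypothetical path; point this at wherever the JSONL export of the dataset lives.
DATA_PATH = "deepextract_pairs.jsonl"

with open(DATA_PATH, encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # 'before' wraps inlined helper bodies in <DeepExtract> ... </DeepExtract>;
        # 'after' is the same function with the markers removed.
        n_markers = row["before"].count("<DeepExtract>")
        print(f"{row['repo']}: {n_markers} inlined block(s), type={row['type']}")
```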
def test_glrm(): <DeepExtract> parsed = parse_graph_argument('bipartite', 'glrm 10 9 15') G = obtain_graph(parsed) </DeepExtract> assert G.order() == 19 assert G.number_of_edges() == 15 (left, right) = G.parts() assert len(left) == 10 assert len(right) == 9
def test_glrm(): parsed = parse_graph_argument('bipartite', 'glrm 10 9 15') G = obtain_graph(parsed) assert G.order() == 19 assert G.number_of_edges() == 15 (left, right) = G.parts() assert len(left) == 10 assert len(right) == 9
cnfgen
positive
def write_js_metadata(pkg_data, project_shortname, has_wildcards): """Write an internal (not exported) R function to return all JS dependencies as required by dash. Parameters ---------- project_shortname = hyphenated string, e.g. dash-html-components Returns ------- """ <DeepExtract> sys.path.insert(0, os.getcwd()) mod = importlib.import_module(project_shortname) alldist = getattr(mod, '_js_dist', []) + getattr(mod, '_css_dist', []) project_ver = pkg_data.get('version') rpkgname = snake_case_to_camel_case(project_shortname) function_frame_open = frame_open_template.format(rpkgname=rpkgname) function_frame = [] function_frame_body = [] if len(alldist) > 1: for dep in range(len(alldist)): curr_dep = alldist[dep] rpp = curr_dep['relative_package_path'] async_or_dynamic = get_async_type(curr_dep) if 'dash_' in rpp: dep_name = rpp.split('.')[0] else: dep_name = '{}'.format(project_shortname) if 'css' in rpp: css_name = "'{}'".format(rpp) script_name = 'NULL' else: script_name = "'{}'".format(rpp) css_name = 'NULL' function_frame += [frame_element_template.format(dep_name=dep_name, project_ver=project_ver, rpkgname=rpkgname, project_shortname=project_shortname, script_name=script_name, css_name=css_name, async_or_dynamic=async_or_dynamic)] function_frame_body = ',\n'.join(function_frame) elif len(alldist) == 1: dep = alldist[0] rpp = dep['relative_package_path'] async_or_dynamic = get_async_type(dep) if 'css' in rpp: css_name = "'{}'".format(rpp) script_name = 'NULL' else: script_name = "'{}'".format(rpp) css_name = 'NULL' function_frame_body = frame_body_template.format(project_shortname=project_shortname, project_ver=project_ver, rpkgname=rpkgname, script_name=script_name, css_name=css_name, async_or_dynamic=async_or_dynamic) function_string = ''.join([function_frame_open, function_frame_body, frame_close_template]) function_string = function_string </DeepExtract> file_name = 'internal.R' if not os.path.exists('R'): os.makedirs('R') file_path = os.path.join('R', file_name) with open(file_path, 'w', encoding='utf-8') as f: f.write(function_string) if has_wildcards: f.write(wildcard_helper) if os.path.exists('inst/deps'): shutil.rmtree('inst/deps') os.makedirs('inst/deps') for (rel_dirname, _, filenames) in os.walk(project_shortname): for filename in filenames: extension = os.path.splitext(filename)[1] if extension in ['.py', '.pyc', '.json']: continue target_dirname = os.path.join(os.path.join('inst/deps/', os.path.relpath(rel_dirname, project_shortname))) if not os.path.exists(target_dirname): os.makedirs(target_dirname) shutil.copy(os.path.join(rel_dirname, filename), target_dirname)
def write_js_metadata(pkg_data, project_shortname, has_wildcards): """Write an internal (not exported) R function to return all JS dependencies as required by dash. Parameters ---------- project_shortname = hyphenated string, e.g. dash-html-components Returns ------- """ sys.path.insert(0, os.getcwd()) mod = importlib.import_module(project_shortname) alldist = getattr(mod, '_js_dist', []) + getattr(mod, '_css_dist', []) project_ver = pkg_data.get('version') rpkgname = snake_case_to_camel_case(project_shortname) function_frame_open = frame_open_template.format(rpkgname=rpkgname) function_frame = [] function_frame_body = [] if len(alldist) > 1: for dep in range(len(alldist)): curr_dep = alldist[dep] rpp = curr_dep['relative_package_path'] async_or_dynamic = get_async_type(curr_dep) if 'dash_' in rpp: dep_name = rpp.split('.')[0] else: dep_name = '{}'.format(project_shortname) if 'css' in rpp: css_name = "'{}'".format(rpp) script_name = 'NULL' else: script_name = "'{}'".format(rpp) css_name = 'NULL' function_frame += [frame_element_template.format(dep_name=dep_name, project_ver=project_ver, rpkgname=rpkgname, project_shortname=project_shortname, script_name=script_name, css_name=css_name, async_or_dynamic=async_or_dynamic)] function_frame_body = ',\n'.join(function_frame) elif len(alldist) == 1: dep = alldist[0] rpp = dep['relative_package_path'] async_or_dynamic = get_async_type(dep) if 'css' in rpp: css_name = "'{}'".format(rpp) script_name = 'NULL' else: script_name = "'{}'".format(rpp) css_name = 'NULL' function_frame_body = frame_body_template.format(project_shortname=project_shortname, project_ver=project_ver, rpkgname=rpkgname, script_name=script_name, css_name=css_name, async_or_dynamic=async_or_dynamic) function_string = ''.join([function_frame_open, function_frame_body, frame_close_template]) function_string = function_string file_name = 'internal.R' if not os.path.exists('R'): os.makedirs('R') file_path = os.path.join('R', file_name) with open(file_path, 'w', encoding='utf-8') as f: f.write(function_string) if has_wildcards: f.write(wildcard_helper) if os.path.exists('inst/deps'): shutil.rmtree('inst/deps') os.makedirs('inst/deps') for (rel_dirname, _, filenames) in os.walk(project_shortname): for filename in filenames: extension = os.path.splitext(filename)[1] if extension in ['.py', '.pyc', '.json']: continue target_dirname = os.path.join(os.path.join('inst/deps/', os.path.relpath(rel_dirname, project_shortname))) if not os.path.exists(target_dirname): os.makedirs(target_dirname) shutil.copy(os.path.join(rel_dirname, filename), target_dirname)
dash
positive
def run(self): self.context = Context(self) <DeepExtract> if self.__environment: exec(self.__environment, self.hooks) if 'before_all' not in self.hooks: self.hooks['before_all'] = self.before_all_default_hook </DeepExtract> <DeepExtract> behave.step_registry.registry = self.step_registry = StepRegistry() step_globals = {'use_step_matcher': matchers.use_step_matcher, 'step_matcher': matchers.step_matcher} setup_step_decorators(step_globals, self.step_registry) default_matcher = matchers.current_matcher for step in self.__steps: step_module_globals = step_globals.copy() exec(step, step_module_globals) matchers.current_matcher = default_matcher </DeepExtract> <DeepExtract> self.features.extend((parse_feature(f) for f in self.__features)) </DeepExtract> <DeepExtract> opener = StreamOpener(stream=sys.stdout) allure_formatter = AllureFormatter(opener, self.config) pretty_formatter = PrettyFormatter(opener, self.config) self.formatters.append(allure_formatter) self.formatters.append(pretty_formatter) </DeepExtract> self.run_model()
def run(self): self.context = Context(self) if self.__environment: exec(self.__environment, self.hooks) if 'before_all' not in self.hooks: self.hooks['before_all'] = self.before_all_default_hook behave.step_registry.registry = self.step_registry = StepRegistry() step_globals = {'use_step_matcher': matchers.use_step_matcher, 'step_matcher': matchers.step_matcher} setup_step_decorators(step_globals, self.step_registry) default_matcher = matchers.current_matcher for step in self.__steps: step_module_globals = step_globals.copy() exec(step, step_module_globals) matchers.current_matcher = default_matcher self.features.extend((parse_feature(f) for f in self.__features)) opener = StreamOpener(stream=sys.stdout) allure_formatter = AllureFormatter(opener, self.config) pretty_formatter = PrettyFormatter(opener, self.config) self.formatters.append(allure_formatter) self.formatters.append(pretty_formatter) self.run_model()
allure-python
positive
def _compute_attributions_tensor_input(X: Union[np.ndarray, tf.Tensor], baselines: Union[np.ndarray, tf.Tensor], target: Optional[np.ndarray], step_sizes: List[float], alphas: List[float], nb_samples: int, forward_kwargs: Optional[dict], compute_layer_inputs_gradients: bool) -> List: """For a single input tensor, calculates the attributions for each input feature or element of layer. Parameters ---------- X Instance for which integrated gradients attribution are computed. baselines Baselines (starting point of the path integral) for each instance. target Defines which element of the model output is considered to compute the gradients. step_sizes Weights in the path integral sum. alphas Interpolation parameter defining the points of the integral path. nb_samples Total number of samples. forward_kwargs Inputs keywords args. compute_layer_inputs_gradients In case of layers gradients, controls whether the gradients are computed for the layer's inputs or outputs. If ``True``, gradients are computed for the layer's inputs, if ``False`` for the layer's outputs. Returns ------- Tuple with integrated gradients attributions, deltas and predictions. """ if forward_kwargs is None: forward_kwargs = {} paths = np.concatenate([baselines + alphas[i] * (X - baselines) for i in range(self.n_steps)], axis=0) if forward_kwargs: paths_kwargs: Optional[dict] = {k: np.concatenate([forward_kwargs[k] for _ in range(self.n_steps)], axis=0) for k in forward_kwargs.keys()} else: paths_kwargs = None if target is not None: target_paths = np.concatenate([target for _ in range(self.n_steps)], axis=0) else: target_paths = None if forward_kwargs: if target_paths is not None: ds_args = (paths, paths_kwargs, target_paths) else: ds_args = (paths, paths_kwargs) elif target_paths is not None: ds_args = (paths, target_paths) else: ds_args = paths paths_ds = tf.data.Dataset.from_tensor_slices(ds_args).batch(self.internal_batch_size) paths_ds.as_numpy_iterator() paths_ds.prefetch(tf.data.experimental.AUTOTUNE) batches = [] for path in paths_ds: if forward_kwargs: if target is not None: (paths_b, kwargs_b, target_b) = path else: (paths_b, kwargs_b) = path target_b = None else: kwargs_b = None if target is not None: (paths_b, target_b) = path else: (paths_b, target_b) = (path, None) if self.layer is None: <DeepExtract> if kwargs_b is None: kwargs_b = {} with tf.GradientTape() as tape: tape.watch(paths_b) preds = _run_forward(self.model, paths_b, target_b, forward_kwargs=kwargs_b) grads = tape.gradient(preds, paths_b) grads_b = grads </DeepExtract> else: <DeepExtract> def watch_layer(layer, tape): """ Make an intermediate hidden `layer` watchable by the `tape`. 
After calling this function, you can obtain the gradient with respect to the output of the `layer` by calling: grads = tape.gradient(..., layer.result) """ def decorator(func): def wrapper(*args, **kwargs): if compute_layer_inputs_gradients: self.layer.inp = paths_b self.layer.result = func(*paths_b, **kwargs) tape.watch(self.layer.inp) else: self.layer.inp = args self.layer.result = paths_b tape.watch(self.layer.result) grads_b = self.layer.result grads_b = wrapper self.layer.call = decorator(self.layer.call) if isinstance(self.orig_dummy_input, list): if isinstance(paths_b, list): self.orig_dummy_input = [np.repeat(inp, paths_b[0].shape[0], axis=0) for inp in self.orig_dummy_input] else: self.orig_dummy_input = [np.repeat(inp, paths_b.shape[0], axis=0) for inp in self.orig_dummy_input] elif isinstance(paths_b, list): self.orig_dummy_input = np.repeat(self.orig_dummy_input, paths_b[0].shape[0], axis=0) else: self.orig_dummy_input = np.repeat(self.orig_dummy_input, paths_b.shape[0], axis=0) if kwargs_b is None: kwargs_b = {} with tf.GradientTape() as tape: watch_layer(self.layer, tape) preds = _run_forward(self.model, self.orig_dummy_input, target_b, forward_kwargs=kwargs_b) if compute_layer_inputs_gradients: grads = tape.gradient(preds, self.layer.inp) else: grads = tape.gradient(preds, self.layer.result) delattr(self.layer, 'inp') delattr(self.layer, 'result') self.layer.call = self.orig_call grads_b = grads </DeepExtract> batches.append(grads_b) attributions = [] <DeepExtract> grads = tf.concat([batches][0], 0) shape = grads.shape[1:] if isinstance(shape, tf.TensorShape): shape = tuple(shape.as_list()) if (len(self.model.output_shape) == 1 or self.model.output_shape[-1] == 1) and target is not None: sign = 2 * target_paths - 1 grads = np.array([s * g for (s, g) in zip(sign, grads)]) grads = tf.reshape(grads, (self.n_steps, nb_samples) + shape) sum_int = _sum_integral_terms(step_sizes, grads.numpy()) sum_int = sum_int </DeepExtract> norm = X - baselines attribution = norm * sum_int attributions.append(attribution) return attributions
def _compute_attributions_tensor_input(X: Union[np.ndarray, tf.Tensor], baselines: Union[np.ndarray, tf.Tensor], target: Optional[np.ndarray], step_sizes: List[float], alphas: List[float], nb_samples: int, forward_kwargs: Optional[dict], compute_layer_inputs_gradients: bool) -> List: """For a single input tensor, calculates the attributions for each input feature or element of layer. Parameters ---------- X Instance for which integrated gradients attribution are computed. baselines Baselines (starting point of the path integral) for each instance. target Defines which element of the model output is considered to compute the gradients. step_sizes Weights in the path integral sum. alphas Interpolation parameter defining the points of the integral path. nb_samples Total number of samples. forward_kwargs Inputs keywords args. compute_layer_inputs_gradients In case of layers gradients, controls whether the gradients are computed for the layer's inputs or outputs. If ``True``, gradients are computed for the layer's inputs, if ``False`` for the layer's outputs. Returns ------- Tuple with integrated gradients attributions, deltas and predictions. """ if forward_kwargs is None: forward_kwargs = {} paths = np.concatenate([baselines + alphas[i] * (X - baselines) for i in range(self.n_steps)], axis=0) if forward_kwargs: paths_kwargs: Optional[dict] = {k: np.concatenate([forward_kwargs[k] for _ in range(self.n_steps)], axis=0) for k in forward_kwargs.keys()} else: paths_kwargs = None if target is not None: target_paths = np.concatenate([target for _ in range(self.n_steps)], axis=0) else: target_paths = None if forward_kwargs: if target_paths is not None: ds_args = (paths, paths_kwargs, target_paths) else: ds_args = (paths, paths_kwargs) elif target_paths is not None: ds_args = (paths, target_paths) else: ds_args = paths paths_ds = tf.data.Dataset.from_tensor_slices(ds_args).batch(self.internal_batch_size) paths_ds.as_numpy_iterator() paths_ds.prefetch(tf.data.experimental.AUTOTUNE) batches = [] for path in paths_ds: if forward_kwargs: if target is not None: (paths_b, kwargs_b, target_b) = path else: (paths_b, kwargs_b) = path target_b = None else: kwargs_b = None if target is not None: (paths_b, target_b) = path else: (paths_b, target_b) = (path, None) if self.layer is None: if kwargs_b is None: kwargs_b = {} with tf.GradientTape() as tape: tape.watch(paths_b) preds = _run_forward(self.model, paths_b, target_b, forward_kwargs=kwargs_b) grads = tape.gradient(preds, paths_b) grads_b = grads else: def watch_layer(layer, tape): """ Make an intermediate hidden `layer` watchable by the `tape`. 
After calling this function, you can obtain the gradient with respect to the output of the `layer` by calling: grads = tape.gradient(..., layer.result) """ def decorator(func): def wrapper(*args, **kwargs): if compute_layer_inputs_gradients: self.layer.inp = paths_b self.layer.result = func(*paths_b, **kwargs) tape.watch(self.layer.inp) else: self.layer.inp = args self.layer.result = paths_b tape.watch(self.layer.result) grads_b = self.layer.result grads_b = wrapper self.layer.call = decorator(self.layer.call) if isinstance(self.orig_dummy_input, list): if isinstance(paths_b, list): self.orig_dummy_input = [np.repeat(inp, paths_b[0].shape[0], axis=0) for inp in self.orig_dummy_input] else: self.orig_dummy_input = [np.repeat(inp, paths_b.shape[0], axis=0) for inp in self.orig_dummy_input] elif isinstance(paths_b, list): self.orig_dummy_input = np.repeat(self.orig_dummy_input, paths_b[0].shape[0], axis=0) else: self.orig_dummy_input = np.repeat(self.orig_dummy_input, paths_b.shape[0], axis=0) if kwargs_b is None: kwargs_b = {} with tf.GradientTape() as tape: watch_layer(self.layer, tape) preds = _run_forward(self.model, self.orig_dummy_input, target_b, forward_kwargs=kwargs_b) if compute_layer_inputs_gradients: grads = tape.gradient(preds, self.layer.inp) else: grads = tape.gradient(preds, self.layer.result) delattr(self.layer, 'inp') delattr(self.layer, 'result') self.layer.call = self.orig_call grads_b = grads batches.append(grads_b) attributions = [] grads = tf.concat([batches][0], 0) shape = grads.shape[1:] if isinstance(shape, tf.TensorShape): shape = tuple(shape.as_list()) if (len(self.model.output_shape) == 1 or self.model.output_shape[-1] == 1) and target is not None: sign = 2 * target_paths - 1 grads = np.array([s * g for (s, g) in zip(sign, grads)]) grads = tf.reshape(grads, (self.n_steps, nb_samples) + shape) sum_int = _sum_integral_terms(step_sizes, grads.numpy()) sum_int = sum_int norm = X - baselines attribution = norm * sum_int attributions.append(attribution) return attributions
alibi
positive
def fit(self, X, a, y, refit_weight_model=True, **kwargs): <DeepExtract> X_outcome = self._extract_outcome_model_data(X) X_weight = self._extract_weight_model_data(X) (X_outcome, X_weight) = (X_outcome, X_weight) </DeepExtract> weight_model_is_not_fitted = not self._is_weight_model_fitted() if refit_weight_model or weight_model_is_not_fitted: self.weight_model.fit(X=X_weight, a=a, y=y) weights = self.weight_model.compute_weights(X_weight, a) self.outcome_model.fit(X=X_outcome, y=y, a=a, sample_weight=weights) return self
def fit(self, X, a, y, refit_weight_model=True, **kwargs): X_outcome = self._extract_outcome_model_data(X) X_weight = self._extract_weight_model_data(X) (X_outcome, X_weight) = (X_outcome, X_weight) weight_model_is_not_fitted = not self._is_weight_model_fitted() if refit_weight_model or weight_model_is_not_fitted: self.weight_model.fit(X=X_weight, a=a, y=y) weights = self.weight_model.compute_weights(X_weight, a) self.outcome_model.fit(X=X_outcome, y=y, a=a, sample_weight=weights) return self
causallib
positive
def redraw(self): log.debug('Taglist REDRAW (%s)!\n', self.width) self.pad.erase() target_obj = self.callbacks['get_var']('target_obj') target_offset = self.callbacks['get_var']('target_offset') if target_obj == None: self.pad.addstr('All tags empty.') self.callbacks['refresh']() return if not target_obj.is_tag: tag = target_obj.parent_tag tl = tag.lines(self.width) if target_offset < tl: target_offset = tl elif target_offset < 0: target_offset = 0 tol = target_obj.lines(self.width) if target_offset > self.height - 1 - tol: target_offset = self.height - 1 - tol obj = target_obj curpos = target_offset top_adjusted = False while curpos > 0: if obj.prev_obj: curpos -= obj.prev_obj.lines(self.width) obj = obj.prev_obj else: top_adjusted = True target_offset -= curpos curpos = 0 last_obj = target_obj last_off = target_offset while last_off < self.height - 1: if last_obj: last_off += last_obj.lines(self.width) last_obj = last_obj.next_obj elif not top_adjusted: rem = self.height - 1 - last_off self.callbacks['set_var']('target_offset', target_offset + rem) <DeepExtract> log.debug('Taglist REDRAW (%s)!\n', self.width) self.pad.erase() target_obj = self.callbacks['get_var']('target_obj') target_offset = self.callbacks['get_var']('target_offset') if target_obj == None: self.pad.addstr('All tags empty.') self.callbacks['refresh']() return if not target_obj.is_tag: tag = target_obj.parent_tag tl = tag.lines(self.width) if target_offset < tl: target_offset = tl elif target_offset < 0: target_offset = 0 tol = target_obj.lines(self.width) if target_offset > self.height - 1 - tol: target_offset = self.height - 1 - tol obj = target_obj curpos = target_offset top_adjusted = False while curpos > 0: if obj.prev_obj: curpos -= obj.prev_obj.lines(self.width) obj = obj.prev_obj else: top_adjusted = True target_offset -= curpos curpos = 0 last_obj = target_obj last_off = target_offset while last_off < self.height - 1: if last_obj: last_off += last_obj.lines(self.width) last_obj = last_obj.next_obj elif not top_adjusted: rem = self.height - 1 - last_off self.callbacks['set_var']('target_offset', target_offset + rem) self.redraw() return else: break self.callbacks['set_var']('target_offset', target_offset) self.first_sel = obj while self.first_sel.is_tag: if self.callbacks['get_tag_opt'](obj.tag, 'collapsed'): break if self.first_sel.next_obj: self.first_sel = self.first_sel.next_obj else: break rendered_header = False w_offset = 0 while obj != None: obj.lines(self.width) obj.curpos = curpos (w_offset, curpos) = self._partial_render(obj, w_offset, curpos) if not rendered_header and curpos > 0: tag = self.tag_by_obj(obj) if curpos >= tag.lines(self.width): self._partial_render(tag, 0, 0) rendered_header = True obj.extra_lines = 0 if not obj.next_obj or obj.next_obj.is_tag: if obj.is_tag: tag = obj else: tag = self.tag_by_item(obj) tag.lines(self.width) obj.extra_lines = tag.footlines (w_offset, curpos) = self._partial_render(tag, w_offset, curpos, True) rendered_header = True elif not obj.is_tag and self.spacing: curpos += self.spacing w_offset += self.spacing obj.extra_lines += self.spacing if w_offset >= self.height: break obj = obj.next_obj self.callbacks['refresh']() </DeepExtract> return else: break self.callbacks['set_var']('target_offset', target_offset) self.first_sel = obj while self.first_sel.is_tag: if self.callbacks['get_tag_opt'](obj.tag, 'collapsed'): break if self.first_sel.next_obj: self.first_sel = self.first_sel.next_obj else: break rendered_header = False w_offset = 0 while obj != None: 
obj.lines(self.width) obj.curpos = curpos <DeepExtract> lines = obj.pads(self.width) pad = obj.pad if footer: lines = obj.footlines pad = obj.footpad draw_lines = lines if curpos + lines > 0: start = 0 if curpos < 0: start = -1 * curpos draw_lines += curpos if w_offset + draw_lines > self.height: draw_lines = self.height - w_offset if draw_lines: pad.overwrite(self.pad, start, 0, w_offset, 0, w_offset + (draw_lines - 1), self.width - 1) (w_offset, curpos) = (w_offset + draw_lines, curpos + lines) (w_offset, curpos) = (w_offset, curpos + lines) </DeepExtract> if not rendered_header and curpos > 0: <DeepExtract> if obj.is_tag: tag = obj tag = obj.parent_tag </DeepExtract> if curpos >= tag.lines(self.width): <DeepExtract> lines = tag.pads(self.width) pad = tag.pad if footer: lines = tag.footlines pad = tag.footpad draw_lines = lines if 0 + lines > 0: start = 0 if 0 < 0: start = -1 * 0 draw_lines += 0 if 0 + draw_lines > self.height: draw_lines = self.height - 0 if draw_lines: pad.overwrite(self.pad, start, 0, 0, 0, 0 + (draw_lines - 1), self.width - 1) return (0 + draw_lines, 0 + lines) return (0, 0 + lines) </DeepExtract> rendered_header = True obj.extra_lines = 0 if not obj.next_obj or obj.next_obj.is_tag: if obj.is_tag: tag = obj else: <DeepExtract> tag = obj.parent_tag </DeepExtract> tag.lines(self.width) obj.extra_lines = tag.footlines <DeepExtract> lines = tag.pads(self.width) pad = tag.pad if True: lines = tag.footlines pad = tag.footpad draw_lines = lines if curpos + lines > 0: start = 0 if curpos < 0: start = -1 * curpos draw_lines += curpos if w_offset + draw_lines > self.height: draw_lines = self.height - w_offset if draw_lines: pad.overwrite(self.pad, start, 0, w_offset, 0, w_offset + (draw_lines - 1), self.width - 1) (w_offset, curpos) = (w_offset + draw_lines, curpos + lines) (w_offset, curpos) = (w_offset, curpos + lines) </DeepExtract> rendered_header = True elif not obj.is_tag and self.spacing: curpos += self.spacing w_offset += self.spacing obj.extra_lines += self.spacing if w_offset >= self.height: break obj = obj.next_obj self.callbacks['refresh']()
def redraw(self): log.debug('Taglist REDRAW (%s)!\n', self.width) self.pad.erase() target_obj = self.callbacks['get_var']('target_obj') target_offset = self.callbacks['get_var']('target_offset') if target_obj == None: self.pad.addstr('All tags empty.') self.callbacks['refresh']() return if not target_obj.is_tag: tag = target_obj.parent_tag tl = tag.lines(self.width) if target_offset < tl: target_offset = tl elif target_offset < 0: target_offset = 0 tol = target_obj.lines(self.width) if target_offset > self.height - 1 - tol: target_offset = self.height - 1 - tol obj = target_obj curpos = target_offset top_adjusted = False while curpos > 0: if obj.prev_obj: curpos -= obj.prev_obj.lines(self.width) obj = obj.prev_obj else: top_adjusted = True target_offset -= curpos curpos = 0 last_obj = target_obj last_off = target_offset while last_off < self.height - 1: if last_obj: last_off += last_obj.lines(self.width) last_obj = last_obj.next_obj elif not top_adjusted: rem = self.height - 1 - last_off self.callbacks['set_var']('target_offset', target_offset + rem) log.debug('Taglist REDRAW (%s)!\n', self.width) self.pad.erase() target_obj = self.callbacks['get_var']('target_obj') target_offset = self.callbacks['get_var']('target_offset') if target_obj == None: self.pad.addstr('All tags empty.') self.callbacks['refresh']() return if not target_obj.is_tag: tag = target_obj.parent_tag tl = tag.lines(self.width) if target_offset < tl: target_offset = tl elif target_offset < 0: target_offset = 0 tol = target_obj.lines(self.width) if target_offset > self.height - 1 - tol: target_offset = self.height - 1 - tol obj = target_obj curpos = target_offset top_adjusted = False while curpos > 0: if obj.prev_obj: curpos -= obj.prev_obj.lines(self.width) obj = obj.prev_obj else: top_adjusted = True target_offset -= curpos curpos = 0 last_obj = target_obj last_off = target_offset while last_off < self.height - 1: if last_obj: last_off += last_obj.lines(self.width) last_obj = last_obj.next_obj elif not top_adjusted: rem = self.height - 1 - last_off self.callbacks['set_var']('target_offset', target_offset + rem) self.redraw() return else: break self.callbacks['set_var']('target_offset', target_offset) self.first_sel = obj while self.first_sel.is_tag: if self.callbacks['get_tag_opt'](obj.tag, 'collapsed'): break if self.first_sel.next_obj: self.first_sel = self.first_sel.next_obj else: break rendered_header = False w_offset = 0 while obj != None: obj.lines(self.width) obj.curpos = curpos (w_offset, curpos) = self._partial_render(obj, w_offset, curpos) if not rendered_header and curpos > 0: tag = self.tag_by_obj(obj) if curpos >= tag.lines(self.width): self._partial_render(tag, 0, 0) rendered_header = True obj.extra_lines = 0 if not obj.next_obj or obj.next_obj.is_tag: if obj.is_tag: tag = obj else: tag = self.tag_by_item(obj) tag.lines(self.width) obj.extra_lines = tag.footlines (w_offset, curpos) = self._partial_render(tag, w_offset, curpos, True) rendered_header = True elif not obj.is_tag and self.spacing: curpos += self.spacing w_offset += self.spacing obj.extra_lines += self.spacing if w_offset >= self.height: break obj = obj.next_obj self.callbacks['refresh']() return else: break self.callbacks['set_var']('target_offset', target_offset) self.first_sel = obj while self.first_sel.is_tag: if self.callbacks['get_tag_opt'](obj.tag, 'collapsed'): break if self.first_sel.next_obj: self.first_sel = self.first_sel.next_obj else: break rendered_header = False w_offset = 0 while obj != None: obj.lines(self.width) obj.curpos = 
curpos lines = obj.pads(self.width) pad = obj.pad if footer: lines = obj.footlines pad = obj.footpad draw_lines = lines if curpos + lines > 0: start = 0 if curpos < 0: start = -1 * curpos draw_lines += curpos if w_offset + draw_lines > self.height: draw_lines = self.height - w_offset if draw_lines: pad.overwrite(self.pad, start, 0, w_offset, 0, w_offset + (draw_lines - 1), self.width - 1) (w_offset, curpos) = (w_offset + draw_lines, curpos + lines) (w_offset, curpos) = (w_offset, curpos + lines) if not rendered_header and curpos > 0: if obj.is_tag: tag = obj tag = obj.parent_tag if curpos >= tag.lines(self.width): lines = tag.pads(self.width) pad = tag.pad if footer: lines = tag.footlines pad = tag.footpad draw_lines = lines if 0 + lines > 0: start = 0 if 0 < 0: start = -1 * 0 draw_lines += 0 if 0 + draw_lines > self.height: draw_lines = self.height - 0 if draw_lines: pad.overwrite(self.pad, start, 0, 0, 0, 0 + (draw_lines - 1), self.width - 1) return (0 + draw_lines, 0 + lines) return (0, 0 + lines) rendered_header = True obj.extra_lines = 0 if not obj.next_obj or obj.next_obj.is_tag: if obj.is_tag: tag = obj else: tag = obj.parent_tag tag.lines(self.width) obj.extra_lines = tag.footlines lines = tag.pads(self.width) pad = tag.pad if True: lines = tag.footlines pad = tag.footpad draw_lines = lines if curpos + lines > 0: start = 0 if curpos < 0: start = -1 * curpos draw_lines += curpos if w_offset + draw_lines > self.height: draw_lines = self.height - w_offset if draw_lines: pad.overwrite(self.pad, start, 0, w_offset, 0, w_offset + (draw_lines - 1), self.width - 1) (w_offset, curpos) = (w_offset + draw_lines, curpos + lines) (w_offset, curpos) = (w_offset, curpos + lines) rendered_header = True elif not obj.is_tag and self.spacing: curpos += self.spacing w_offset += self.spacing obj.extra_lines += self.spacing if w_offset >= self.height: break obj = obj.next_obj self.callbacks['refresh']()
canto-curses
positive
def move_mouse(self, x, y, reference='absolute', proportional=False, phantom=None): original_location = self.libxdo.get_mouse_location() <DeepExtract> if window_id is None: window_id = self.libxdo.get_focused_window_sane() window_location = self.libxdo.get_window_location(window_id) window_size = self.libxdo.get_window_size(window_id) geo = {'x': int(window_location.x), 'y': int(window_location.y), 'screen': window_location.screen.display, 'height': int(window_size.height), 'width': int(window_size.width)} </DeepExtract> if proportional: x = int(geo['width'] * x) y = int(geo['height'] * y) if reference == 'absolute': x = x if x > 0 else x y = y if y > 0 else y self.libxdo.move_mouse(x, y) elif reference == 'relative_active': window_location = self.libxdo.get_window_location(self.libxdo.get_active_window()) self.libxdo.move_mouse(window_location.x + x, window_location.y + y) elif reference == 'relative': self.libxdo.move_mouse_relative(x, y) else: raise ValueError('invalid "reference" parameter "%s"' % reference) if phantom is not None: self.libxdo.click_window(0, _MOUSE_BUTTONS[phantom]) self.libxdo.move_mouse(original_location.x, original_location.y)
def move_mouse(self, x, y, reference='absolute', proportional=False, phantom=None): original_location = self.libxdo.get_mouse_location() if window_id is None: window_id = self.libxdo.get_focused_window_sane() window_location = self.libxdo.get_window_location(window_id) window_size = self.libxdo.get_window_size(window_id) geo = {'x': int(window_location.x), 'y': int(window_location.y), 'screen': window_location.screen.display, 'height': int(window_size.height), 'width': int(window_size.width)} if proportional: x = int(geo['width'] * x) y = int(geo['height'] * y) if reference == 'absolute': x = x if x > 0 else x y = y if y > 0 else y self.libxdo.move_mouse(x, y) elif reference == 'relative_active': window_location = self.libxdo.get_window_location(self.libxdo.get_active_window()) self.libxdo.move_mouse(window_location.x + x, window_location.y + y) elif reference == 'relative': self.libxdo.move_mouse_relative(x, y) else: raise ValueError('invalid "reference" parameter "%s"' % reference) if phantom is not None: self.libxdo.click_window(0, _MOUSE_BUTTONS[phantom]) self.libxdo.move_mouse(original_location.x, original_location.y)
aenea
positive
def test_with_public_client(self): <DeepExtract> r = self.client.get('/oauth/client') oauth_client_credentials = self.json(r) oauth_client_ = Client.query.get(oauth_client_credentials['client_id']) oauth_client_._allowed_grant_types = 'password' self.db.session.commit() (client_id, client_secret) = (oauth_client_credentials['client_id'], oauth_client_credentials['client_secret']) </DeepExtract> r = self.client.post('/oauth/token', data={'client_id': client_id, 'client_secret': client_secret, 'grant_type': 'password', 'username': self.userdata['email'], 'password': self.userdata['password']}) data = self.json(r) self.assertEqual(data['error'], 'invalid_client')
def test_with_public_client(self): r = self.client.get('/oauth/client') oauth_client_credentials = self.json(r) oauth_client_ = Client.query.get(oauth_client_credentials['client_id']) oauth_client_._allowed_grant_types = 'password' self.db.session.commit() (client_id, client_secret) = (oauth_client_credentials['client_id'], oauth_client_credentials['client_secret']) r = self.client.post('/oauth/token', data={'client_id': client_id, 'client_secret': client_secret, 'grant_type': 'password', 'username': self.userdata['email'], 'password': self.userdata['password']}) data = self.json(r) self.assertEqual(data['error'], 'invalid_client')
argos
positive
def forward(self, input): qinput = self.quantize_input(input) <DeepExtract> alpha_gaus = {1: 1.24, 2: 1.71, 3: 2.215, 4: 2.55, 5: 2.93, 6: 3.28, 7: 3.61, 8: 3.92} alpha_gaus_positive = {1: 1.71, 2: 2.215, 3: 2.55, 4: 2.93, 5: 3.28, 6: 3.61, 7: 3.92, 8: 4.2} alpha_laplas = {1: 1.05, 2: 1.86, 3: 2.83, 4: 5.03, 5: 6.2, 6: 7.41, 7: 8.64, 8: 9.89} alpha_laplas_positive = {1: 1.86, 2: 2.83, 3: 5.03, 4: 6.2, 5: 7.41, 6: 8.64, 7: 9.89, 8: 11.16} if per_ch_input: self.weight = self.weight.transpose(0, 1) with torch.no_grad(): x_flat = self.weight.flatten(*(1, -1)) if quant_mode == 'mean_std' and self.num_bits_weight < 8: mu = x_flat.mean() if x_flat.dim() == 1 else x_flat.mean(-1) std = x_flat.std() if x_flat.dim() == 1 else x_flat.std(-1) b = torch.abs(x_flat - mu).mean() if x_flat.dim() == 1 else torch.mean(torch.abs(x_flat - mu.unsqueeze(1)), -1) minv = x_flat.min() if x_flat.dim() == 1 else x_flat.min(-1)[0] maxv = x_flat.max() if x_flat.dim() == 1 else x_flat.max(-1)[0] min_values = _deflatten_as(torch.max(mu - 6 * std, minv), self.weight) max_values = _deflatten_as(torch.min(mu + 6 * std, maxv), self.weight) elif x_flat.dim() == 1: min_values = _deflatten_as(x_flat.min(), self.weight) max_values = _deflatten_as(x_flat.max(), self.weight) else: min_values = _deflatten_as(x_flat.min(-1)[0], self.weight) max_values = _deflatten_as(x_flat.max(-1)[0], self.weight) if None is not None: if reduce_type == 'mean': min_values = min_values.mean(None, keepdim=keepdim) max_values = max_values.mean(None, keepdim=keepdim) else: min_values = min_values.min(None, keepdim=keepdim)[0] max_values = max_values.max(None, keepdim=keepdim)[0] min_values[min_values > 0] = 0 max_values[max_values < 0] = 0 range_values = max_values - min_values range_values[range_values == 0] = 1 weight_qparams = QParams(range=range_values, zero_point=min_values, num_bits=self.num_bits_weight) </DeepExtract> qweight = quantize(self.weight, qparams=weight_qparams) if not self.measure else self.weight if self.bias is not None: qbias = self.bias if self.measure else quantize(self.bias, num_bits=self.num_bits_weight + self.num_bits, flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups) if self.num_bits_grad is not None: <DeepExtract> output = UniformQuantizeGrad().apply(output, self.num_bits_grad, qparams, (1, -1), reduce_dim, dequantize, signed, stochastic) </DeepExtract> else: <DeepExtract> out1 = F.conv2d(qinput.detach(), qweight, qbias, self.stride, self.padding, self.dilation, self.groups) out2 = F.conv2d(qinput, qweight.detach(), qbias.detach() if qbias is not None else None, self.stride, self.padding, self.dilation, self.groups) out2 = quantize_grad(out2, num_bits=self.num_bits_grad, flatten_dims=(1, -1)) output = out1 + out2 - out1.detach() </DeepExtract> return output
def forward(self, input): qinput = self.quantize_input(input) alpha_gaus = {1: 1.24, 2: 1.71, 3: 2.215, 4: 2.55, 5: 2.93, 6: 3.28, 7: 3.61, 8: 3.92} alpha_gaus_positive = {1: 1.71, 2: 2.215, 3: 2.55, 4: 2.93, 5: 3.28, 6: 3.61, 7: 3.92, 8: 4.2} alpha_laplas = {1: 1.05, 2: 1.86, 3: 2.83, 4: 5.03, 5: 6.2, 6: 7.41, 7: 8.64, 8: 9.89} alpha_laplas_positive = {1: 1.86, 2: 2.83, 3: 5.03, 4: 6.2, 5: 7.41, 6: 8.64, 7: 9.89, 8: 11.16} if per_ch_input: self.weight = self.weight.transpose(0, 1) with torch.no_grad(): x_flat = self.weight.flatten(*(1, -1)) if quant_mode == 'mean_std' and self.num_bits_weight < 8: mu = x_flat.mean() if x_flat.dim() == 1 else x_flat.mean(-1) std = x_flat.std() if x_flat.dim() == 1 else x_flat.std(-1) b = torch.abs(x_flat - mu).mean() if x_flat.dim() == 1 else torch.mean(torch.abs(x_flat - mu.unsqueeze(1)), -1) minv = x_flat.min() if x_flat.dim() == 1 else x_flat.min(-1)[0] maxv = x_flat.max() if x_flat.dim() == 1 else x_flat.max(-1)[0] min_values = _deflatten_as(torch.max(mu - 6 * std, minv), self.weight) max_values = _deflatten_as(torch.min(mu + 6 * std, maxv), self.weight) elif x_flat.dim() == 1: min_values = _deflatten_as(x_flat.min(), self.weight) max_values = _deflatten_as(x_flat.max(), self.weight) else: min_values = _deflatten_as(x_flat.min(-1)[0], self.weight) max_values = _deflatten_as(x_flat.max(-1)[0], self.weight) if None is not None: if reduce_type == 'mean': min_values = min_values.mean(None, keepdim=keepdim) max_values = max_values.mean(None, keepdim=keepdim) else: min_values = min_values.min(None, keepdim=keepdim)[0] max_values = max_values.max(None, keepdim=keepdim)[0] min_values[min_values > 0] = 0 max_values[max_values < 0] = 0 range_values = max_values - min_values range_values[range_values == 0] = 1 weight_qparams = QParams(range=range_values, zero_point=min_values, num_bits=self.num_bits_weight) qweight = quantize(self.weight, qparams=weight_qparams) if not self.measure else self.weight if self.bias is not None: qbias = self.bias if self.measure else quantize(self.bias, num_bits=self.num_bits_weight + self.num_bits, flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups) if self.num_bits_grad is not None: output = UniformQuantizeGrad().apply(output, self.num_bits_grad, qparams, (1, -1), reduce_dim, dequantize, signed, stochastic) else: out1 = F.conv2d(qinput.detach(), qweight, qbias, self.stride, self.padding, self.dilation, self.groups) out2 = F.conv2d(qinput, qweight.detach(), qbias.detach() if qbias is not None else None, self.stride, self.padding, self.dilation, self.groups) out2 = quantize_grad(out2, num_bits=self.num_bits_grad, flatten_dims=(1, -1)) output = out1 + out2 - out1.detach() return output
CalibTIP
positive
@pytest.mark.django_db @pytest.mark.parametrize('lookup,value', [('', '2020-01-02 03:04:05'), ('year', 2020), ('quarter', 1), ('month', 'January'), ('month_start', '2020-01-01'), ('day', 2), ('week_day', 'Thursday'), ('hour', 3), ('minute', 4), ('second', 5), ('date', '2020-01-02')]) def test_all_datetime_functions(get_product_flat, lookup, value): models.Product.objects.create(producer=models.Producer.objects.create(), created_time=datetime(2020, 1, 2, 3, 4, 5, 6, tzinfo=UTC)) fields = f'created_time__{lookup}' if lookup else 'created_time' <DeepExtract> def helper(queries, *args, **kwargs): res = 1(queries, *args, **kwargs) if res['body'] == []: assert res['cols'] == [] assert res['rows'] == [] data = [] else: assert res['cols'] == [[]] data = [r + b for (r, b) in zip(res['rows'], res['body'][0])] data = helper </DeepExtract> assert data == [[value]]
@pytest.mark.django_db @pytest.mark.parametrize('lookup,value', [('', '2020-01-02 03:04:05'), ('year', 2020), ('quarter', 1), ('month', 'January'), ('month_start', '2020-01-01'), ('day', 2), ('week_day', 'Thursday'), ('hour', 3), ('minute', 4), ('second', 5), ('date', '2020-01-02')]) def test_all_datetime_functions(get_product_flat, lookup, value): models.Product.objects.create(producer=models.Producer.objects.create(), created_time=datetime(2020, 1, 2, 3, 4, 5, 6, tzinfo=UTC)) fields = f'created_time__{lookup}' if lookup else 'created_time' def helper(queries, *args, **kwargs): res = 1(queries, *args, **kwargs) if res['body'] == []: assert res['cols'] == [] assert res['rows'] == [] data = [] else: assert res['cols'] == [[]] data = [r + b for (r, b) in zip(res['rows'], res['body'][0])] data = helper assert data == [[value]]
django-data-browser
positive
def format_results(self, results, jsonfile_prefix=None, **kwargs): """Format the results to json (standard format for COCO evaluation). Args: results (list): Testing results of the dataset. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. Returns: tuple: (result_files, tmp_dir), result_files is a dict containing the json filepaths, tmp_dir is the temporal directory created for saving json files when jsonfile_prefix is not specified. """ assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self)) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None <DeepExtract> result_files = dict() if isinstance(results[0], list): json_results = self._det2json(results) result_files['bbox'] = '{}.{}.json'.format(jsonfile_prefix, 'bbox') result_files['proposal'] = '{}.{}.json'.format(jsonfile_prefix, 'bbox') mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): json_results = self._segm2json(results) result_files['bbox'] = '{}.{}.json'.format(jsonfile_prefix, 'bbox') result_files['proposal'] = '{}.{}.json'.format(jsonfile_prefix, 'bbox') result_files['segm'] = '{}.{}.json'.format(jsonfile_prefix, 'segm') mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = self._proposal2json(results) result_files['proposal'] = '{}.{}.json'.format(jsonfile_prefix, 'proposal') mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') result_files = result_files </DeepExtract> return (result_files, tmp_dir)
def format_results(self, results, jsonfile_prefix=None, **kwargs): """Format the results to json (standard format for COCO evaluation). Args: results (list): Testing results of the dataset. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. Returns: tuple: (result_files, tmp_dir), result_files is a dict containing the json filepaths, tmp_dir is the temporal directory created for saving json files when jsonfile_prefix is not specified. """ assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self)) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = dict() if isinstance(results[0], list): json_results = self._det2json(results) result_files['bbox'] = '{}.{}.json'.format(jsonfile_prefix, 'bbox') result_files['proposal'] = '{}.{}.json'.format(jsonfile_prefix, 'bbox') mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): json_results = self._segm2json(results) result_files['bbox'] = '{}.{}.json'.format(jsonfile_prefix, 'bbox') result_files['proposal'] = '{}.{}.json'.format(jsonfile_prefix, 'bbox') result_files['segm'] = '{}.{}.json'.format(jsonfile_prefix, 'segm') mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = self._proposal2json(results) result_files['proposal'] = '{}.{}.json'.format(jsonfile_prefix, 'proposal') mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') result_files = result_files return (result_files, tmp_dir)
D2Det
positive
def _add_metadata(ret_dict, this_file, columns, sample0, sample1, is_edge): """Read metadata from a single file and add it to `ret_dict`. Parameters ---------- ret_dict : OrderedDict Dictionary to which metadata will be added. this_file : string Full path to the file from which metadata will be read. columns : None | string | list of strings A string or list of strings giving the field/column name of metadata to return. If None, all available columns will be read. sample0 : int Sample index for start of read, given in the number of samples since the epoch (time_since_epoch*sample_rate). sample1 : int Sample index for end of read (inclusive), given in the number of samples since the epoch (time_since_epoch*sample_rate). is_edge : bool If True, then this is the first or last file in a sequence spanning indices from `sample0` to `sample1` and those read boundaries must be taken into account. If False, all samples from the file will be read ignoring `sample0` and `sample1`. """ try: with h5py.File(this_file, 'r') as f: keys = list(f.keys()) idxs = np.fromiter(keys, np.int64, count=len(keys)) idxs.sort() if is_edge: valid = np.logical_and(idxs >= sample0, idxs <= sample1) idxs = idxs[valid] for idx in idxs: value = f[str(idx)] if columns is None: <DeepExtract> if isinstance(value, h5py.Dataset): val = value[()] if isinstance(val, np.generic): val = val.item() elif isinstance(val, bytes): try: val = val.decode() except UnicodeDecodeError: pass ret_dict[idx] = val else: ret_dict[idx] = {} for (key, value) in value.items(): self._populate_data(ret_dict[idx], value, key) </DeepExtract> elif isinstance(columns, six.string_types): <DeepExtract> if isinstance(value[columns], h5py.Dataset): val = value[columns][()] if isinstance(val, np.generic): val = val.item() elif isinstance(val, bytes): try: val = val.decode() except UnicodeDecodeError: pass ret_dict[idx] = val else: ret_dict[idx] = {} for (key, value) in value[columns].items(): self._populate_data(ret_dict[idx], value, key) </DeepExtract> else: ret_dict[idx] = {} for column in columns: <DeepExtract> if isinstance(value[column], h5py.Dataset): val = value[column][()] if isinstance(val, np.generic): val = val.item() elif isinstance(val, bytes): try: val = val.decode() except UnicodeDecodeError: pass ret_dict[idx][column] = val else: ret_dict[idx][column] = {} for (key, value) in value[column].items(): self._populate_data(ret_dict[idx][column], value, key) </DeepExtract> except IOError: if os.access(this_file, os.R_OK) and os.access(this_file, os.W_OK): if time.time() - os.path.getmtime(this_file) > self._file_cadence_secs: traceback.print_exc() errstr = 'WARNING: %s being deleted because it raised an error and is not new' print(errstr % this_file) os.remove(this_file)
def _add_metadata(ret_dict, this_file, columns, sample0, sample1, is_edge): """Read metadata from a single file and add it to `ret_dict`. Parameters ---------- ret_dict : OrderedDict Dictionary to which metadata will be added. this_file : string Full path to the file from which metadata will be read. columns : None | string | list of strings A string or list of strings giving the field/column name of metadata to return. If None, all available columns will be read. sample0 : int Sample index for start of read, given in the number of samples since the epoch (time_since_epoch*sample_rate). sample1 : int Sample index for end of read (inclusive), given in the number of samples since the epoch (time_since_epoch*sample_rate). is_edge : bool If True, then this is the first or last file in a sequence spanning indices from `sample0` to `sample1` and those read boundaries must be taken into account. If False, all samples from the file will be read ignoring `sample0` and `sample1`. """ try: with h5py.File(this_file, 'r') as f: keys = list(f.keys()) idxs = np.fromiter(keys, np.int64, count=len(keys)) idxs.sort() if is_edge: valid = np.logical_and(idxs >= sample0, idxs <= sample1) idxs = idxs[valid] for idx in idxs: value = f[str(idx)] if columns is None: if isinstance(value, h5py.Dataset): val = value[()] if isinstance(val, np.generic): val = val.item() elif isinstance(val, bytes): try: val = val.decode() except UnicodeDecodeError: pass ret_dict[idx] = val else: ret_dict[idx] = {} for (key, value) in value.items(): self._populate_data(ret_dict[idx], value, key) elif isinstance(columns, six.string_types): if isinstance(value[columns], h5py.Dataset): val = value[columns][()] if isinstance(val, np.generic): val = val.item() elif isinstance(val, bytes): try: val = val.decode() except UnicodeDecodeError: pass ret_dict[idx] = val else: ret_dict[idx] = {} for (key, value) in value[columns].items(): self._populate_data(ret_dict[idx], value, key) else: ret_dict[idx] = {} for column in columns: if isinstance(value[column], h5py.Dataset): val = value[column][()] if isinstance(val, np.generic): val = val.item() elif isinstance(val, bytes): try: val = val.decode() except UnicodeDecodeError: pass ret_dict[idx][column] = val else: ret_dict[idx][column] = {} for (key, value) in value[column].items(): self._populate_data(ret_dict[idx][column], value, key) except IOError: if os.access(this_file, os.R_OK) and os.access(this_file, os.W_OK): if time.time() - os.path.getmtime(this_file) > self._file_cadence_secs: traceback.print_exc() errstr = 'WARNING: %s being deleted because it raised an error and is not new' print(errstr % this_file) os.remove(this_file)
digital_rf
positive
def Start(self): """ Starts the playback. Called as a thread from dialog boxes """ self.paused = False wx.PostEvent(self.parentWin, JetStatusEvent(JetDefs.PST_PLAY, None)) FileKillClean(JetDefs.TEMP_JET_CONFIG_FILE) self.jet_file = JetFile(JetDefs.TEMP_JET_CONFIG_FILE, '') self.jet_file.AddSegment(self.segment.segname, self.segment.filename, self.segment.start, self.segment.end, self.segment.length, SegmentOutputFile(self.segment.segname, JetDefs.TEMP_JET_CONFIG_FILE), self.segment.quantize, self.segment.jetevents, self.segment.dlsfile, None, self.segment.transpose, self.segment.repeat, self.segment.mute_flags) userID = 0 dls_num = -1 seg_num = 0 if len(self.segment.dlsfile) > 0: self.jet_file.libraries.append(self.segment.dlsfile) dls_num = 0 self.jet_file.SaveJetConfig(JetDefs.TEMP_JET_CONFIG_FILE) self.jet_file.WriteJetFileFromConfig(JetDefs.TEMP_JET_CONFIG_FILE) if not ValidateConfig(self.jet_file): return self.queueSegs = [] self.queueSegs.append(QueueSeg(self.segment.segname, userID, seg_num, dls_num, self.segment.repeat, self.segment.transpose, self.segment.mute_flags)) self.jet = JET() self.jet.eas.StartWave() self.jet.OpenFile(self.jet_file.config.filename) index = 0 Queue(self.jet, self.queueSegs[index]) index += 1 self.jet.Play() <DeepExtract> with self.playerLock: self.keepPlaying = True </DeepExtract> while self.GetKeepPlayingFlag(): self.jet.Render() status = self.jet.Status() if status.numQueuedSegments == 0: break self.jet.GetAppEvent() if index < len(self.queueSegs) and status.numQueuedSegments < 2: Queue(self.jet, self.queueSegs[index]) index += 1 wx.PostEvent(self.parentWin, JetStatusEvent(JetDefs.PST_UPD_LOCATION, status.location)) SafeJetShutdown(self.playerLock, self.jet) FileKillClean(SegmentOutputFile(self.segment.segname, JetDefs.TEMP_JET_CONFIG_FILE)) FileKillClean(JetDefs.TEMP_JET_CONFIG_FILE) FileKillClean(self.jet_file.config.filename) <DeepExtract> with self.playerLock: self.keepPlaying = False </DeepExtract> wx.PostEvent(self.parentWin, JetStatusEvent(JetDefs.PST_DONE, None)) wx.PostEvent(self.parentWin, JetStatusEvent(JetDefs.PST_UPD_LOCATION, 0))
def Start(self): """ Starts the playback. Called as a thread from dialog boxes """ self.paused = False wx.PostEvent(self.parentWin, JetStatusEvent(JetDefs.PST_PLAY, None)) FileKillClean(JetDefs.TEMP_JET_CONFIG_FILE) self.jet_file = JetFile(JetDefs.TEMP_JET_CONFIG_FILE, '') self.jet_file.AddSegment(self.segment.segname, self.segment.filename, self.segment.start, self.segment.end, self.segment.length, SegmentOutputFile(self.segment.segname, JetDefs.TEMP_JET_CONFIG_FILE), self.segment.quantize, self.segment.jetevents, self.segment.dlsfile, None, self.segment.transpose, self.segment.repeat, self.segment.mute_flags) userID = 0 dls_num = -1 seg_num = 0 if len(self.segment.dlsfile) > 0: self.jet_file.libraries.append(self.segment.dlsfile) dls_num = 0 self.jet_file.SaveJetConfig(JetDefs.TEMP_JET_CONFIG_FILE) self.jet_file.WriteJetFileFromConfig(JetDefs.TEMP_JET_CONFIG_FILE) if not ValidateConfig(self.jet_file): return self.queueSegs = [] self.queueSegs.append(QueueSeg(self.segment.segname, userID, seg_num, dls_num, self.segment.repeat, self.segment.transpose, self.segment.mute_flags)) self.jet = JET() self.jet.eas.StartWave() self.jet.OpenFile(self.jet_file.config.filename) index = 0 Queue(self.jet, self.queueSegs[index]) index += 1 self.jet.Play() with self.playerLock: self.keepPlaying = True while self.GetKeepPlayingFlag(): self.jet.Render() status = self.jet.Status() if status.numQueuedSegments == 0: break self.jet.GetAppEvent() if index < len(self.queueSegs) and status.numQueuedSegments < 2: Queue(self.jet, self.queueSegs[index]) index += 1 wx.PostEvent(self.parentWin, JetStatusEvent(JetDefs.PST_UPD_LOCATION, status.location)) SafeJetShutdown(self.playerLock, self.jet) FileKillClean(SegmentOutputFile(self.segment.segname, JetDefs.TEMP_JET_CONFIG_FILE)) FileKillClean(JetDefs.TEMP_JET_CONFIG_FILE) FileKillClean(self.jet_file.config.filename) with self.playerLock: self.keepPlaying = False wx.PostEvent(self.parentWin, JetStatusEvent(JetDefs.PST_DONE, None)) wx.PostEvent(self.parentWin, JetStatusEvent(JetDefs.PST_UPD_LOCATION, 0))
Browsers
positive
def install_update(github_slug, current_version): """If a newer release is available, download and install it :param github_slug: ``username/repo`` for workflow's GitHub repo :param current_version: the currently installed version of the workflow. :ref:`Semantic versioning <semver>` is required. :type current_version: ``unicode`` If an update is available, it will be downloaded and installed. :returns: ``True`` if an update is installed, else ``False`` """ update_data = wf().cached_data('__workflow_update_status', max_age=0) if not update_data or not update_data.get('available'): wf().logger.info('No update available') return False <DeepExtract> filename = update_data['download_url'].split('/')[-1] if not update_data['download_url'].endswith('.alfredworkflow') or not filename.endswith('.alfredworkflow'): raise ValueError('Attachment `{}` not a workflow'.format(filename)) local_path = os.path.join(tempfile.gettempdir(), filename) wf().logger.debug('Downloading updated workflow from `{0}` to `{1}` ...'.format(update_data['download_url'], local_path)) response = web.get(update_data['download_url']) with open(local_path, 'wb') as output: output.write(response.content) local_file = local_path </DeepExtract> wf().logger.info('Installing updated workflow ...') subprocess.call(['open', local_file]) update_data['available'] = False wf().cache_data('__workflow_update_status', update_data) return True
def install_update(github_slug, current_version): """If a newer release is available, download and install it :param github_slug: ``username/repo`` for workflow's GitHub repo :param current_version: the currently installed version of the workflow. :ref:`Semantic versioning <semver>` is required. :type current_version: ``unicode`` If an update is available, it will be downloaded and installed. :returns: ``True`` if an update is installed, else ``False`` """ update_data = wf().cached_data('__workflow_update_status', max_age=0) if not update_data or not update_data.get('available'): wf().logger.info('No update available') return False filename = update_data['download_url'].split('/')[-1] if not update_data['download_url'].endswith('.alfredworkflow') or not filename.endswith('.alfredworkflow'): raise ValueError('Attachment `{}` not a workflow'.format(filename)) local_path = os.path.join(tempfile.gettempdir(), filename) wf().logger.debug('Downloading updated workflow from `{0}` to `{1}` ...'.format(update_data['download_url'], local_path)) response = web.get(update_data['download_url']) with open(local_path, 'wb') as output: output.write(response.content) local_file = local_path wf().logger.info('Installing updated workflow ...') subprocess.call(['open', local_file]) update_data['available'] = False wf().cache_data('__workflow_update_status', update_data) return True
alfred-rates
positive
def fetch_teams_data(self) -> None: _logger.debug('Fetching Teams data...') for team in self.teams: team.fetch_team_data() <DeepExtract> _logger.debug('Fetching Cognito External IdP data...') client = boto3_client(service_name='cognito-idp') response: Dict[str, Any] = client.describe_user_pool(UserPoolId=self.user_pool_id) domain: str = response['UserPool'].get('Domain') self.cognito_external_provider_domain = f'{domain}.auth.{self.region}.amazoncognito.com' _logger.debug('cognito_external_provider_domain: %s', self.cognito_external_provider_domain) response = client.describe_user_pool_client(UserPoolId=self.user_pool_id, ClientId=self.user_pool_client_id) self.cognito_external_provider_redirect = response['UserPoolClient']['CallbackURLs'][0] _logger.debug('cognito_external_provider_redirect: %s', self.cognito_external_provider_redirect) _logger.debug('Cognito External IdP data fetched successfully.') </DeepExtract> _logger.debug('Env data fetched successfully.')
def fetch_teams_data(self) -> None: _logger.debug('Fetching Teams data...') for team in self.teams: team.fetch_team_data() _logger.debug('Fetching Cognito External IdP data...') client = boto3_client(service_name='cognito-idp') response: Dict[str, Any] = client.describe_user_pool(UserPoolId=self.user_pool_id) domain: str = response['UserPool'].get('Domain') self.cognito_external_provider_domain = f'{domain}.auth.{self.region}.amazoncognito.com' _logger.debug('cognito_external_provider_domain: %s', self.cognito_external_provider_domain) response = client.describe_user_pool_client(UserPoolId=self.user_pool_id, ClientId=self.user_pool_client_id) self.cognito_external_provider_redirect = response['UserPoolClient']['CallbackURLs'][0] _logger.debug('cognito_external_provider_redirect: %s', self.cognito_external_provider_redirect) _logger.debug('Cognito External IdP data fetched successfully.') _logger.debug('Env data fetched successfully.')
aws-orbit-workbench
positive
def pipeline(root): """Beam pipeline for preprocessing Kepler events.""" config_json = json.dumps(config, indent=2) root | beam.Create([config_json]) | 'write_config' >> beam.io.WriteToText(os.path.join(FLAGS.output_dir, 'config.json'), num_shards=1, shard_name_template='') <DeepExtract> with tf.gfile.Open(config.input_event_csv_file) as f: events = pd.read_csv(f, comment='#') events.tce_duration /= 24 logging.info('Read event table with %d rows.', len(events)) events['tce_id'] = events.apply(lambda event: '%d_%d' % (event.kepid, event.tce_plnt_num), axis=1) if len(set(events['tce_id'])) != len(events): raise ValueError('TCE ids are not unique.') for (column, whitelist) in config.column_value_whitelists.items(): allowed_events = events[column].isin(whitelist) events = events[allowed_events] logging.info('Filtered to %d events satisfying whitelists %s', len(events), config.column_value_whitelists) events = events </DeepExtract> read_light_curve = light_curve_fns.ReadLightCurveDoFn(config.kepler_data_dir, injected_group=config.injected_group, scramble_type=config.scramble_type, invert=config.invert_light_curves) process_light_curve = light_curve_fns.ProcessLightCurveDoFn(gap_width=config.gap_width, normalize_method=config.normalize_method, normalize_args=config.normalize_args, upward_outlier_sigma_cut=config.upward_outlier_sigma_cut, remove_events_width_factor=config.remove_events_width_factor) generate_example = GenerateExampleDoFn() partition_fn = utils.TrainValTestPartitionFn(key_name='tce_id', partitions={'train': 0.8, 'val': 0.1, 'test': 0.1}, keys=events.tce_id.values) <DeepExtract> def _prepare_event(event): """Maps an event to a dict of pipeline inputs.""" kepler_id = event['kepid'] tce_id = '%d_%d' % (kepler_id, event['tce_plnt_num']) result = {'kepler_id': kepler_id, 'tce_id': tce_id, 'event': event} if config.remove_event_for_spline: result['events_to_mask_for_spline'] = [light_curve_pb2.PeriodicEvent(period=event.tce_period, t0=event.tce_time0bk, duration=event.tce_duration)] pipeline_inputs = result pipeline_inputs = [_prepare_event(event) for (_, event) in events.iterrows()] </DeepExtract> results = root | 'create_pcollection' >> beam.Create(pipeline_inputs) | 'read_light_curves' >> beam.ParDo(read_light_curve) | 'process_light_curves' >> beam.ParDo(process_light_curve) | 'generate_examples' >> beam.ParDo(generate_example) | 'reshuffle' >> beam.Reshuffle() | 'partition_results' >> beam.Partition(partition_fn, partition_fn.num_partitions) for (name, subset) in zip(partition_fn.partition_names, results): if name == 'train': num_shards = FLAGS.num_shards_train elif name == 'val': num_shards = FLAGS.num_shards_val elif name == 'test': num_shards = FLAGS.num_shards_test else: raise ValueError('Unrecognized subset name: %s' % name) utils.write_to_tfrecord(subset, output_dir=FLAGS.output_dir, output_name=name, value_name='example', value_coder=beam.coders.ProtoCoder(tf.train.Example), num_shards=num_shards)
def pipeline(root): """Beam pipeline for preprocessing Kepler events.""" config_json = json.dumps(config, indent=2) root | beam.Create([config_json]) | 'write_config' >> beam.io.WriteToText(os.path.join(FLAGS.output_dir, 'config.json'), num_shards=1, shard_name_template='') with tf.gfile.Open(config.input_event_csv_file) as f: events = pd.read_csv(f, comment='#') events.tce_duration /= 24 logging.info('Read event table with %d rows.', len(events)) events['tce_id'] = events.apply(lambda event: '%d_%d' % (event.kepid, event.tce_plnt_num), axis=1) if len(set(events['tce_id'])) != len(events): raise ValueError('TCE ids are not unique.') for (column, whitelist) in config.column_value_whitelists.items(): allowed_events = events[column].isin(whitelist) events = events[allowed_events] logging.info('Filtered to %d events satisfying whitelists %s', len(events), config.column_value_whitelists) events = events read_light_curve = light_curve_fns.ReadLightCurveDoFn(config.kepler_data_dir, injected_group=config.injected_group, scramble_type=config.scramble_type, invert=config.invert_light_curves) process_light_curve = light_curve_fns.ProcessLightCurveDoFn(gap_width=config.gap_width, normalize_method=config.normalize_method, normalize_args=config.normalize_args, upward_outlier_sigma_cut=config.upward_outlier_sigma_cut, remove_events_width_factor=config.remove_events_width_factor) generate_example = GenerateExampleDoFn() partition_fn = utils.TrainValTestPartitionFn(key_name='tce_id', partitions={'train': 0.8, 'val': 0.1, 'test': 0.1}, keys=events.tce_id.values) def _prepare_event(event): """Maps an event to a dict of pipeline inputs.""" kepler_id = event['kepid'] tce_id = '%d_%d' % (kepler_id, event['tce_plnt_num']) result = {'kepler_id': kepler_id, 'tce_id': tce_id, 'event': event} if config.remove_event_for_spline: result['events_to_mask_for_spline'] = [light_curve_pb2.PeriodicEvent(period=event.tce_period, t0=event.tce_time0bk, duration=event.tce_duration)] pipeline_inputs = result pipeline_inputs = [_prepare_event(event) for (_, event) in events.iterrows()] results = root | 'create_pcollection' >> beam.Create(pipeline_inputs) | 'read_light_curves' >> beam.ParDo(read_light_curve) | 'process_light_curves' >> beam.ParDo(process_light_curve) | 'generate_examples' >> beam.ParDo(generate_example) | 'reshuffle' >> beam.Reshuffle() | 'partition_results' >> beam.Partition(partition_fn, partition_fn.num_partitions) for (name, subset) in zip(partition_fn.partition_names, results): if name == 'train': num_shards = FLAGS.num_shards_train elif name == 'val': num_shards = FLAGS.num_shards_val elif name == 'test': num_shards = FLAGS.num_shards_test else: raise ValueError('Unrecognized subset name: %s' % name) utils.write_to_tfrecord(subset, output_dir=FLAGS.output_dir, output_name=name, value_name='example', value_coder=beam.coders.ProtoCoder(tf.train.Example), num_shards=num_shards)
exoplanet-ml
positive
def test_interpolate_3d_cubic_extrapolate_linear_xinfymidzsup(self): """3D cubic interpolation. Test values in the extrapolation area with x below and y inside and z above the interpolation area. """ <DeepExtract> if x is None: x = self.x if y is None: y = self.y if z is None: z = self.z if data is None: data = self.data self.interp_data = data_file.cubic_interpolated_data self.extrap_data_nea = data_file.cubic_nearest_extrapolated_data self.extrap_data_lin = data_file.cubic_linear_extrapolated_data self.extrap_data_qua = data_file.cubic_quadratic_extrapolated_data self.interp_func = interpolators3d.Interpolate3DCubic(x, y, z, data, extrapolate=True, extrapolation_range=10, extrapolation_type='linear', tolerate_single_value=tolerate_single_value) </DeepExtract> <DeepExtract> (mini, maxi) = self.extrapol_xdomains[0] (minj, maxj) = self.extrapol_ydomains[1] (mink, maxk) = self.extrapol_zdomains[2] for iex in range(mini, maxi): for jex in range(minj, maxj): for kex in range(mink, maxk): self.assertAlmostEqual(self.interp_func(self.xsamples_ex[iex], self.ysamples_ex[jex], self.zsamples_ex[kex]), self.extrap_data_lin[0][1][2][iex - mini, jex - minj, kex - mink], delta=1e-08) </DeepExtract>
def test_interpolate_3d_cubic_extrapolate_linear_xinfymidzsup(self): """3D cubic interpolation. Test values in the extrapolation area with x below and y inside and z above the interpolation area. """ if x is None: x = self.x if y is None: y = self.y if z is None: z = self.z if data is None: data = self.data self.interp_data = data_file.cubic_interpolated_data self.extrap_data_nea = data_file.cubic_nearest_extrapolated_data self.extrap_data_lin = data_file.cubic_linear_extrapolated_data self.extrap_data_qua = data_file.cubic_quadratic_extrapolated_data self.interp_func = interpolators3d.Interpolate3DCubic(x, y, z, data, extrapolate=True, extrapolation_range=10, extrapolation_type='linear', tolerate_single_value=tolerate_single_value) (mini, maxi) = self.extrapol_xdomains[0] (minj, maxj) = self.extrapol_ydomains[1] (mink, maxk) = self.extrapol_zdomains[2] for iex in range(mini, maxi): for jex in range(minj, maxj): for kex in range(mink, maxk): self.assertAlmostEqual(self.interp_func(self.xsamples_ex[iex], self.ysamples_ex[jex], self.zsamples_ex[kex]), self.extrap_data_lin[0][1][2][iex - mini, jex - minj, kex - mink], delta=1e-08)
core
positive
def hide_groups(self, group, hide): <DeepExtract> groups = {'Chain Bones': [self.bones[0].source, self.bones[1].source], 'Gizmo Bones': [self.bones[0].gizmo, self.bones[1].gizmo], 'Mechanic Bones': [self.bones[0].stretch, self.bones[1].stretch], 'Offset Bones': [self.target.offset], 'Control Bones': [self.target.source], 'Floor Targets': [self.floor.bone], 'Kinematic Targets': [self.target.bone, self.pole.bone]} bone_groups = groups </DeepExtract> bones = self.id_data.data.edit_bones if self.id_data.mode == 'EDIT' else self.id_data.data.bones if group in bone_groups: for name in bone_groups[group]: bone = bones.get(name) if bone: bone.hide = hide
def hide_groups(self, group, hide): groups = {'Chain Bones': [self.bones[0].source, self.bones[1].source], 'Gizmo Bones': [self.bones[0].gizmo, self.bones[1].gizmo], 'Mechanic Bones': [self.bones[0].stretch, self.bones[1].stretch], 'Offset Bones': [self.target.offset], 'Control Bones': [self.target.source], 'Floor Targets': [self.floor.bone], 'Kinematic Targets': [self.target.bone, self.pole.bone]} bone_groups = groups bones = self.id_data.data.edit_bones if self.id_data.mode == 'EDIT' else self.id_data.data.bones if group in bone_groups: for name in bone_groups[group]: bone = bones.get(name) if bone: bone.hide = hide
B.L.E.N.D
positive
def load_config(self, config, path): """load_config(config, path) - Load a config object from an alternate path.""" if self.debug: <DeepExtract> self._write_message('note', 'load_config from %r' % path) </DeepExtract> config.load_from_path(path, self) return config
def load_config(self, config, path): """load_config(config, path) - Load a config object from an alternate path.""" if self.debug: self._write_message('note', 'load_config from %r' % path) config.load_from_path(path, self) return config
alive-nj
positive
def get_bsp_from_alist(alist_path, tcoh=5.0, tav='scan', phaseType='resid_phas', typeA='alist', band='', data_int_time=1.0): if typeA == 'alist': alist = hops.read_alist(alist_path) elif typeA == 'pickle': alist = pd.read_pickle(alist_path) if data_int_time < tcoh: <DeepExtract> if 'scan_no_tot' not in alist.columns: alist.loc[:, 'scan_no_tot'] = alist.loc[:, 'scan_id'] if 'sigma' not in alist.columns: alist.loc[:, 'sigma'] = alist.loc[:, 'amp'] / alist.loc[:, 'snr'] if 'std' not in alist.columns: alist.loc[:, 'std'] = alist.loc[:, 'sigma'] if 'band' not in alist.columns: alist.loc[:, 'band'] = ['unknown'] * np.shape(alist)[0] alist['track'] = list(map(lambda x: a2a.expt2track[x], alist['expt_no'])) alist['round_time'] = list(map(lambda x: np.round((x - datetime.datetime(2017, 4, 4)).total_seconds() / tcoh), alist['datetime'])) alist['vis'] = alist['vis'] = alist['amp'] * np.exp(1j * alist[phaseType] * np.pi / 180) if 'snr' in alist.columns: alist = alist[['datetime', 'band', 'baseline', 'source', 'polarization', 'vis', 'snr', 'std', 'sigma', 'track', 'expt_no', 'scan_no_tot', 'round_time']] alist = alist.groupby(('baseline', 'band', 'source', 'polarization', 'track', 'expt_no', 'scan_no_tot', 'round_time')).agg({'datetime': 'min', 'vis': np.mean, 'sigma': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'std': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'snr': lambda x: np.sqrt(np.sum(x ** 2))}) else: alist = alist[['datetime', 'band', 'baseline', 'source', 'polarization', 'vis', 'std', 'sigma', 'track', 'expt_no', 'scan_no_tot', 'round_time']] alist = alist.groupby(('baseline', 'band', 'source', 'polarization', 'track', 'expt_no', 'scan_no_tot', 'round_time')).agg({'datetime': 'min', 'vis': np.mean, 'sigma': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'std': lambda x: np.sqrt(np.sum(x ** 2)) / len(x)}) alist = alist.reset_index() alist['amp'] = np.abs(alist['vis']) alist[phaseType] = np.angle(alist['vis']) * 180 / np.pi if 'snr' in alist.columns: alist = alist[['datetime', 'band', 'baseline', 'source', 'polarization', 'amp', phaseType, 'snr', 'std', 'sigma', 'track', 'expt_no', 'scan_no_tot']] else: alist = alist[['datetime', 'band', 'baseline', 'source', 'polarization', 'amp', phaseType, 'std', 'sigma', 'track', 'expt_no', 'scan_no_tot']] alist = alist </DeepExtract> <DeepExtract> if match_by_scan: alist = ut.coh_avg_vis(alist, tavg='scan', phase_type=phaseType) if 'snr' not in alist.columns: alist.loc[:, 'snr'] = alist.loc[:, 'amp'] / alist.loc[:, 'sigma'] if debias_snr == True: foo = np.maximum(np.asarray(alist['snr']) ** 2 - 1, 0) alist['snr'] = np.sqrt(foo) alist.drop(list(alist[alist.snr < snr_cut].index.values), inplace=True) alist = alist[alist['polarization'] == 'LL'] alist = alist.loc[:, ~alist.columns.duplicated()] if 'scan_id' not in alist.columns: alist.loc[:, 'scan_id'] = alist.loc[:, 'scan_no_tot'] if 'band' not in alist.columns: alist.loc[:, 'band'] = np.nan alist['amps'] = alist['amp'] alist['snrs'] = alist['snr'] if 'fracpol' in alist.columns: alist['fracpols'] = alist['fracpol'] else: alist['fracpols'] = 0 triL = list_all_triangles(alist) (tri_baseL, sgnL) = triangles2baselines(triL, alist) triL = baselines2triangles(tri_baseL) (tri_baseL, sgnL) = triangles2baselines(triL, alist) bsp_out = pd.DataFrame({}) for cou in range(len(triL)): Tri = tri_baseL[cou] if verbose: print(Tri) signat = sgnL[cou] condB1 = alist['baseline'] == Tri[0] condB2 = alist['baseline'] == Tri[1] condB3 = alist['baseline'] == Tri[2] condB = condB1 | condB2 | condB3 alist_Tri = alist.loc[condB, ['expt_no', 'scan_id', 'source', 'datetime', 'baseline', phaseType, 'amp', 'snr', 'gmst', 'band', 'amps', 'snrs', 'fracpols']] if match_by_scan: tlist = alist_Tri.groupby(['band', 'scan_id']).filter(lambda x: len(x) == 3) else: tlist = alist_Tri.groupby(['band', 'datetime']).filter(lambda x: len(x) == 3) tlist.loc[:, 'sigma'] = tlist.loc[:, 'amp'] / tlist.loc[:, 'snr'] for cou2 in range(3): tlist.loc[tlist.loc[:, 'baseline'] == Tri[cou2], phaseType] *= signat[cou2] * np.pi / 180.0 tlist.loc[:, 'sigma'] = 1.0 / tlist.loc[:, 'snr'] ** 2 if match_by_scan: bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id']).agg({phaseType: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x), 'datetime': min}) else: bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id', 'datetime']).agg({phaseType: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x)}) bsp.loc[:, 'bsp'] = bsp.loc[:, 'amp'] * np.exp(1j * bsp.loc[:, phaseType]) bsp.loc[:, 'snr'] = 1.0 / bsp.loc[:, 'sigma'] bsp.loc[:, 'sigma'] = bsp.loc[:, 'amp'] * bsp.loc[:, 'sigma'] bsp.loc[:, 'triangle'] = [triL[cou]] * np.shape(bsp)[0] bsp.loc[:, 'polarization'] = ['LL'] * np.shape(bsp)[0] bsp.loc[:, 'cphase'] = np.angle(bsp.loc[:, 'bsp']) * 180.0 / np.pi bsp.loc[:, 'amp'] = np.abs(bsp.loc[:, 'bsp']) bsp.loc[:, 'sigmaCP'] = 1.0 / bsp.loc[:, 'snr'] * 180.0 / np.pi bsp_out = pd.concat([bsp_out, bsp]) if verbose: print(triL[cou] + ': ' + str(np.shape(bsp)[0]) + ' closure phases') bsp_out = bsp_out.reset_index() bsp_out['rel_err'] = np.asarray(bsp_out['cphase']) / np.asarray(bsp_out['sigmaCP']) bsp_ll = bsp_out </DeepExtract> <DeepExtract> if match_by_scan: alist = ut.coh_avg_vis(alist, tavg='scan', phase_type=phaseType) if 'snr' not in alist.columns: alist.loc[:, 'snr'] = alist.loc[:, 'amp'] / alist.loc[:, 'sigma'] if debias_snr == True: foo = np.maximum(np.asarray(alist['snr']) ** 2 - 1, 0) alist['snr'] = np.sqrt(foo) alist.drop(list(alist[alist.snr < snr_cut].index.values), inplace=True) alist = alist[alist['polarization'] == 'RR'] alist = alist.loc[:, ~alist.columns.duplicated()] if 'scan_id' not in alist.columns: alist.loc[:, 'scan_id'] = alist.loc[:, 'scan_no_tot'] if 'band' not in alist.columns: alist.loc[:, 'band'] = np.nan alist['amps'] = alist['amp'] alist['snrs'] = alist['snr'] if 'fracpol' in alist.columns: alist['fracpols'] = alist['fracpol'] else: alist['fracpols'] = 0 triL = list_all_triangles(alist) (tri_baseL, sgnL) = triangles2baselines(triL, alist) triL = baselines2triangles(tri_baseL) (tri_baseL, sgnL) = triangles2baselines(triL, alist) bsp_out = pd.DataFrame({}) for cou in range(len(triL)): Tri = tri_baseL[cou] if verbose: print(Tri) signat = sgnL[cou] condB1 = alist['baseline'] == Tri[0] condB2 = alist['baseline'] == Tri[1] condB3 = alist['baseline'] == Tri[2] condB = condB1 | condB2 | condB3 alist_Tri = alist.loc[condB, ['expt_no', 'scan_id', 'source', 'datetime', 'baseline', phaseType, 'amp', 'snr', 'gmst', 'band', 'amps', 'snrs', 'fracpols']] if match_by_scan: tlist = alist_Tri.groupby(['band', 'scan_id']).filter(lambda x: len(x) == 3) else: tlist = alist_Tri.groupby(['band', 'datetime']).filter(lambda x: len(x) == 3) tlist.loc[:, 'sigma'] = tlist.loc[:, 'amp'] / tlist.loc[:, 'snr'] for cou2 in range(3): tlist.loc[tlist.loc[:, 'baseline'] == Tri[cou2], phaseType] *= signat[cou2] * np.pi / 180.0 tlist.loc[:, 'sigma'] = 1.0 / tlist.loc[:, 'snr'] ** 2 if match_by_scan: bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id']).agg({phaseType: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x), 'datetime': min}) else: bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id', 'datetime']).agg({phaseType: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x)}) bsp.loc[:, 'bsp'] = bsp.loc[:, 'amp'] * np.exp(1j * bsp.loc[:, phaseType]) bsp.loc[:, 'snr'] = 1.0 / bsp.loc[:, 'sigma'] bsp.loc[:, 'sigma'] = bsp.loc[:, 'amp'] * bsp.loc[:, 'sigma'] bsp.loc[:, 'triangle'] = [triL[cou]] * np.shape(bsp)[0] bsp.loc[:, 'polarization'] = ['RR'] * np.shape(bsp)[0] bsp.loc[:, 'cphase'] = np.angle(bsp.loc[:, 'bsp']) * 180.0 / np.pi bsp.loc[:, 'amp'] = np.abs(bsp.loc[:, 'bsp']) bsp.loc[:, 'sigmaCP'] = 1.0 / bsp.loc[:, 'snr'] * 180.0 / np.pi bsp_out = pd.concat([bsp_out, bsp]) if verbose: print(triL[cou] + ': ' + str(np.shape(bsp)[0]) + ' closure phases') bsp_out = bsp_out.reset_index() bsp_out['rel_err'] = np.asarray(bsp_out['cphase']) / np.asarray(bsp_out['sigmaCP']) bsp_rr = bsp_out </DeepExtract> bsp = pd.concat([bsp_ll, bsp_rr], ignore_index=True) <DeepExtract> bsp.loc[:, 'vis'] = bsp.loc[:, 'vis'] = bsp.loc[:, 'amp'] * np.exp(1j * bsp.loc[:, 'cphase'] * np.pi / 180) bsp.loc[:, 'circ_sigma'] = bsp.loc[:, 'cphase'] if 'band' not in bsp.columns: bsp.loc[:, 'band'] = ['unknown'] * np.shape(bsp)[0] if tav == 'scan': bsp = bsp[['datetime', 'band', 'triangle', 'source', 'polarization', 'vis', 'sigmaCP', 'snr', 'scan_id', 'expt_no', 'circ_sigma']] bsp = bsp.groupby(['triangle', 'band', 'source', 'polarization', 'expt_no', 'scan_id']).agg({'datetime': 'min', 'vis': np.mean, 'sigmaCP': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'snr': lambda x: np.sqrt(np.sum(x ** 2)), 'circ_sigma': circular_std_of_mean_dif}) else: bsp.loc[:, 'round_time'] = list(map(lambda x: np.round((x - datetime.datetime(2017, 4, 4)).total_seconds() / tav), bsp.loc[:, 'datetime'])) bsp = bsp[['datetime', 'band', 'triangle', 'source', 'polarization', 'vis', 'sigma', 'sigmaCP', 'scan_id', 'expt_no', 'round_time', 'snr', 'circ_sigma']] bsp = bsp.groupby(['triangle', 'band', 'source', 'polarization', 'expt_no', 'scan_id', 'round_time']).agg({'datetime': 'min', 'vis': np.mean, 'sigmaCP': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'snr': lambda x: np.sqrt(np.sum(x ** 2)), 'circ_sigma': circular_std_of_mean_dif}) bsp = bsp.reset_index() bsp['amp'] = np.abs(bsp['vis']) bsp['cphase'] = np.angle(bsp['vis']) * 180 / np.pi bsp = bsp[['datetime', 'band', 'triangle', 'source', 'polarization', 'amp', 'cphase', 'sigmaCP', 'snr', 'expt_no', 'scan_id', 'circ_sigma']] bsp_av = bsp </DeepExtract> <DeepExtract> bsp_av.loc[:, 'sigmaCP'] = bsp_av.loc[:, 'circ_sigma'] bsp_av = bsp_av </DeepExtract> <DeepExtract> if to_what == 'cphase': bsp_av['TotErr'] = np.mod(np.asarray(bsp_av['cphase']), 360.0) bsp_av['TotErr'] = np.minimum(np.asarray(bsp_av['TotErr']), np.abs(np.asarray(bsp_av['TotErr']) - 360.0)) bsp_av['RelErr'] = np.asarray(bsp_av['TotErr']) / np.asarray(bsp_av['sigmaCP']) elif to_what == 'amp': bsp_av['TotErr'] = bsp_av['amp'] bsp_av['RelErr'] = np.asarray(bsp_av['TotErr']) / np.asarray(bsp_av['sigma']) bsp_av = bsp_av </DeepExtract> if band != '': <DeepExtract> bsp_av['band'] = [band] * np.shape(bsp_av)[0] bsp_av = bsp_av </DeepExtract> return bsp_av
def get_bsp_from_alist(alist_path, tcoh=5.0, tav='scan', phaseType='resid_phas', typeA='alist', band='', data_int_time=1.0): if typeA == 'alist': alist = hops.read_alist(alist_path) elif typeA == 'pickle': alist = pd.read_pickle(alist_path) if data_int_time < tcoh: if 'scan_no_tot' not in alist.columns: alist.loc[:, 'scan_no_tot'] = alist.loc[:, 'scan_id'] if 'sigma' not in alist.columns: alist.loc[:, 'sigma'] = alist.loc[:, 'amp'] / alist.loc[:, 'snr'] if 'std' not in alist.columns: alist.loc[:, 'std'] = alist.loc[:, 'sigma'] if 'band' not in alist.columns: alist.loc[:, 'band'] = ['unknown'] * np.shape(alist)[0] alist['track'] = list(map(lambda x: a2a.expt2track[x], alist['expt_no'])) alist['round_time'] = list(map(lambda x: np.round((x - datetime.datetime(2017, 4, 4)).total_seconds() / tcoh), alist['datetime'])) alist['vis'] = alist['vis'] = alist['amp'] * np.exp(1j * alist[phaseType] * np.pi / 180) if 'snr' in alist.columns: alist = alist[['datetime', 'band', 'baseline', 'source', 'polarization', 'vis', 'snr', 'std', 'sigma', 'track', 'expt_no', 'scan_no_tot', 'round_time']] alist = alist.groupby(('baseline', 'band', 'source', 'polarization', 'track', 'expt_no', 'scan_no_tot', 'round_time')).agg({'datetime': 'min', 'vis': np.mean, 'sigma': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'std': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'snr': lambda x: np.sqrt(np.sum(x ** 2))}) else: alist = alist[['datetime', 'band', 'baseline', 'source', 'polarization', 'vis', 'std', 'sigma', 'track', 'expt_no', 'scan_no_tot', 'round_time']] alist = alist.groupby(('baseline', 'band', 'source', 'polarization', 'track', 'expt_no', 'scan_no_tot', 'round_time')).agg({'datetime': 'min', 'vis': np.mean, 'sigma': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'std': lambda x: np.sqrt(np.sum(x ** 2)) / len(x)}) alist = alist.reset_index() alist['amp'] = np.abs(alist['vis']) alist[phaseType] = np.angle(alist['vis']) * 180 / np.pi if 'snr' in alist.columns: alist = alist[['datetime', 'band', 'baseline', 'source', 'polarization', 'amp', phaseType, 'snr', 'std', 'sigma', 'track', 'expt_no', 'scan_no_tot']] else: alist = alist[['datetime', 'band', 'baseline', 'source', 'polarization', 'amp', phaseType, 'std', 'sigma', 'track', 'expt_no', 'scan_no_tot']] alist = alist if match_by_scan: alist = ut.coh_avg_vis(alist, tavg='scan', phase_type=phaseType) if 'snr' not in alist.columns: alist.loc[:, 'snr'] = alist.loc[:, 'amp'] / alist.loc[:, 'sigma'] if debias_snr == True: foo = np.maximum(np.asarray(alist['snr']) ** 2 - 1, 0) alist['snr'] = np.sqrt(foo) alist.drop(list(alist[alist.snr < snr_cut].index.values), inplace=True) alist = alist[alist['polarization'] == 'LL'] alist = alist.loc[:, ~alist.columns.duplicated()] if 'scan_id' not in alist.columns: alist.loc[:, 'scan_id'] = alist.loc[:, 'scan_no_tot'] if 'band' not in alist.columns: alist.loc[:, 'band'] = np.nan alist['amps'] = alist['amp'] alist['snrs'] = alist['snr'] if 'fracpol' in alist.columns: alist['fracpols'] = alist['fracpol'] else: alist['fracpols'] = 0 triL = list_all_triangles(alist) (tri_baseL, sgnL) = triangles2baselines(triL, alist) triL = baselines2triangles(tri_baseL) (tri_baseL, sgnL) = triangles2baselines(triL, alist) bsp_out = pd.DataFrame({}) for cou in range(len(triL)): Tri = tri_baseL[cou] if verbose: print(Tri) signat = sgnL[cou] condB1 = alist['baseline'] == Tri[0] condB2 = alist['baseline'] == Tri[1] condB3 = alist['baseline'] == Tri[2] condB = condB1 | condB2 | condB3 alist_Tri = alist.loc[condB, ['expt_no', 'scan_id', 'source', 'datetime', 'baseline', phaseType, 'amp', 'snr', 'gmst', 'band', 'amps', 'snrs', 'fracpols']] if match_by_scan: tlist = alist_Tri.groupby(['band', 'scan_id']).filter(lambda x: len(x) == 3) else: tlist = alist_Tri.groupby(['band', 'datetime']).filter(lambda x: len(x) == 3) tlist.loc[:, 'sigma'] = tlist.loc[:, 'amp'] / tlist.loc[:, 'snr'] for cou2 in range(3): tlist.loc[tlist.loc[:, 'baseline'] == Tri[cou2], phaseType] *= signat[cou2] * np.pi / 180.0 tlist.loc[:, 'sigma'] = 1.0 / tlist.loc[:, 'snr'] ** 2 if match_by_scan: bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id']).agg({phaseType: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x), 'datetime': min}) else: bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id', 'datetime']).agg({phaseType: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x)}) bsp.loc[:, 'bsp'] = bsp.loc[:, 'amp'] * np.exp(1j * bsp.loc[:, phaseType]) bsp.loc[:, 'snr'] = 1.0 / bsp.loc[:, 'sigma'] bsp.loc[:, 'sigma'] = bsp.loc[:, 'amp'] * bsp.loc[:, 'sigma'] bsp.loc[:, 'triangle'] = [triL[cou]] * np.shape(bsp)[0] bsp.loc[:, 'polarization'] = ['LL'] * np.shape(bsp)[0] bsp.loc[:, 'cphase'] = np.angle(bsp.loc[:, 'bsp']) * 180.0 / np.pi bsp.loc[:, 'amp'] = np.abs(bsp.loc[:, 'bsp']) bsp.loc[:, 'sigmaCP'] = 1.0 / bsp.loc[:, 'snr'] * 180.0 / np.pi bsp_out = pd.concat([bsp_out, bsp]) if verbose: print(triL[cou] + ': ' + str(np.shape(bsp)[0]) + ' closure phases') bsp_out = bsp_out.reset_index() bsp_out['rel_err'] = np.asarray(bsp_out['cphase']) / np.asarray(bsp_out['sigmaCP']) bsp_ll = bsp_out if match_by_scan: alist = ut.coh_avg_vis(alist, tavg='scan', phase_type=phaseType) if 'snr' not in alist.columns: alist.loc[:, 'snr'] = alist.loc[:, 'amp'] / alist.loc[:, 'sigma'] if debias_snr == True: foo = np.maximum(np.asarray(alist['snr']) ** 2 - 1, 0) alist['snr'] = np.sqrt(foo) alist.drop(list(alist[alist.snr < snr_cut].index.values), inplace=True) alist = alist[alist['polarization'] == 'RR'] alist = alist.loc[:, ~alist.columns.duplicated()] if 'scan_id' not in alist.columns: alist.loc[:, 'scan_id'] = alist.loc[:, 'scan_no_tot'] if 'band' not in alist.columns: alist.loc[:, 'band'] = np.nan alist['amps'] = alist['amp'] alist['snrs'] = alist['snr'] if 'fracpol' in alist.columns: alist['fracpols'] = alist['fracpol'] else: alist['fracpols'] = 0 triL = list_all_triangles(alist) (tri_baseL, sgnL) = triangles2baselines(triL, alist) triL = baselines2triangles(tri_baseL) (tri_baseL, sgnL) = triangles2baselines(triL, alist) bsp_out = pd.DataFrame({}) for cou in range(len(triL)): Tri = tri_baseL[cou] if verbose: print(Tri) signat = sgnL[cou] condB1 = alist['baseline'] == Tri[0] condB2 = alist['baseline'] == Tri[1] condB3 = alist['baseline'] == Tri[2] condB = condB1 | condB2 | condB3 alist_Tri = alist.loc[condB, ['expt_no', 'scan_id', 'source', 'datetime', 'baseline', phaseType, 'amp', 'snr', 'gmst', 'band', 'amps', 'snrs', 'fracpols']] if match_by_scan: tlist = alist_Tri.groupby(['band', 'scan_id']).filter(lambda x: len(x) == 3) else: tlist = alist_Tri.groupby(['band', 'datetime']).filter(lambda x: len(x) == 3) tlist.loc[:, 'sigma'] = tlist.loc[:, 'amp'] / tlist.loc[:, 'snr'] for cou2 in range(3): tlist.loc[tlist.loc[:, 'baseline'] == Tri[cou2], phaseType] *= signat[cou2] * np.pi / 180.0 tlist.loc[:, 'sigma'] = 1.0 / tlist.loc[:, 'snr'] ** 2 if match_by_scan: bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id']).agg({phaseType: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x), 'datetime': min}) else: bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id', 'datetime']).agg({phaseType: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x)}) bsp.loc[:, 'bsp'] = bsp.loc[:, 'amp'] * np.exp(1j * bsp.loc[:, phaseType]) bsp.loc[:, 'snr'] = 1.0 / bsp.loc[:, 'sigma'] bsp.loc[:, 'sigma'] = bsp.loc[:, 'amp'] * bsp.loc[:, 'sigma'] bsp.loc[:, 'triangle'] = [triL[cou]] * np.shape(bsp)[0] bsp.loc[:, 'polarization'] = ['RR'] * np.shape(bsp)[0] bsp.loc[:, 'cphase'] = np.angle(bsp.loc[:, 'bsp']) * 180.0 / np.pi bsp.loc[:, 'amp'] = np.abs(bsp.loc[:, 'bsp']) bsp.loc[:, 'sigmaCP'] = 1.0 / bsp.loc[:, 'snr'] * 180.0 / np.pi bsp_out = pd.concat([bsp_out, bsp]) if verbose: print(triL[cou] + ': ' + str(np.shape(bsp)[0]) + ' closure phases') bsp_out = bsp_out.reset_index() bsp_out['rel_err'] = np.asarray(bsp_out['cphase']) / np.asarray(bsp_out['sigmaCP']) bsp_rr = bsp_out bsp = pd.concat([bsp_ll, bsp_rr], ignore_index=True) bsp.loc[:, 'vis'] = bsp.loc[:, 'vis'] = bsp.loc[:, 'amp'] * np.exp(1j * bsp.loc[:, 'cphase'] * np.pi / 180) bsp.loc[:, 'circ_sigma'] = bsp.loc[:, 'cphase'] if 'band' not in bsp.columns: bsp.loc[:, 'band'] = ['unknown'] * np.shape(bsp)[0] if tav == 'scan': bsp = bsp[['datetime', 'band', 'triangle', 'source', 'polarization', 'vis', 'sigmaCP', 'snr', 'scan_id', 'expt_no', 'circ_sigma']] bsp = bsp.groupby(['triangle', 'band', 'source', 'polarization', 'expt_no', 'scan_id']).agg({'datetime': 'min', 'vis': np.mean, 'sigmaCP': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'snr': lambda x: np.sqrt(np.sum(x ** 2)), 'circ_sigma': circular_std_of_mean_dif}) else: bsp.loc[:, 'round_time'] = list(map(lambda x: np.round((x - datetime.datetime(2017, 4, 4)).total_seconds() / tav), bsp.loc[:, 'datetime'])) bsp = bsp[['datetime', 'band', 'triangle', 'source', 'polarization', 'vis', 'sigma', 'sigmaCP', 'scan_id', 'expt_no', 'round_time', 'snr', 'circ_sigma']] bsp = bsp.groupby(['triangle', 'band', 'source', 'polarization', 'expt_no', 'scan_id', 'round_time']).agg({'datetime': 'min', 'vis': np.mean, 'sigmaCP': lambda x: np.sqrt(np.sum(x ** 2)) / len(x), 'snr': lambda x: np.sqrt(np.sum(x ** 2)), 'circ_sigma': circular_std_of_mean_dif}) bsp = bsp.reset_index() bsp['amp'] = np.abs(bsp['vis']) bsp['cphase'] = np.angle(bsp['vis']) * 180 / np.pi bsp = bsp[['datetime', 'band', 'triangle', 'source', 'polarization', 'amp', 'cphase', 'sigmaCP', 'snr', 'expt_no', 'scan_id', 'circ_sigma']] bsp_av = bsp bsp_av.loc[:, 'sigmaCP'] = bsp_av.loc[:, 'circ_sigma'] bsp_av = bsp_av if to_what == 'cphase': bsp_av['TotErr'] = np.mod(np.asarray(bsp_av['cphase']), 360.0) bsp_av['TotErr'] = np.minimum(np.asarray(bsp_av['TotErr']), np.abs(np.asarray(bsp_av['TotErr']) - 360.0)) bsp_av['RelErr'] = np.asarray(bsp_av['TotErr']) / np.asarray(bsp_av['sigmaCP']) elif to_what == 'amp': bsp_av['TotErr'] = bsp_av['amp'] bsp_av['RelErr'] = np.asarray(bsp_av['TotErr']) / np.asarray(bsp_av['sigma']) bsp_av = bsp_av if band != '': bsp_av['band'] = [band] * np.shape(bsp_av)[0] bsp_av = bsp_av return bsp_av
eat
positive
@contextlib.contextmanager def section(self, key): <DeepExtract> key = key.replace('-', '_') if self._dict_stack: stacktop = self._dict_stack[-1] else: stacktop = self if key not in stacktop: stacktop[key] = {} newtop = stacktop[key] self._dict_stack.append(newtop) </DeepExtract> yield None <DeepExtract> self._dict_stack.pop(-1) </DeepExtract>
@contextlib.contextmanager def section(self, key): key = key.replace('-', '_') if self._dict_stack: stacktop = self._dict_stack[-1] else: stacktop = self if key not in stacktop: stacktop[key] = {} newtop = stacktop[key] self._dict_stack.append(newtop) yield None self._dict_stack.pop(-1)
cmake_format
positive
def load_param(prefix, epoch, convert=False, ctx=None, process=False): """ wrapper for load checkpoint :param prefix: Prefix of model name. :param epoch: Epoch number of model we would like to load. :param convert: reference model should be converted to GPU NDArray first :param ctx: if convert then ctx must be designated. :param process: model should drop any test :return: (arg_params, aux_params) """ <DeepExtract> save_dict = mx.nd.load('%s-%04d.params' % (prefix, epoch)) arg_params = {} aux_params = {} for (k, v) in save_dict.items(): (tp, name) = k.split(':', 1) if tp == 'arg': arg_params[name] = v if tp == 'aux': aux_params[name] = v (arg_params, aux_params) = (arg_params, aux_params) </DeepExtract> if convert: if ctx is None: ctx = mx.cpu() <DeepExtract> new_params = dict() for (k, v) in arg_params.items(): new_params[k] = v.as_in_context(ctx) arg_params = new_params </DeepExtract> <DeepExtract> new_params = dict() for (k, v) in aux_params.items(): new_params[k] = v.as_in_context(ctx) aux_params = new_params </DeepExtract> if process: tests = [k for k in arg_params.keys() if '_test' in k] for test in tests: arg_params[test.replace('_test', '')] = arg_params.pop(test) return (arg_params, aux_params)
def load_param(prefix, epoch, convert=False, ctx=None, process=False): """ wrapper for load checkpoint :param prefix: Prefix of model name. :param epoch: Epoch number of model we would like to load. :param convert: reference model should be converted to GPU NDArray first :param ctx: if convert then ctx must be designated. :param process: model should drop any test :return: (arg_params, aux_params) """ save_dict = mx.nd.load('%s-%04d.params' % (prefix, epoch)) arg_params = {} aux_params = {} for (k, v) in save_dict.items(): (tp, name) = k.split(':', 1) if tp == 'arg': arg_params[name] = v if tp == 'aux': aux_params[name] = v (arg_params, aux_params) = (arg_params, aux_params) if convert: if ctx is None: ctx = mx.cpu() new_params = dict() for (k, v) in arg_params.items(): new_params[k] = v.as_in_context(ctx) arg_params = new_params new_params = dict() for (k, v) in aux_params.items(): new_params[k] = v.as_in_context(ctx) aux_params = new_params if process: tests = [k for k in arg_params.keys() if '_test' in k] for test in tests: arg_params[test.replace('_test', '')] = arg_params.pop(test) return (arg_params, aux_params)
DRN
positive
def unsuppress(self): """Cancels suppression and makes this search run as scheduled. :return: The :class:`SavedSearch`. """ <DeepExtract> if 'suppress'.startswith('/'): path = 'suppress' else: path = self.service._abspath(self.path + 'suppress', owner=owner, app=app, sharing=sharing) return self.service.post(path, owner=owner, app=app, sharing=sharing, **query) </DeepExtract> return self
def unsuppress(self): """Cancels suppression and makes this search run as scheduled. :return: The :class:`SavedSearch`. """ if 'suppress'.startswith('/'): path = 'suppress' else: path = self.service._abspath(self.path + 'suppress', owner=owner, app=app, sharing=sharing) return self.service.post(path, owner=owner, app=app, sharing=sharing, **query) return self
CobaltSplunk
positive
def setup_mocked_unauthenticated(): """Setup test directory and a MockedAdminCork instance""" global aaa global cookie_name <DeepExtract> global testdir tstamp = '%f' % time() testdir = '%s/fl_%s' % (tmproot, tstamp) os.mkdir(testdir) os.mkdir(testdir + '/views') with open('%s/users.json' % testdir, 'w') as f: f.write('{"admin": {"email_addr": null, "desc": null, "role": "admin", "hash": "69f75f38ac3bfd6ac813794f3d8c47acc867adb10b806e8979316ddbf6113999b6052efe4ba95c0fa9f6a568bddf60e8e5572d9254dbf3d533085e9153265623", "creation_date": "2012-04-09 14:22:27.075596"}}') with open('%s/roles.json' % testdir, 'w') as f: f.write('{"special": 200, "admin": 100, "user": 50}') with open('%s/register.json' % testdir, 'w') as f: f.write('{}') with open('%s/views/registration_email.tpl' % testdir, 'w') as f: f.write('Username:{{username}} Email:{{email_addr}} Code:{{registration_code}}') with open('%s/views/password_reset_email.tpl' % testdir, 'w') as f: f.write('Username:{{username}} Email:{{email_addr}} Code:{{reset_code}}') print('setup done in %s' % testdir) </DeepExtract> aaa = MockedUnauthenticatedCork(testdir, preferred_hashing_algorithm='scrypt') cookie_name = None
def setup_mocked_unauthenticated(): """Setup test directory and a MockedAdminCork instance""" global aaa global cookie_name global testdir tstamp = '%f' % time() testdir = '%s/fl_%s' % (tmproot, tstamp) os.mkdir(testdir) os.mkdir(testdir + '/views') with open('%s/users.json' % testdir, 'w') as f: f.write('{"admin": {"email_addr": null, "desc": null, "role": "admin", "hash": "69f75f38ac3bfd6ac813794f3d8c47acc867adb10b806e8979316ddbf6113999b6052efe4ba95c0fa9f6a568bddf60e8e5572d9254dbf3d533085e9153265623", "creation_date": "2012-04-09 14:22:27.075596"}}') with open('%s/roles.json' % testdir, 'w') as f: f.write('{"special": 200, "admin": 100, "user": 50}') with open('%s/register.json' % testdir, 'w') as f: f.write('{}') with open('%s/views/registration_email.tpl' % testdir, 'w') as f: f.write('Username:{{username}} Email:{{email_addr}} Code:{{registration_code}}') with open('%s/views/password_reset_email.tpl' % testdir, 'w') as f: f.write('Username:{{username}} Email:{{email_addr}} Code:{{reset_code}}') print('setup done in %s' % testdir) aaa = MockedUnauthenticatedCork(testdir, preferred_hashing_algorithm='scrypt') cookie_name = None
bottle-cork
positive
def cat_boxlist(bboxes): """ Concatenates a list of BoxList (having the same image size) into a single BoxList Arguments: bboxes (list[BoxList]) """ assert isinstance(bboxes, (list, tuple)) assert all((isinstance(bbox, BoxList) for bbox in bboxes)) size = bboxes[0].size assert all((bbox.size == size for bbox in bboxes)) mode = bboxes[0].mode assert all((bbox.mode == mode for bbox in bboxes)) fields = set(bboxes[0].fields()) assert all((set(bbox.fields()) == fields for bbox in bboxes)) cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode) for field in fields: <DeepExtract> assert isinstance([bbox.get_field(field) for bbox in bboxes], (list, tuple)) if len([bbox.get_field(field) for bbox in bboxes]) == 1: data = [bbox.get_field(field) for bbox in bboxes][0] data = torch.cat([bbox.get_field(field) for bbox in bboxes], 0) </DeepExtract> cat_boxes.add_field(field, data) return cat_boxes
def cat_boxlist(bboxes): """ Concatenates a list of BoxList (having the same image size) into a single BoxList Arguments: bboxes (list[BoxList]) """ assert isinstance(bboxes, (list, tuple)) assert all((isinstance(bbox, BoxList) for bbox in bboxes)) size = bboxes[0].size assert all((bbox.size == size for bbox in bboxes)) mode = bboxes[0].mode assert all((bbox.mode == mode for bbox in bboxes)) fields = set(bboxes[0].fields()) assert all((set(bbox.fields()) == fields for bbox in bboxes)) cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode) for field in fields: assert isinstance([bbox.get_field(field) for bbox in bboxes], (list, tuple)) if len([bbox.get_field(field) for bbox in bboxes]) == 1: data = [bbox.get_field(field) for bbox in bboxes][0] data = torch.cat([bbox.get_field(field) for bbox in bboxes], 0) cat_boxes.add_field(field, data) return cat_boxes
Clothing-Detection
positive
def get_state(self): state = self.game.get_state() if state is None: self.game_variables = dict() self.player = None self.monsters = [] self.ammo = [] <DeepExtract> self.actors = {} </DeepExtract> return self.game_variable_values = dict(zip(self.game_variable_strings, state.game_variables)) self.monsters = [] self.ammo = [] self.weapons = [] self.actors = {} for l in state.labels: if l.object_name in PLAYER_NAME: self.player = l elif l.object_name in MONSTER_LIST: self.monsters.append(l) <DeepExtract> if l.object_name not in self.actors: self.actors[l.object_name] = [] self.actors[l.object_name].append(l) </DeepExtract> else: <DeepExtract> if l.object_name not in self.actors: self.actors[l.object_name] = [] self.actors[l.object_name].append(l) </DeepExtract> self.labels = state.labels self.screen = np.transpose(state.screen_buffer, [1, 2, 0]).copy()
def get_state(self): state = self.game.get_state() if state is None: self.game_variables = dict() self.player = None self.monsters = [] self.ammo = [] self.actors = {} return self.game_variable_values = dict(zip(self.game_variable_strings, state.game_variables)) self.monsters = [] self.ammo = [] self.weapons = [] self.actors = {} for l in state.labels: if l.object_name in PLAYER_NAME: self.player = l elif l.object_name in MONSTER_LIST: self.monsters.append(l) if l.object_name not in self.actors: self.actors[l.object_name] = [] self.actors[l.object_name].append(l) else: if l.object_name not in self.actors: self.actors[l.object_name] = [] self.actors[l.object_name].append(l) self.labels = state.labels self.screen = np.transpose(state.screen_buffer, [1, 2, 0]).copy()
demo2program
positive
def write(self, stream): stream.u16(len(self.vals)) assert self.bot <= self.top for item in self.vals[:self.bot]: <DeepExtract> if item is None: return (tag, val) = item stream.u8(tag) if tag == CONSTANT_Utf8: stream.u16(len(val)) stream.write(val) elif tag in (CONSTANT_Integer, CONSTANT_Float): stream.u32(val) elif tag in (CONSTANT_Long, CONSTANT_Double): stream.u64(val) elif tag in (CONSTANT_Class, CONSTANT_String): stream.u16(val) else: stream.u16(val[0]) stream.u16(val[1]) </DeepExtract> stream.write(PLACEHOLDER_ENTRY * self.space()) for item in self.vals[self.top:]: <DeepExtract> if item is None: return (tag, val) = item stream.u8(tag) if tag == CONSTANT_Utf8: stream.u16(len(val)) stream.write(val) elif tag in (CONSTANT_Integer, CONSTANT_Float): stream.u32(val) elif tag in (CONSTANT_Long, CONSTANT_Double): stream.u64(val) elif tag in (CONSTANT_Class, CONSTANT_String): stream.u16(val) else: stream.u16(val[0]) stream.u16(val[1]) </DeepExtract>
def write(self, stream): stream.u16(len(self.vals)) assert self.bot <= self.top for item in self.vals[:self.bot]: if item is None: return (tag, val) = item stream.u8(tag) if tag == CONSTANT_Utf8: stream.u16(len(val)) stream.write(val) elif tag in (CONSTANT_Integer, CONSTANT_Float): stream.u32(val) elif tag in (CONSTANT_Long, CONSTANT_Double): stream.u64(val) elif tag in (CONSTANT_Class, CONSTANT_String): stream.u16(val) else: stream.u16(val[0]) stream.u16(val[1]) stream.write(PLACEHOLDER_ENTRY * self.space()) for item in self.vals[self.top:]: if item is None: return (tag, val) = item stream.u8(tag) if tag == CONSTANT_Utf8: stream.u16(len(val)) stream.write(val) elif tag in (CONSTANT_Integer, CONSTANT_Float): stream.u32(val) elif tag in (CONSTANT_Long, CONSTANT_Double): stream.u64(val) elif tag in (CONSTANT_Class, CONSTANT_String): stream.u16(val) else: stream.u16(val[0]) stream.u16(val[1])
Apk-Changer
positive
def segment_len(p1, p2): <DeepExtract> (dx, dy) = (p2[0] - p1[0], p2[1] - p1[1]) </DeepExtract> return math.sqrt(dx * dx + dy * dy)
def segment_len(p1, p2): (dx, dy) = (p2[0] - p1[0], p2[1] - p1[1]) return math.sqrt(dx * dx + dy * dy)
elsie
positive
def parse(self, msg, name): """Parses the message. We check that the message is properly formatted. :param msg: a json-encoded value containing a JWS or JWE+JWS token :raises InvalidMessage: if the message cannot be parsed or validated :returns: A verified payload """ try: jtok = JWT(jwt=msg) except Exception as e: raise InvalidMessage('Failed to parse message: %s' % str(e)) try: token = jtok.token if isinstance(token, JWE): token.decrypt(self.kkstore.server_keys[KEY_USAGE_ENC]) payload = token.payload.decode('utf-8') token = JWS() token.deserialize(payload) elif isinstance(token, JWS): pass else: raise TypeError('Invalid Token type: %s' % type(jtok)) self.client_keys = [JWK(**self._get_key(token.jose_header, KEY_USAGE_SIG)), JWK(**self._get_key(token.jose_header, KEY_USAGE_ENC))] token.verify(self.client_keys[KEY_USAGE_SIG]) claims = json_decode(token.payload) except Exception as e: logger.debug('Failed to validate message', exc_info=True) raise InvalidMessage('Failed to validate message: %s' % str(e)) <DeepExtract> if 'sub' not in claims: raise InvalidMessage('Missing subject in payload') if claims['sub'] != name: raise InvalidMessage('Key name %s does not match subject %s' % (name, claims['sub'])) if 'exp' not in claims: raise InvalidMessage('Missing expiration time in payload') if claims['exp'] - 10 * 60 > int(time.time()): raise InvalidMessage('Message expiration too far in the future') if claims['exp'] < int(time.time()): raise InvalidMessage('Message Expired') </DeepExtract> self.name = name self.payload = claims.get('value') self.msg_type = 'kem' return {'type': self.msg_type, 'value': {'kid': self.client_keys[KEY_USAGE_ENC].key_id, 'claims': claims}}
def parse(self, msg, name): """Parses the message. We check that the message is properly formatted. :param msg: a json-encoded value containing a JWS or JWE+JWS token :raises InvalidMessage: if the message cannot be parsed or validated :returns: A verified payload """ try: jtok = JWT(jwt=msg) except Exception as e: raise InvalidMessage('Failed to parse message: %s' % str(e)) try: token = jtok.token if isinstance(token, JWE): token.decrypt(self.kkstore.server_keys[KEY_USAGE_ENC]) payload = token.payload.decode('utf-8') token = JWS() token.deserialize(payload) elif isinstance(token, JWS): pass else: raise TypeError('Invalid Token type: %s' % type(jtok)) self.client_keys = [JWK(**self._get_key(token.jose_header, KEY_USAGE_SIG)), JWK(**self._get_key(token.jose_header, KEY_USAGE_ENC))] token.verify(self.client_keys[KEY_USAGE_SIG]) claims = json_decode(token.payload) except Exception as e: logger.debug('Failed to validate message', exc_info=True) raise InvalidMessage('Failed to validate message: %s' % str(e)) if 'sub' not in claims: raise InvalidMessage('Missing subject in payload') if claims['sub'] != name: raise InvalidMessage('Key name %s does not match subject %s' % (name, claims['sub'])) if 'exp' not in claims: raise InvalidMessage('Missing expiration time in payload') if claims['exp'] - 10 * 60 > int(time.time()): raise InvalidMessage('Message expiration too far in the future') if claims['exp'] < int(time.time()): raise InvalidMessage('Message Expired') self.name = name self.payload = claims.get('value') self.msg_type = 'kem' return {'type': self.msg_type, 'value': {'kid': self.client_keys[KEY_USAGE_ENC].key_id, 'claims': claims}}
custodia
positive
def forward(self, observations): <DeepExtract> resnet = observations[self.resnet_uuid] goal = observations[self.goal_uuid] use_agent = False nagent = 1 if len(resnet.shape) == 6: use_agent = True (nstep, nsampler, nagent) = resnet.shape[:3] else: (nstep, nsampler) = resnet.shape[:2] observations[self.resnet_uuid] = resnet.view(-1, *resnet.shape[-3:]) observations[self.goal_uuid] = goal.view(-1, goal.shape[-1]) (observations, use_agent, nstep, nsampler, nagent) = (observations, use_agent, nstep, nsampler, nagent) </DeepExtract> if self.blind: return self.embed_goal(observations[self.goal_uuid]) rgb_embs = [self.compress_rgb_resnet(observations), self.distribute_target(observations)] rgb_x = self.rgb_target_obs_combiner(torch.cat(rgb_embs, dim=1)) depth_embs = [self.compress_depth_resnet(observations), self.distribute_target(observations)] depth_x = self.depth_target_obs_combiner(torch.cat(depth_embs, dim=1)) x = torch.cat([rgb_x, depth_x], dim=1) x = x.reshape(x.shape[0], -1) return self.adapt_output(x, use_agent, nstep, nsampler, nagent)
def forward(self, observations): resnet = observations[self.resnet_uuid] goal = observations[self.goal_uuid] use_agent = False nagent = 1 if len(resnet.shape) == 6: use_agent = True (nstep, nsampler, nagent) = resnet.shape[:3] else: (nstep, nsampler) = resnet.shape[:2] observations[self.resnet_uuid] = resnet.view(-1, *resnet.shape[-3:]) observations[self.goal_uuid] = goal.view(-1, goal.shape[-1]) (observations, use_agent, nstep, nsampler, nagent) = (observations, use_agent, nstep, nsampler, nagent) if self.blind: return self.embed_goal(observations[self.goal_uuid]) rgb_embs = [self.compress_rgb_resnet(observations), self.distribute_target(observations)] rgb_x = self.rgb_target_obs_combiner(torch.cat(rgb_embs, dim=1)) depth_embs = [self.compress_depth_resnet(observations), self.distribute_target(observations)] depth_x = self.depth_target_obs_combiner(torch.cat(depth_embs, dim=1)) x = torch.cat([rgb_x, depth_x], dim=1) x = x.reshape(x.shape[0], -1) return self.adapt_output(x, use_agent, nstep, nsampler, nagent)
allenact
positive
def call(self): res = ParseResult() atom = res.register(self.atom()) if res.error: return res if self.current_tok.type == TT_LPAREN: res.register_advancement() <DeepExtract> self.idx += 1 self.col += 1 if current_char == '\n': self.ln += 1 self.col = 0 return self </DeepExtract> arg_nodes = [] if self.current_tok.type == TT_RPAREN: res.register_advancement() <DeepExtract> self.idx += 1 self.col += 1 if current_char == '\n': self.ln += 1 self.col = 0 return self </DeepExtract> else: arg_nodes.append(res.register(self.expr())) if res.error: return res.failure(InvalidSyntaxError(self.current_tok.pos_start, self.current_tok.pos_end, "Expected ')', 'VAR', 'IF', 'FOR', 'WHILE', 'FUN', int, float, identifier, '+', '-', '(', '[' or 'NOT'")) while self.current_tok.type == TT_COMMA: res.register_advancement() <DeepExtract> self.idx += 1 self.col += 1 if current_char == '\n': self.ln += 1 self.col = 0 return self </DeepExtract> arg_nodes.append(res.register(self.expr())) if res.error: return res if self.current_tok.type != TT_RPAREN: return res.failure(InvalidSyntaxError(self.current_tok.pos_start, self.current_tok.pos_end, f"Expected ',' or ')'")) res.register_advancement() <DeepExtract> self.idx += 1 self.col += 1 if current_char == '\n': self.ln += 1 self.col = 0 return self </DeepExtract> return res.success(CallNode(atom, arg_nodes)) return res.success(atom)
def call(self): res = ParseResult() atom = res.register(self.atom()) if res.error: return res if self.current_tok.type == TT_LPAREN: res.register_advancement() self.idx += 1 self.col += 1 if current_char == '\n': self.ln += 1 self.col = 0 return self arg_nodes = [] if self.current_tok.type == TT_RPAREN: res.register_advancement() self.idx += 1 self.col += 1 if current_char == '\n': self.ln += 1 self.col = 0 return self else: arg_nodes.append(res.register(self.expr())) if res.error: return res.failure(InvalidSyntaxError(self.current_tok.pos_start, self.current_tok.pos_end, "Expected ')', 'VAR', 'IF', 'FOR', 'WHILE', 'FUN', int, float, identifier, '+', '-', '(', '[' or 'NOT'")) while self.current_tok.type == TT_COMMA: res.register_advancement() self.idx += 1 self.col += 1 if current_char == '\n': self.ln += 1 self.col = 0 return self arg_nodes.append(res.register(self.expr())) if res.error: return res if self.current_tok.type != TT_RPAREN: return res.failure(InvalidSyntaxError(self.current_tok.pos_start, self.current_tok.pos_end, f"Expected ',' or ')'")) res.register_advancement() self.idx += 1 self.col += 1 if current_char == '\n': self.ln += 1 self.col = 0 return self return res.success(CallNode(atom, arg_nodes)) return res.success(atom)
Dip
positive
def __set_func_4D(func_4D): """ parse the input func file and test it's validity """ if not os.path.isfile(func_4D): logger.error('fMRI input {} does not exist :(..Exiting'.format(func_4D)) sys.exit(1) <DeepExtract> get_stdout(['fslval', func_4D, 'dim4']) = get_stdout(['fslval', func_4D, 'dim4']).replace(os.linesep, '') FirstWord = get_stdout(['fslval', func_4D, 'dim4']).split(' ', 1)[0] num_TR = get_stdout(['fslval', func_4D, 'dim4']) </DeepExtract> <DeepExtract> get_stdout(['fslval', func_4D, 'pixdim4']) = get_stdout(['fslval', func_4D, 'pixdim4']).replace(os.linesep, '') FirstWord = get_stdout(['fslval', func_4D, 'pixdim4']).split(' ', 1)[0] TR_in_ms = get_stdout(['fslval', func_4D, 'pixdim4']) </DeepExtract> return (func_4D, num_TR, TR_in_ms)
def __set_func_4D(func_4D): """ parse the input func file and test it's validity """ if not os.path.isfile(func_4D): logger.error('fMRI input {} does not exist :(..Exiting'.format(func_4D)) sys.exit(1) get_stdout(['fslval', func_4D, 'dim4']) = get_stdout(['fslval', func_4D, 'dim4']).replace(os.linesep, '') FirstWord = get_stdout(['fslval', func_4D, 'dim4']).split(' ', 1)[0] num_TR = get_stdout(['fslval', func_4D, 'dim4']) get_stdout(['fslval', func_4D, 'pixdim4']) = get_stdout(['fslval', func_4D, 'pixdim4']).replace(os.linesep, '') FirstWord = get_stdout(['fslval', func_4D, 'pixdim4']).split(' ', 1)[0] TR_in_ms = get_stdout(['fslval', func_4D, 'pixdim4']) return (func_4D, num_TR, TR_in_ms)
ciftify
positive
def main(): """Run.""" args = parser.parse_args() <DeepExtract> legacy_experiments_json = fetch(EXPERIMENTER_API_URL_V1) legacy_experiments = [] for experiment in legacy_experiments_json: if experiment['type'] != 'rapid': try: legacy_experiments.append(ExperimentV1.from_dict(experiment).to_experiment()) except Exception as e: print(f'Cannot import experiment: {experiment}: {e}') nimbus_experiments_json = fetch(EXPERIMENTER_API_URL_V6) nimbus_experiments = [] for experiment in nimbus_experiments_json: try: nimbus_experiments.append(ExperimentV6.from_dict(experiment).to_experiment()) except Exception as e: print(f'Cannot import experiment: {experiment}: {e}') experiments = nimbus_experiments + legacy_experiments </DeepExtract> destination_table = f'{args.project}.{args.destination_dataset}.{args.destination_table}' bq_schema = (bigquery.SchemaField('experimenter_slug', 'STRING'), bigquery.SchemaField('normandy_slug', 'STRING'), bigquery.SchemaField('type', 'STRING'), bigquery.SchemaField('status', 'STRING'), bigquery.SchemaField('start_date', 'DATE'), bigquery.SchemaField('end_date', 'DATE'), bigquery.SchemaField('enrollment_end_date', 'DATE'), bigquery.SchemaField('proposed_enrollment', 'INTEGER'), bigquery.SchemaField('reference_branch', 'STRING'), bigquery.SchemaField('is_high_population', 'BOOL'), bigquery.SchemaField('branches', 'RECORD', mode='REPEATED', fields=[bigquery.SchemaField('slug', 'STRING'), bigquery.SchemaField('ratio', 'INTEGER'), bigquery.SchemaField('value', 'JSON')]), bigquery.SchemaField('app_id', 'STRING'), bigquery.SchemaField('app_name', 'STRING'), bigquery.SchemaField('channel', 'STRING'), bigquery.SchemaField('targeting', 'STRING'), bigquery.SchemaField('targeted_percent', 'FLOAT'), bigquery.SchemaField('namespace', 'STRING')) job_config = bigquery.LoadJobConfig(write_disposition=bigquery.job.WriteDisposition.WRITE_TRUNCATE) job_config.schema = bq_schema converter = cattrs.BaseConverter() converter.register_unstructure_hook(datetime.datetime, lambda d: datetime.datetime.strftime(d, format='%Y-%m-%d')) blob = converter.unstructure(experiments) if args.dry_run: print(json.dumps(blob)) sys.exit(0) client = bigquery.Client(args.project) client.load_table_from_json(blob, destination_table, job_config=job_config).result() print(f'Loaded {len(experiments)} experiments')
def main(): """Run.""" args = parser.parse_args() legacy_experiments_json = fetch(EXPERIMENTER_API_URL_V1) legacy_experiments = [] for experiment in legacy_experiments_json: if experiment['type'] != 'rapid': try: legacy_experiments.append(ExperimentV1.from_dict(experiment).to_experiment()) except Exception as e: print(f'Cannot import experiment: {experiment}: {e}') nimbus_experiments_json = fetch(EXPERIMENTER_API_URL_V6) nimbus_experiments = [] for experiment in nimbus_experiments_json: try: nimbus_experiments.append(ExperimentV6.from_dict(experiment).to_experiment()) except Exception as e: print(f'Cannot import experiment: {experiment}: {e}') experiments = nimbus_experiments + legacy_experiments destination_table = f'{args.project}.{args.destination_dataset}.{args.destination_table}' bq_schema = (bigquery.SchemaField('experimenter_slug', 'STRING'), bigquery.SchemaField('normandy_slug', 'STRING'), bigquery.SchemaField('type', 'STRING'), bigquery.SchemaField('status', 'STRING'), bigquery.SchemaField('start_date', 'DATE'), bigquery.SchemaField('end_date', 'DATE'), bigquery.SchemaField('enrollment_end_date', 'DATE'), bigquery.SchemaField('proposed_enrollment', 'INTEGER'), bigquery.SchemaField('reference_branch', 'STRING'), bigquery.SchemaField('is_high_population', 'BOOL'), bigquery.SchemaField('branches', 'RECORD', mode='REPEATED', fields=[bigquery.SchemaField('slug', 'STRING'), bigquery.SchemaField('ratio', 'INTEGER'), bigquery.SchemaField('value', 'JSON')]), bigquery.SchemaField('app_id', 'STRING'), bigquery.SchemaField('app_name', 'STRING'), bigquery.SchemaField('channel', 'STRING'), bigquery.SchemaField('targeting', 'STRING'), bigquery.SchemaField('targeted_percent', 'FLOAT'), bigquery.SchemaField('namespace', 'STRING')) job_config = bigquery.LoadJobConfig(write_disposition=bigquery.job.WriteDisposition.WRITE_TRUNCATE) job_config.schema = bq_schema converter = cattrs.BaseConverter() converter.register_unstructure_hook(datetime.datetime, lambda d: datetime.datetime.strftime(d, format='%Y-%m-%d')) blob = converter.unstructure(experiments) if args.dry_run: print(json.dumps(blob)) sys.exit(0) client = bigquery.Client(args.project) client.load_table_from_json(blob, destination_table, job_config=job_config).result() print(f'Loaded {len(experiments)} experiments')
bigquery-etl
positive
def findall(search_file, image_file, threshold=None, maxcnt=0):
    <DeepExtract>
    if isinstance(search_file, basestring):
        obj = cv2.imread(search_file, 0)
    else:
        obj = search_file
    if obj == None:
        raise IOError('cv2 read file error:' + search_file)
    sch = obj
    </DeepExtract>
    <DeepExtract>
    if isinstance(image_file, basestring):
        obj = cv2.imread(image_file, 0)
    else:
        obj = image_file
    if obj == None:
        raise IOError('cv2 read file error:' + image_file)
    img = obj
    </DeepExtract>
    (kp_sch, des_sch) = sift.detectAndCompute(sch, None)
    (kp_img, des_img) = sift.detectAndCompute(img, None)
    if len(kp_sch) < MIN_MATCH_COUNT or len(kp_img) < MIN_MATCH_COUNT:
        return None
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    points = []
    while True:
        matches = flann.knnMatch(des_sch, des_img, k=2)
        good = []
        for (m, n) in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        if len(good) < MIN_MATCH_COUNT:
            break
        if maxcnt and len(points) > maxcnt:
            break
        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp_img[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        (M, mask) = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        (h, w) = sch.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        (lt, br) = (dst[0][0], dst[2][0])
        pt = map(int, (lt[0] + w / 2, lt[1] + h / 2))
        qindexes = []
        tindexes = []
        for m in good:
            qindexes.append(m.queryIdx)
            tindexes.append(m.trainIdx)
        def filter_index(indexes, arr):
            r = np.ndarray(0, np.float32)
            for (i, item) in enumerate(arr):
                if i not in qindexes:
                    r = np.append(r, item)
            return r
        <DeepExtract>
        r = np.ndarray(0, np.float32)
        for (i, item) in enumerate(kp_sch):
            if i not in qindexes:
                r = np.append(r, item)
        kp_sch = r
        </DeepExtract>
        <DeepExtract>
        r = np.ndarray(0, np.float32)
        for (i, item) in enumerate(des_sch):
            if i not in qindexes:
                r = np.append(r, item)
        des_sch = r
        </DeepExtract>
        <DeepExtract>
        r = np.ndarray(0, np.float32)
        for (i, item) in enumerate(kp_img):
            if i not in qindexes:
                r = np.append(r, item)
        kp_img = r
        </DeepExtract>
        <DeepExtract>
        r = np.ndarray(0, np.float32)
        for (i, item) in enumerate(des_img):
            if i not in qindexes:
                r = np.append(r, item)
        des_img = r
        </DeepExtract>
        points.append(pt)
    return points
def findall(search_file, image_file, threshold=None, maxcnt=0):
    if isinstance(search_file, basestring):
        obj = cv2.imread(search_file, 0)
    else:
        obj = search_file
    if obj == None:
        raise IOError('cv2 read file error:' + search_file)
    sch = obj
    if isinstance(image_file, basestring):
        obj = cv2.imread(image_file, 0)
    else:
        obj = image_file
    if obj == None:
        raise IOError('cv2 read file error:' + image_file)
    img = obj
    (kp_sch, des_sch) = sift.detectAndCompute(sch, None)
    (kp_img, des_img) = sift.detectAndCompute(img, None)
    if len(kp_sch) < MIN_MATCH_COUNT or len(kp_img) < MIN_MATCH_COUNT:
        return None
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    points = []
    while True:
        matches = flann.knnMatch(des_sch, des_img, k=2)
        good = []
        for (m, n) in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        if len(good) < MIN_MATCH_COUNT:
            break
        if maxcnt and len(points) > maxcnt:
            break
        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp_img[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        (M, mask) = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        (h, w) = sch.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        (lt, br) = (dst[0][0], dst[2][0])
        pt = map(int, (lt[0] + w / 2, lt[1] + h / 2))
        qindexes = []
        tindexes = []
        for m in good:
            qindexes.append(m.queryIdx)
            tindexes.append(m.trainIdx)
        def filter_index(indexes, arr):
            r = np.ndarray(0, np.float32)
            for (i, item) in enumerate(arr):
                if i not in qindexes:
                    r = np.append(r, item)
            return r
        r = np.ndarray(0, np.float32)
        for (i, item) in enumerate(kp_sch):
            if i not in qindexes:
                r = np.append(r, item)
        kp_sch = r
        r = np.ndarray(0, np.float32)
        for (i, item) in enumerate(des_sch):
            if i not in qindexes:
                r = np.append(r, item)
        des_sch = r
        r = np.ndarray(0, np.float32)
        for (i, item) in enumerate(kp_img):
            if i not in qindexes:
                r = np.append(r, item)
        kp_img = r
        r = np.ndarray(0, np.float32)
        for (i, item) in enumerate(des_img):
            if i not in qindexes:
                r = np.append(r, item)
        des_img = r
        points.append(pt)
    return points
airtest
positive
def generate(self, profile, parameters, projectpath, inputfiles, provenancedata=None): """Yields (inputtemplate, inputfilename, inputmetadata, outputfilename, metadata) tuples""" project = os.path.basename(projectpath) if self.parent: <DeepExtract> assert self.parent for inputtemplate in profile.input: if inputtemplate == self.parent: parent = inputtemplate raise Exception("Parent InputTemplate '" + self.parent + "' not found!") </DeepExtract> parentinputfiles = parent.matchingfiles(projectpath) if not parentinputfiles: raise Exception("OutputTemplate '" + self.id + "' has parent '" + self.parent + "', but no matching input files were found!") for (seqnr, inputfilename, inputtemplate) in parentinputfiles: if self.filename: filename = self.filename parentfile = CLAMInputFile(projectpath, inputfilename) elif parent: filename = inputfilename parentfile = CLAMInputFile(projectpath, inputfilename) else: raise Exception("OutputTemplate '" + self.id + "' has no parent nor filename defined!") relevantinputfiles = [] for (seqnr2, inputfilename2, inputtemplate2) in inputfiles: if seqnr2 == 0 or seqnr2 == seqnr: relevantinputfiles.append((inputtemplate2, CLAMInputFile(projectpath, inputfilename2))) if not self.filename and self.removeextensions: if self.removeextensions is True: filename = filename.split('.')[0] elif isinstance(self.removeextensions, list): for ext in self.removeextensions: if ext: if ext[0] != '.' and filename[-len(ext) - 1:] == '.' + ext: filename = filename[:-len(ext) - 1] elif ext[0] == '.' and filename[-len(ext):] == ext: filename = filename[:-len(ext)] if self.extension and (not self.filename) and (filename[-len(self.extension) - 1:] != '.' + self.extension): filename += '.' + self.extension <DeepExtract> assert isinstance(provenancedata, CLAMProvenanceData) or provenancedata is None data = {} if self.copymetadata: for (key, value) in parentfile.metadata.items(): data[key] = value for metafield in self.metafields: if isinstance(metafield, ParameterCondition): metafield = metafield.evaluate(parameters) if not metafield: continue assert isinstance(metafield, AbstractMetaField) metafield.resolve(data, parameters, parentfile, relevantinputfiles) if provenancedata: data['provenance'] = copy(provenancedata) data['constraints'] = self.constraints data['skipvalidation'] = self.skipvalidation metadata = self.formatclass(None, **data) </DeepExtract> <DeepExtract> if filename.find('$') != -1: for (parameter_id, parameter) in sorted(parameters.items(), key=lambda x: len(x[0]), reverse=True): if parameter and parameter.hasvalue: filename = filename.replace('$' + parameter.id, str(parameter.value)) if filename.find('$') != -1: for (parameter_id, value) in sorted(metadata.items(), key=lambda x: len(x[0]), reverse=True): if value != None: filename = filename.replace('$' + parameter_id, str(value)) if filename.find('$') != -1: if inputfilename: inputfilename = os.path.basename(inputfilename) raw = inputfilename.split('.', 1) inputstrippedfilename = raw[0] if len(raw) > 1: inputextension = raw[1] else: inputextension = '' filename = filename.replace('$INPUTFILENAME', inputfilename) filename = filename.replace('$INPUTSTRIPPEDFILENAME', inputstrippedfilename) filename = filename.replace('$INPUTEXTENSION', inputextension) if project: filename = filename.replace('$PROJECT', project) if not self.unique: filename = filename.replace('$SEQNR', str(seqnr)) if not self.unique: if '#' in filename: filename = filename.replace('#', str(seqnr)) clam.common.util.printdebug('Determined output filename: ' + 
filename) filename = filename </DeepExtract> yield (inputtemplate, inputfilename, parentfile.metadata, filename, metadata) elif self.unique and self.filename: <DeepExtract> assert isinstance(provenancedata, CLAMProvenanceData) or provenancedata is None data = {} if self.copymetadata: for (key, value) in None.metadata.items(): data[key] = value for metafield in self.metafields: if isinstance(metafield, ParameterCondition): metafield = metafield.evaluate(parameters) if not metafield: continue assert isinstance(metafield, AbstractMetaField) metafield.resolve(data, parameters, None, []) if provenancedata: data['provenance'] = copy(provenancedata) data['constraints'] = self.constraints data['skipvalidation'] = self.skipvalidation metadata = self.formatclass(None, **data) </DeepExtract> <DeepExtract> if self.filename.find('$') != -1: for (parameter_id, parameter) in sorted(parameters.items(), key=lambda x: len(x[0]), reverse=True): if parameter and parameter.hasvalue: self.filename = self.filename.replace('$' + parameter.id, str(parameter.value)) if self.filename.find('$') != -1: for (parameter_id, value) in sorted(metadata.items(), key=lambda x: len(x[0]), reverse=True): if value != None: self.filename = self.filename.replace('$' + parameter_id, str(value)) if self.filename.find('$') != -1: if None: None = os.path.basename(None) raw = None.split('.', 1) inputstrippedfilename = raw[0] if len(raw) > 1: inputextension = raw[1] else: inputextension = '' self.filename = self.filename.replace('$INPUTFILENAME', None) self.filename = self.filename.replace('$INPUTSTRIPPEDFILENAME', inputstrippedfilename) self.filename = self.filename.replace('$INPUTEXTENSION', inputextension) if project: self.filename = self.filename.replace('$PROJECT', project) if not self.unique: self.filename = self.filename.replace('$SEQNR', str(0)) if not self.unique: if '#' in self.filename: self.filename = self.filename.replace('#', str(0)) clam.common.util.printdebug('Determined output filename: ' + self.filename) self.filename = self.filename </DeepExtract> yield (None, None, None, filename, metadata) else: raise Exception('Unable to generate from OutputTemplate, no parent or filename specified')
def generate(self, profile, parameters, projectpath, inputfiles, provenancedata=None): """Yields (inputtemplate, inputfilename, inputmetadata, outputfilename, metadata) tuples""" project = os.path.basename(projectpath) if self.parent: assert self.parent for inputtemplate in profile.input: if inputtemplate == self.parent: parent = inputtemplate raise Exception("Parent InputTemplate '" + self.parent + "' not found!") parentinputfiles = parent.matchingfiles(projectpath) if not parentinputfiles: raise Exception("OutputTemplate '" + self.id + "' has parent '" + self.parent + "', but no matching input files were found!") for (seqnr, inputfilename, inputtemplate) in parentinputfiles: if self.filename: filename = self.filename parentfile = CLAMInputFile(projectpath, inputfilename) elif parent: filename = inputfilename parentfile = CLAMInputFile(projectpath, inputfilename) else: raise Exception("OutputTemplate '" + self.id + "' has no parent nor filename defined!") relevantinputfiles = [] for (seqnr2, inputfilename2, inputtemplate2) in inputfiles: if seqnr2 == 0 or seqnr2 == seqnr: relevantinputfiles.append((inputtemplate2, CLAMInputFile(projectpath, inputfilename2))) if not self.filename and self.removeextensions: if self.removeextensions is True: filename = filename.split('.')[0] elif isinstance(self.removeextensions, list): for ext in self.removeextensions: if ext: if ext[0] != '.' and filename[-len(ext) - 1:] == '.' + ext: filename = filename[:-len(ext) - 1] elif ext[0] == '.' and filename[-len(ext):] == ext: filename = filename[:-len(ext)] if self.extension and (not self.filename) and (filename[-len(self.extension) - 1:] != '.' + self.extension): filename += '.' + self.extension assert isinstance(provenancedata, CLAMProvenanceData) or provenancedata is None data = {} if self.copymetadata: for (key, value) in parentfile.metadata.items(): data[key] = value for metafield in self.metafields: if isinstance(metafield, ParameterCondition): metafield = metafield.evaluate(parameters) if not metafield: continue assert isinstance(metafield, AbstractMetaField) metafield.resolve(data, parameters, parentfile, relevantinputfiles) if provenancedata: data['provenance'] = copy(provenancedata) data['constraints'] = self.constraints data['skipvalidation'] = self.skipvalidation metadata = self.formatclass(None, **data) if filename.find('$') != -1: for (parameter_id, parameter) in sorted(parameters.items(), key=lambda x: len(x[0]), reverse=True): if parameter and parameter.hasvalue: filename = filename.replace('$' + parameter.id, str(parameter.value)) if filename.find('$') != -1: for (parameter_id, value) in sorted(metadata.items(), key=lambda x: len(x[0]), reverse=True): if value != None: filename = filename.replace('$' + parameter_id, str(value)) if filename.find('$') != -1: if inputfilename: inputfilename = os.path.basename(inputfilename) raw = inputfilename.split('.', 1) inputstrippedfilename = raw[0] if len(raw) > 1: inputextension = raw[1] else: inputextension = '' filename = filename.replace('$INPUTFILENAME', inputfilename) filename = filename.replace('$INPUTSTRIPPEDFILENAME', inputstrippedfilename) filename = filename.replace('$INPUTEXTENSION', inputextension) if project: filename = filename.replace('$PROJECT', project) if not self.unique: filename = filename.replace('$SEQNR', str(seqnr)) if not self.unique: if '#' in filename: filename = filename.replace('#', str(seqnr)) clam.common.util.printdebug('Determined output filename: ' + filename) filename = filename yield (inputtemplate, inputfilename, 
parentfile.metadata, filename, metadata) elif self.unique and self.filename: assert isinstance(provenancedata, CLAMProvenanceData) or provenancedata is None data = {} if self.copymetadata: for (key, value) in None.metadata.items(): data[key] = value for metafield in self.metafields: if isinstance(metafield, ParameterCondition): metafield = metafield.evaluate(parameters) if not metafield: continue assert isinstance(metafield, AbstractMetaField) metafield.resolve(data, parameters, None, []) if provenancedata: data['provenance'] = copy(provenancedata) data['constraints'] = self.constraints data['skipvalidation'] = self.skipvalidation metadata = self.formatclass(None, **data) if self.filename.find('$') != -1: for (parameter_id, parameter) in sorted(parameters.items(), key=lambda x: len(x[0]), reverse=True): if parameter and parameter.hasvalue: self.filename = self.filename.replace('$' + parameter.id, str(parameter.value)) if self.filename.find('$') != -1: for (parameter_id, value) in sorted(metadata.items(), key=lambda x: len(x[0]), reverse=True): if value != None: self.filename = self.filename.replace('$' + parameter_id, str(value)) if self.filename.find('$') != -1: if None: None = os.path.basename(None) raw = None.split('.', 1) inputstrippedfilename = raw[0] if len(raw) > 1: inputextension = raw[1] else: inputextension = '' self.filename = self.filename.replace('$INPUTFILENAME', None) self.filename = self.filename.replace('$INPUTSTRIPPEDFILENAME', inputstrippedfilename) self.filename = self.filename.replace('$INPUTEXTENSION', inputextension) if project: self.filename = self.filename.replace('$PROJECT', project) if not self.unique: self.filename = self.filename.replace('$SEQNR', str(0)) if not self.unique: if '#' in self.filename: self.filename = self.filename.replace('#', str(0)) clam.common.util.printdebug('Determined output filename: ' + self.filename) self.filename = self.filename yield (None, None, None, filename, metadata) else: raise Exception('Unable to generate from OutputTemplate, no parent or filename specified')
clam
positive
def on_character_changed(self, char):
    symbol = Character(char)
    self.selected_objects = []
    <DeepExtract>
    obj = SelectedObjects(symbol.startpos, symbol)
    self.selected_objects.append(obj)
    </DeepExtract>
    pub.sendMessage('CHARACTER_SELECTED', char=symbol)
def on_character_changed(self, char):
    symbol = Character(char)
    self.selected_objects = []
    obj = SelectedObjects(symbol.startpos, symbol)
    self.selected_objects.append(obj)
    pub.sendMessage('CHARACTER_SELECTED', char=symbol)
AACircuit
positive
def testStorageOpt(self):
    """ d1, d2 with storage size limit """
    <DeepExtract>
    self.net = Containernet(controller=Controller)
    self.net.addController('c0')
    for i in range(0, 1):
        self.s.append(self.net.addSwitch('s%d' % i))
    if autolinkswitches:
        for i in range(0, len(self.s) - 1):
            self.net.addLink(self.s[i], self.s[i + 1])
    for i in range(0, 0):
        self.h.append(self.net.addHost('h%d' % i))
    for i in range(0, 0):
        self.d.append(self.net.addDocker('d%d' % i, dimage='ubuntu:trusty'))
    </DeepExtract>
    d0 = self.net.addDocker('d0', ip='10.0.0.1', dimage='ubuntu:trusty', storage_opt={'size': '42m'})
    d1 = self.net.addDocker('d1', ip='10.0.0.2', dimage='ubuntu:trusty', storage_opt={'size': '1G'})
    self.net.addLink(d0, self.s[0])
    self.net.addLink(d1, self.s[0])
    <DeepExtract>
    self.net.start()
    </DeepExtract>
    self.assertTrue(len(self.net.hosts) == 2)
    self.assertEqual(d0.cmd('df -h | grep overlay').split()[1], '42M')
    self.assertEqual(d1.cmd('df -h | grep overlay').split()[1], '1.0G')
    <DeepExtract>
    self.net.stop()
    self.s = []
    self.h = []
    self.d = []
    </DeepExtract>
def testStorageOpt(self):
    """ d1, d2 with storage size limit """
    self.net = Containernet(controller=Controller)
    self.net.addController('c0')
    for i in range(0, 1):
        self.s.append(self.net.addSwitch('s%d' % i))
    if autolinkswitches:
        for i in range(0, len(self.s) - 1):
            self.net.addLink(self.s[i], self.s[i + 1])
    for i in range(0, 0):
        self.h.append(self.net.addHost('h%d' % i))
    for i in range(0, 0):
        self.d.append(self.net.addDocker('d%d' % i, dimage='ubuntu:trusty'))
    d0 = self.net.addDocker('d0', ip='10.0.0.1', dimage='ubuntu:trusty', storage_opt={'size': '42m'})
    d1 = self.net.addDocker('d1', ip='10.0.0.2', dimage='ubuntu:trusty', storage_opt={'size': '1G'})
    self.net.addLink(d0, self.s[0])
    self.net.addLink(d1, self.s[0])
    self.net.start()
    self.assertTrue(len(self.net.hosts) == 2)
    self.assertEqual(d0.cmd('df -h | grep overlay').split()[1], '42M')
    self.assertEqual(d1.cmd('df -h | grep overlay').split()[1], '1.0G')
    self.net.stop()
    self.s = []
    self.h = []
    self.d = []
containernet
positive
def get(self):
    if not can_control_experiments():
        return
    <DeepExtract>
    if self.is_requesting_archives():
        bingo_cache = BingoCache.load_from_datastore(archives=True)
    else:
        bingo_cache = BingoCache.get()
    </DeepExtract>
    experiment_name = self.request.get('experiment_name')
    experiment = bingo_cache.get_experiment(experiment_name)
    alternatives = bingo_cache.get_alternatives(experiment_name)
    if not experiment or not alternatives:
        raise Exception('No experiment matching name: %s' % canonical_name)
    short_circuit_number = -1
    for alternative in alternatives:
        if not experiment.live and experiment.short_circuit_content == alternative.content:
            short_circuit_number = alternative.number
        alternative.load_latest_counts()
    context = {'canonical_name': experiment.canonical_name, 'live': experiment.live, 'total_participants': reduce(lambda a, b: a + b, map(lambda alternative: alternative.participants, alternatives)), 'total_conversions': reduce(lambda a, b: a + b, map(lambda alternative: alternative.conversions, alternatives)), 'alternatives': alternatives, 'significance_test_results': describe_result_in_words(alternatives), 'y_axis_title': experiment.y_axis_title, 'timeline_series': get_experiment_timeline_data(experiment), 'short_circuit_number': short_circuit_number}
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(jsonify(context))
def get(self):
    if not can_control_experiments():
        return
    if self.is_requesting_archives():
        bingo_cache = BingoCache.load_from_datastore(archives=True)
    else:
        bingo_cache = BingoCache.get()
    experiment_name = self.request.get('experiment_name')
    experiment = bingo_cache.get_experiment(experiment_name)
    alternatives = bingo_cache.get_alternatives(experiment_name)
    if not experiment or not alternatives:
        raise Exception('No experiment matching name: %s' % canonical_name)
    short_circuit_number = -1
    for alternative in alternatives:
        if not experiment.live and experiment.short_circuit_content == alternative.content:
            short_circuit_number = alternative.number
        alternative.load_latest_counts()
    context = {'canonical_name': experiment.canonical_name, 'live': experiment.live, 'total_participants': reduce(lambda a, b: a + b, map(lambda alternative: alternative.participants, alternatives)), 'total_conversions': reduce(lambda a, b: a + b, map(lambda alternative: alternative.conversions, alternatives)), 'alternatives': alternatives, 'significance_test_results': describe_result_in_words(alternatives), 'y_axis_title': experiment.y_axis_title, 'timeline_series': get_experiment_timeline_data(experiment), 'short_circuit_number': short_circuit_number}
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(jsonify(context))
analytics
positive
def run(self):
    <DeepExtract>
    self.sessions.terminate_past_sessions()
    return True
    </DeepExtract>
    <DeepExtract>
    self.sessions.send_notifications()
    return True
    </DeepExtract>
    return True
def run(self):
    self.sessions.terminate_past_sessions()
    return True
    self.sessions.send_notifications()
    return True
    return True
crackerjack
positive
def test_model_abalone_advanced_config(self):
    """Test on the Abalone dataset."""
    <DeepExtract>
    dataset_directory = os.path.join(ydf_test_data_path(), 'dataset')
    dataset_path = os.path.join(dataset_directory, 'abalone.csv')
    dataset = pd.read_csv(dataset_path)
    (train, test) = train_test_split(dataset, ratio_second=0.3)
    dataset = prepare_dataset(train, test, label='Rings', num_classes=1)
    </DeepExtract>
    <DeepExtract>
    def df_to_ds(df):
        (tf_train, tf_test) = tf.data.Dataset.from_tensor_slices((dict(df.drop(dataset.label, axis=1)), df[dataset.label].values))
    train_ds = df_to_ds(dataset.train).batch(1024)
    test_ds = df_to_ds(dataset.test).batch(1024)
    (tf_train, tf_test) = (train_ds, test_ds)
    </DeepExtract>
    yggdrasil_training_config = keras.core.YggdrasilTrainingConfig()
    rf_training_config = yggdrasil_training_config.Extensions[random_forest_pb2.random_forest_config]
    rf_training_config.decision_tree.internal.sorting_strategy = decision_tree_pb2.DecisionTreeTrainingConfig.Internal.SortingStrategy.IN_NODE
    yggdrasil_deployment_config = keras.core.YggdrasilDeploymentConfig(num_threads=10)
    model = keras.RandomForestModel(task=keras.Task.REGRESSION, advanced_arguments=keras.AdvancedArguments(yggdrasil_training_config=yggdrasil_training_config, yggdrasil_deployment_config=yggdrasil_deployment_config))
    model.compile(metrics=['mse'])
    model.fit(x=tf_train, validation_data=tf_test)
    model.summary()
    evaluation = model.evaluate(tf_test)
    logging.info('Evaluation: %s', evaluation)
    self.assertLessEqual(evaluation[1], 6.0)
    predictions = model.predict(tf_test)
    logging.info('Predictions: %s', predictions)
def test_model_abalone_advanced_config(self):
    """Test on the Abalone dataset."""
    dataset_directory = os.path.join(ydf_test_data_path(), 'dataset')
    dataset_path = os.path.join(dataset_directory, 'abalone.csv')
    dataset = pd.read_csv(dataset_path)
    (train, test) = train_test_split(dataset, ratio_second=0.3)
    dataset = prepare_dataset(train, test, label='Rings', num_classes=1)
    def df_to_ds(df):
        (tf_train, tf_test) = tf.data.Dataset.from_tensor_slices((dict(df.drop(dataset.label, axis=1)), df[dataset.label].values))
    train_ds = df_to_ds(dataset.train).batch(1024)
    test_ds = df_to_ds(dataset.test).batch(1024)
    (tf_train, tf_test) = (train_ds, test_ds)
    yggdrasil_training_config = keras.core.YggdrasilTrainingConfig()
    rf_training_config = yggdrasil_training_config.Extensions[random_forest_pb2.random_forest_config]
    rf_training_config.decision_tree.internal.sorting_strategy = decision_tree_pb2.DecisionTreeTrainingConfig.Internal.SortingStrategy.IN_NODE
    yggdrasil_deployment_config = keras.core.YggdrasilDeploymentConfig(num_threads=10)
    model = keras.RandomForestModel(task=keras.Task.REGRESSION, advanced_arguments=keras.AdvancedArguments(yggdrasil_training_config=yggdrasil_training_config, yggdrasil_deployment_config=yggdrasil_deployment_config))
    model.compile(metrics=['mse'])
    model.fit(x=tf_train, validation_data=tf_test)
    model.summary()
    evaluation = model.evaluate(tf_test)
    logging.info('Evaluation: %s', evaluation)
    self.assertLessEqual(evaluation[1], 6.0)
    predictions = model.predict(tf_test)
    logging.info('Predictions: %s', predictions)
decision-forests
positive
def _iter_table_markdown(table_paths, template):
    for table_path in table_paths:
        source_urls = {'Source Directory': f'{SOURCE_URL}/{str(table_path)}'}
        <DeepExtract>
        referenced_tables = []
        view_file = table_path / VIEW_FILE
        if view_file.exists():
            for referenced_table in extract_table_references(view_file.read_text()):
                table_split = referenced_table.split('.')
                if len(table_split) == 2:
                    [dataset_id, table_id] = table_split
                    project_id = view_file.parent.parent.parent.name
                elif len(table_split) == 3:
                    [project_id, dataset_id, table_id] = table_split
                else:
                    continue
                referenced_tables.append({'project_id': project_id, 'dataset_id': dataset_id, 'table_id': table_id})
        referenced_tables = referenced_tables
        </DeepExtract>
        if referenced_tables:
            source_urls['View Definition'] = f'{SOURCE_URL}/{str(table_path / VIEW_FILE)}'
        <DeepExtract>
        metadata_path = table_path / metadata_filename
        try:
            if metadata_filename == METADATA_FILE:
                metadata = Metadata.from_file(metadata_path)
                metadata = metadata
            elif metadata_filename == DATASET_METADATA_FILE:
                metadata = DatasetMetadata.from_file(metadata_path)
                metadata = metadata
            else:
                raise Exception(f'Invalid metadata filename provided - {metadata_filename}')
        except FileNotFoundError:
            logging.warning(f'Metadata not found at {str(metadata_path)}')
        </DeepExtract>
        if metadata:
            source_urls['Metadata File'] = f'{SOURCE_URL}/{str(table_path / METADATA_FILE)}'
        <DeepExtract>
        readme_file = table_path / README_FILE
        if readme_file.exists():
            readme_content = readme_file.read_text()
        </DeepExtract>
        <DeepExtract>
        schema_path = table_path / SCHEMA_FILE
        try:
            schema = Schema.from_schema_file(schema_path)
            schema = schema.schema.get('fields')
        except Exception as e:
            logging.warning(f'Unable to open schema: {e}')
        </DeepExtract>
        output = template.render(metadata=metadata, readme_content=readme_content, schema=schema, table_name=table_path.name, qualified_table_name=f'{table_path.parent.name}.{table_path.name}', source_urls=source_urls, referenced_tables=referenced_tables, project_url=f'{SOURCE_URL}/sql')
        yield output
def _iter_table_markdown(table_paths, template):
    for table_path in table_paths:
        source_urls = {'Source Directory': f'{SOURCE_URL}/{str(table_path)}'}
        referenced_tables = []
        view_file = table_path / VIEW_FILE
        if view_file.exists():
            for referenced_table in extract_table_references(view_file.read_text()):
                table_split = referenced_table.split('.')
                if len(table_split) == 2:
                    [dataset_id, table_id] = table_split
                    project_id = view_file.parent.parent.parent.name
                elif len(table_split) == 3:
                    [project_id, dataset_id, table_id] = table_split
                else:
                    continue
                referenced_tables.append({'project_id': project_id, 'dataset_id': dataset_id, 'table_id': table_id})
        referenced_tables = referenced_tables
        if referenced_tables:
            source_urls['View Definition'] = f'{SOURCE_URL}/{str(table_path / VIEW_FILE)}'
        metadata_path = table_path / metadata_filename
        try:
            if metadata_filename == METADATA_FILE:
                metadata = Metadata.from_file(metadata_path)
                metadata = metadata
            elif metadata_filename == DATASET_METADATA_FILE:
                metadata = DatasetMetadata.from_file(metadata_path)
                metadata = metadata
            else:
                raise Exception(f'Invalid metadata filename provided - {metadata_filename}')
        except FileNotFoundError:
            logging.warning(f'Metadata not found at {str(metadata_path)}')
        if metadata:
            source_urls['Metadata File'] = f'{SOURCE_URL}/{str(table_path / METADATA_FILE)}'
        readme_file = table_path / README_FILE
        if readme_file.exists():
            readme_content = readme_file.read_text()
        schema_path = table_path / SCHEMA_FILE
        try:
            schema = Schema.from_schema_file(schema_path)
            schema = schema.schema.get('fields')
        except Exception as e:
            logging.warning(f'Unable to open schema: {e}')
        output = template.render(metadata=metadata, readme_content=readme_content, schema=schema, table_name=table_path.name, qualified_table_name=f'{table_path.parent.name}.{table_path.name}', source_urls=source_urls, referenced_tables=referenced_tables, project_url=f'{SOURCE_URL}/sql')
        yield output
bigquery-etl
positive
def create_marking_union(*stix1_objects):
    union_object_marking_refs = []
    for stix1_object in stix1_objects:
        <DeepExtract>
        object_marking_refs = []
        for marking_specification in get_marking_specifications(stix1_object) or []:
            for marking_structure in marking_specification.marking_structures:
                stix2x_marking = map_1x_markings_to_2x(marking_structure)
                if isinstance(stix2x_marking, dict):
                    object_marking_refs.append(stix2x_marking['id'])
                else:
                    object_marking_refs.append(stix2x_marking)
        stix2_marking_refs = object_marking_refs
        </DeepExtract>
        union_object_marking_refs.extend(stix2_marking_refs)
    return list(set(union_object_marking_refs))
def create_marking_union(*stix1_objects):
    union_object_marking_refs = []
    for stix1_object in stix1_objects:
        object_marking_refs = []
        for marking_specification in get_marking_specifications(stix1_object) or []:
            for marking_structure in marking_specification.marking_structures:
                stix2x_marking = map_1x_markings_to_2x(marking_structure)
                if isinstance(stix2x_marking, dict):
                    object_marking_refs.append(stix2x_marking['id'])
                else:
                    object_marking_refs.append(stix2x_marking)
        stix2_marking_refs = object_marking_refs
        union_object_marking_refs.extend(stix2_marking_refs)
    return list(set(union_object_marking_refs))
cti-stix-elevator
positive
def test_serialize(self):
    <DeepExtract>
    cell = Cell(serializer('location'), False)
    cell.obstacle = Obstacle(1)
    cell.avatar = serializer('avatar')
    cell.interactable = serializer('interactable')
    self.expected = {'avatar': 'avatar', 'habitable': False, 'location': 'location', 'interactable': 'interactable', 'partially_fogged': False}
    cell = cell
    </DeepExtract>
    assert cell.serialize() == self.expected
def test_serialize(self):
    cell = Cell(serializer('location'), False)
    cell.obstacle = Obstacle(1)
    cell.avatar = serializer('avatar')
    cell.interactable = serializer('interactable')
    self.expected = {'avatar': 'avatar', 'habitable': False, 'location': 'location', 'interactable': 'interactable', 'partially_fogged': False}
    cell = cell
    assert cell.serialize() == self.expected
aimmo
positive
def get_dag(self, dag_id): """ Gets the DAG out of the dictionary, and refreshes it if expired """ root_dag_id = dag_id if dag_id in self.dags: dag = self.dags[dag_id] if dag.is_subdag: root_dag_id = dag.parent_dag.dag_id orm_dag = DagModel.get_current(root_dag_id) if orm_dag and (root_dag_id not in self.dags or (orm_dag.last_expired and dag.last_loaded < orm_dag.last_expired)): <DeepExtract> found_dags = [] if not os.path.isfile(orm_dag.fileloc): found_dags = found_dags try: file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(orm_dag.fileloc)) if False and orm_dag.fileloc in self.file_last_changed and (file_last_changed_on_disk == self.file_last_changed[orm_dag.fileloc]): found_dags = found_dags except Exception as e: logging.exception(e) found_dags = found_dags mods = [] if not zipfile.is_zipfile(orm_dag.fileloc): if safe_mode and os.path.isfile(orm_dag.fileloc): with open(orm_dag.fileloc, 'rb') as f: content = f.read() if not all([s in content for s in (b'DAG', b'airflow')]): self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk found_dags = found_dags self.logger.debug('Importing {}'.format(orm_dag.fileloc)) (org_mod_name, _) = os.path.splitext(os.path.split(orm_dag.fileloc)[-1]) mod_name = 'unusual_prefix_' + hashlib.sha1(orm_dag.fileloc.encode('utf-8')).hexdigest() + '_' + org_mod_name if mod_name in sys.modules: del sys.modules[mod_name] with timeout(configuration.getint('core', 'DAGBAG_IMPORT_TIMEOUT')): try: m = imp.load_source(mod_name, orm_dag.fileloc) mods.append(m) except Exception as e: self.logger.exception('Failed to import: ' + orm_dag.fileloc) self.import_errors[orm_dag.fileloc] = str(e) self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk else: zip_file = zipfile.ZipFile(orm_dag.fileloc) for mod in zip_file.infolist(): (head, _) = os.path.split(mod.filename) (mod_name, ext) = os.path.splitext(mod.filename) if not head and (ext == '.py' or ext == '.pyc'): if mod_name == '__init__': self.logger.warning('Found __init__.{0} at root of {1}'.format(ext, orm_dag.fileloc)) if safe_mode: with zip_file.open(mod.filename) as zf: self.logger.debug('Reading {} from {}'.format(mod.filename, orm_dag.fileloc)) content = zf.read() if not all([s in content for s in (b'DAG', b'airflow')]): self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk found_dags = found_dags if mod_name in sys.modules: del sys.modules[mod_name] try: sys.path.insert(0, orm_dag.fileloc) m = importlib.import_module(mod_name) mods.append(m) except Exception as e: self.logger.exception('Failed to import: ' + orm_dag.fileloc) self.import_errors[orm_dag.fileloc] = str(e) self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk for m in mods: for dag in list(m.__dict__.values()): if isinstance(dag, DAG): if not dag.full_filepath: dag.full_filepath = orm_dag.fileloc dag.is_subdag = False self.bag_dag(dag, parent_dag=dag, root_dag=dag) found_dags.append(dag) found_dags += dag.subdags self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk found_dags = found_dags </DeepExtract> if found_dags and dag_id in [dag.dag_id for dag in found_dags]: return self.dags[dag_id] elif dag_id in self.dags: del self.dags[dag_id] return self.dags.get(dag_id)
def get_dag(self, dag_id): """ Gets the DAG out of the dictionary, and refreshes it if expired """ root_dag_id = dag_id if dag_id in self.dags: dag = self.dags[dag_id] if dag.is_subdag: root_dag_id = dag.parent_dag.dag_id orm_dag = DagModel.get_current(root_dag_id) if orm_dag and (root_dag_id not in self.dags or (orm_dag.last_expired and dag.last_loaded < orm_dag.last_expired)): found_dags = [] if not os.path.isfile(orm_dag.fileloc): found_dags = found_dags try: file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(orm_dag.fileloc)) if False and orm_dag.fileloc in self.file_last_changed and (file_last_changed_on_disk == self.file_last_changed[orm_dag.fileloc]): found_dags = found_dags except Exception as e: logging.exception(e) found_dags = found_dags mods = [] if not zipfile.is_zipfile(orm_dag.fileloc): if safe_mode and os.path.isfile(orm_dag.fileloc): with open(orm_dag.fileloc, 'rb') as f: content = f.read() if not all([s in content for s in (b'DAG', b'airflow')]): self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk found_dags = found_dags self.logger.debug('Importing {}'.format(orm_dag.fileloc)) (org_mod_name, _) = os.path.splitext(os.path.split(orm_dag.fileloc)[-1]) mod_name = 'unusual_prefix_' + hashlib.sha1(orm_dag.fileloc.encode('utf-8')).hexdigest() + '_' + org_mod_name if mod_name in sys.modules: del sys.modules[mod_name] with timeout(configuration.getint('core', 'DAGBAG_IMPORT_TIMEOUT')): try: m = imp.load_source(mod_name, orm_dag.fileloc) mods.append(m) except Exception as e: self.logger.exception('Failed to import: ' + orm_dag.fileloc) self.import_errors[orm_dag.fileloc] = str(e) self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk else: zip_file = zipfile.ZipFile(orm_dag.fileloc) for mod in zip_file.infolist(): (head, _) = os.path.split(mod.filename) (mod_name, ext) = os.path.splitext(mod.filename) if not head and (ext == '.py' or ext == '.pyc'): if mod_name == '__init__': self.logger.warning('Found __init__.{0} at root of {1}'.format(ext, orm_dag.fileloc)) if safe_mode: with zip_file.open(mod.filename) as zf: self.logger.debug('Reading {} from {}'.format(mod.filename, orm_dag.fileloc)) content = zf.read() if not all([s in content for s in (b'DAG', b'airflow')]): self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk found_dags = found_dags if mod_name in sys.modules: del sys.modules[mod_name] try: sys.path.insert(0, orm_dag.fileloc) m = importlib.import_module(mod_name) mods.append(m) except Exception as e: self.logger.exception('Failed to import: ' + orm_dag.fileloc) self.import_errors[orm_dag.fileloc] = str(e) self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk for m in mods: for dag in list(m.__dict__.values()): if isinstance(dag, DAG): if not dag.full_filepath: dag.full_filepath = orm_dag.fileloc dag.is_subdag = False self.bag_dag(dag, parent_dag=dag, root_dag=dag) found_dags.append(dag) found_dags += dag.subdags self.file_last_changed[orm_dag.fileloc] = file_last_changed_on_disk found_dags = found_dags if found_dags and dag_id in [dag.dag_id for dag in found_dags]: return self.dags[dag_id] elif dag_id in self.dags: del self.dags[dag_id] return self.dags.get(dag_id)
docker-airflow
positive
@property
def output_shape(self):
    input_shape = self.input_shape
    rows = input_shape[2]
    cols = input_shape[3]
    <DeepExtract>
    if rows is None:
        rows = None
    assert self.border_mode in {'same', 'full', 'valid'}
    if self.border_mode == 'same':
        output_length = rows
    elif self.border_mode == 'full':
        output_length = rows + self.nb_row - 1
    elif self.border_mode == 'valid':
        output_length = rows - self.nb_row + 1
    rows = (output_length + self.subsample[0] - 1) // self.subsample[0]
    </DeepExtract>
    <DeepExtract>
    if cols is None:
        cols = None
    assert self.border_mode in {'same', 'full', 'valid'}
    if self.border_mode == 'same':
        output_length = cols
    elif self.border_mode == 'full':
        output_length = cols + self.nb_col - 1
    elif self.border_mode == 'valid':
        output_length = cols - self.nb_col + 1
    cols = (output_length + self.subsample[1] - 1) // self.subsample[1]
    </DeepExtract>
    return (input_shape[0], self.nb_filter, rows, cols)
@property
def output_shape(self):
    input_shape = self.input_shape
    rows = input_shape[2]
    cols = input_shape[3]
    if rows is None:
        rows = None
    assert self.border_mode in {'same', 'full', 'valid'}
    if self.border_mode == 'same':
        output_length = rows
    elif self.border_mode == 'full':
        output_length = rows + self.nb_row - 1
    elif self.border_mode == 'valid':
        output_length = rows - self.nb_row + 1
    rows = (output_length + self.subsample[0] - 1) // self.subsample[0]
    if cols is None:
        cols = None
    assert self.border_mode in {'same', 'full', 'valid'}
    if self.border_mode == 'same':
        output_length = cols
    elif self.border_mode == 'full':
        output_length = cols + self.nb_col - 1
    elif self.border_mode == 'valid':
        output_length = cols - self.nb_col + 1
    cols = (output_length + self.subsample[1] - 1) // self.subsample[1]
    return (input_shape[0], self.nb_filter, rows, cols)
deep-coref
positive
def orbit_pattern(self):
    """The orbit pattern that determines a rocket's flight duration.

    :type self: GameController
    :rtype: OrbitPattern
    """
    result = _lib.bc_GameController_orbit_pattern(self._ptr)
    <DeepExtract>
    if _lib.bc_has_err():
        _lasterror = _ffi.new('char**')
        err = _lib.bc_get_last_err(_lasterror)
        errtext = _ffi.string(_lasterror[0])
        _lib.bc_free_string(_lasterror[0])
        raise Exception(errtext)
    </DeepExtract>
    _result = OrbitPattern.__new__(OrbitPattern)
    if result != _ffi.NULL:
        _result._ptr = result
    result = _result
    return result
def orbit_pattern(self):
    """The orbit pattern that determines a rocket's flight duration.

    :type self: GameController
    :rtype: OrbitPattern
    """
    result = _lib.bc_GameController_orbit_pattern(self._ptr)
    if _lib.bc_has_err():
        _lasterror = _ffi.new('char**')
        err = _lib.bc_get_last_err(_lasterror)
        errtext = _ffi.string(_lasterror[0])
        _lib.bc_free_string(_lasterror[0])
        raise Exception(errtext)
    _result = OrbitPattern.__new__(OrbitPattern)
    if result != _ffi.NULL:
        _result._ptr = result
    result = _result
    return result
bc18-scaffold
positive
@property
def ap_score(self):
    <DeepExtract>
    (pred_probs_vec, labels_vec) = (self.pred_probs[:, 1], self.labels)
    </DeepExtract>
    return sm.average_precision_score(labels_vec, pred_probs_vec)
@property
def ap_score(self):
    (pred_probs_vec, labels_vec) = (self.pred_probs[:, 1], self.labels)
    return sm.average_precision_score(labels_vec, pred_probs_vec)
dex-net
positive
def DSIN(dnn_feature_columns, sess_feature_list, sess_max_count=5, bias_encoding=False, att_embedding_size=1, att_head_num=8, dnn_hidden_units=(256, 128, 64), dnn_activation='relu', dnn_dropout=0, dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-06, seed=1024, task='binary'): """Instantiates the Deep Session Interest Network architecture. :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param sess_feature_list: list,to indicate sequence sparse field :param sess_max_count: positive int, to indicate the max number of sessions :param sess_len_max: positive int, to indicate the max length of each session :param bias_encoding: bool. Whether use bias encoding or postional encoding :param att_embedding_size: positive int, the embedding size of each attention head :param att_head_num: positive int, the number of attention head :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param dnn_activation: Activation function to use in deep net :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param seed: integer ,to use as random seed. :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ hist_emb_size = sum(map(lambda fc: fc.embedding_dim, filter(lambda fc: fc.name in sess_feature_list, dnn_feature_columns))) if att_embedding_size * att_head_num != hist_emb_size: raise ValueError('hist_emb_size must equal to att_embedding_size * att_head_num ,got %d != %d *%d' % (hist_emb_size, att_embedding_size, att_head_num)) features = build_input_features(dnn_feature_columns) sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeat), dnn_feature_columns)) if dnn_feature_columns else [] dense_feature_columns = list(filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else [] varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else [] sparse_varlen_feature_columns = [] history_fc_names = list(map(lambda x: 'sess' + x, sess_feature_list)) for fc in varlen_sparse_feature_columns: feature_name = fc.name if feature_name in history_fc_names: continue else: sparse_varlen_feature_columns.append(fc) inputs_list = list(features.values()) user_behavior_input_dict = {} for idx in range(sess_max_count): sess_input = OrderedDict() for (i, feat) in enumerate(sess_feature_list): sess_input[feat] = features['sess_' + str(idx) + '_' + feat] user_behavior_input_dict['sess_' + str(idx)] = sess_input user_sess_length = Input(shape=(1,), name='sess_length') embedding_dict = {feat.embedding_name: Embedding(feat.vocabulary_size, feat.embedding_dim, embeddings_initializer=feat.embeddings_initializer, embeddings_regularizer=l2(l2_reg_embedding), name='sparse_emb_' + str(i) + '-' + feat.name, mask_zero=feat.name in sess_feature_list) for (i, feat) in enumerate(sparse_feature_columns)} query_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns, sess_feature_list, sess_feature_list, to_list=True) dnn_input_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns, mask_feat_list=sess_feature_list, 
to_list=True) dense_value_list = get_dense_input(features, dense_feature_columns) query_emb = concat_func(query_emb_list, mask=True) dnn_input_emb = Flatten()(concat_func(dnn_input_emb_list)) <DeepExtract> tr_input = [] for i in range(sess_max_count): sess_name = 'sess_' + str(i) keys_emb_list = get_embedding_vec_list(embedding_dict, user_behavior_input_dict[sess_name], sparse_feature_columns, sess_feature_list, sess_feature_list) keys_emb = concat_func(keys_emb_list, mask=True) tr_input.append(keys_emb) if bias_encoding: tr_input = BiasEncoding(sess_max_count)(tr_input) tr_input = tr_input </DeepExtract> Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=False, use_positional_encoding=not bias_encoding, seed=seed, supports_masking=True, blinding=True) <DeepExtract> tr_out = [] for i in range(sess_max_count): tr_out.append(Self_Attention([tr_input[i], tr_input[i]])) sess_fea = concat_func(tr_out, axis=1) sess_fea = sess_fea </DeepExtract> interest_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True, supports_masking=False)([query_emb, sess_fea, user_sess_length]) lstm_outputs = BiLSTM(hist_emb_size, layers=2, res_layers=0, dropout_rate=0.2)(sess_fea) lstm_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True)([query_emb, lstm_outputs, user_sess_length]) dnn_input_emb = Concatenate()([dnn_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer)]) dnn_input_emb = combined_dnn_input([dnn_input_emb], dense_value_list) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input_emb) output = Dense(1, use_bias=False)(output) output = PredictionLayer(task)(output) sess_input_list = [] for i in range(sess_max_count): sess_name = 'sess_' + str(i) sess_input_list.extend(get_inputs_list([user_behavior_input_dict[sess_name]])) model = Model(inputs=inputs_list + [user_sess_length], outputs=output) return model
def DSIN(dnn_feature_columns, sess_feature_list, sess_max_count=5, bias_encoding=False, att_embedding_size=1, att_head_num=8, dnn_hidden_units=(256, 128, 64), dnn_activation='relu', dnn_dropout=0, dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-06, seed=1024, task='binary'): """Instantiates the Deep Session Interest Network architecture. :param dnn_feature_columns: An iterable containing all the features used by deep part of the model. :param sess_feature_list: list,to indicate sequence sparse field :param sess_max_count: positive int, to indicate the max number of sessions :param sess_len_max: positive int, to indicate the max length of each session :param bias_encoding: bool. Whether use bias encoding or postional encoding :param att_embedding_size: positive int, the embedding size of each attention head :param att_head_num: positive int, the number of attention head :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param dnn_activation: Activation function to use in deep net :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param seed: integer ,to use as random seed. :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. """ hist_emb_size = sum(map(lambda fc: fc.embedding_dim, filter(lambda fc: fc.name in sess_feature_list, dnn_feature_columns))) if att_embedding_size * att_head_num != hist_emb_size: raise ValueError('hist_emb_size must equal to att_embedding_size * att_head_num ,got %d != %d *%d' % (hist_emb_size, att_embedding_size, att_head_num)) features = build_input_features(dnn_feature_columns) sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeat), dnn_feature_columns)) if dnn_feature_columns else [] dense_feature_columns = list(filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else [] varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else [] sparse_varlen_feature_columns = [] history_fc_names = list(map(lambda x: 'sess' + x, sess_feature_list)) for fc in varlen_sparse_feature_columns: feature_name = fc.name if feature_name in history_fc_names: continue else: sparse_varlen_feature_columns.append(fc) inputs_list = list(features.values()) user_behavior_input_dict = {} for idx in range(sess_max_count): sess_input = OrderedDict() for (i, feat) in enumerate(sess_feature_list): sess_input[feat] = features['sess_' + str(idx) + '_' + feat] user_behavior_input_dict['sess_' + str(idx)] = sess_input user_sess_length = Input(shape=(1,), name='sess_length') embedding_dict = {feat.embedding_name: Embedding(feat.vocabulary_size, feat.embedding_dim, embeddings_initializer=feat.embeddings_initializer, embeddings_regularizer=l2(l2_reg_embedding), name='sparse_emb_' + str(i) + '-' + feat.name, mask_zero=feat.name in sess_feature_list) for (i, feat) in enumerate(sparse_feature_columns)} query_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns, sess_feature_list, sess_feature_list, to_list=True) dnn_input_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns, mask_feat_list=sess_feature_list, 
to_list=True) dense_value_list = get_dense_input(features, dense_feature_columns) query_emb = concat_func(query_emb_list, mask=True) dnn_input_emb = Flatten()(concat_func(dnn_input_emb_list)) tr_input = [] for i in range(sess_max_count): sess_name = 'sess_' + str(i) keys_emb_list = get_embedding_vec_list(embedding_dict, user_behavior_input_dict[sess_name], sparse_feature_columns, sess_feature_list, sess_feature_list) keys_emb = concat_func(keys_emb_list, mask=True) tr_input.append(keys_emb) if bias_encoding: tr_input = BiasEncoding(sess_max_count)(tr_input) tr_input = tr_input Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=False, use_positional_encoding=not bias_encoding, seed=seed, supports_masking=True, blinding=True) tr_out = [] for i in range(sess_max_count): tr_out.append(Self_Attention([tr_input[i], tr_input[i]])) sess_fea = concat_func(tr_out, axis=1) sess_fea = sess_fea interest_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True, supports_masking=False)([query_emb, sess_fea, user_sess_length]) lstm_outputs = BiLSTM(hist_emb_size, layers=2, res_layers=0, dropout_rate=0.2)(sess_fea) lstm_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True)([query_emb, lstm_outputs, user_sess_length]) dnn_input_emb = Concatenate()([dnn_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer)]) dnn_input_emb = combined_dnn_input([dnn_input_emb], dense_value_list) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input_emb) output = Dense(1, use_bias=False)(output) output = PredictionLayer(task)(output) sess_input_list = [] for i in range(sess_max_count): sess_name = 'sess_' + str(i) sess_input_list.extend(get_inputs_list([user_behavior_input_dict[sess_name]])) model = Model(inputs=inputs_list + [user_sess_length], outputs=output) return model
DeepCTR
positive
def main(args): logger.info('-' * 100) logger.info('Load data files') train_exs = utils.load_data(args, args.train_file, skip_no_answer=True) logger.info('Num train examples = %d' % len(train_exs)) dev_exs = utils.load_data(args, args.dev_file) logger.info('Num dev examples = %d' % len(dev_exs)) if args.official_eval: dev_texts = utils.load_text(args.dev_json) dev_offsets = {ex['id']: ex['offsets'] for ex in dev_exs} dev_answers = utils.load_answers(args.dev_json) logger.info('-' * 100) start_epoch = 0 if args.checkpoint and os.path.isfile(args.model_file + '.checkpoint'): logger.info('Found a checkpoint...') checkpoint_file = args.model_file + '.checkpoint' (model, start_epoch) = DocReader.load_checkpoint(checkpoint_file, args) else: if args.pretrained: logger.info('Using pretrained model...') model = DocReader.load(args.pretrained, args) if args.expand_dictionary: logger.info('Expanding dictionary for new data...') words = utils.load_words(args, train_exs + dev_exs) added = model.expand_dictionary(words) if args.embedding_file: model.load_embeddings(added, args.embedding_file) else: logger.info('Training model from scratch...') <DeepExtract> logger.info('-' * 100) logger.info('Generate features') feature_dict = utils.build_feature_dict(args, train_exs) logger.info('Num features = %d' % len(feature_dict)) logger.info(feature_dict) logger.info('-' * 100) logger.info('Build dictionary') word_dict = utils.build_word_dict(args, train_exs + dev_exs) logger.info('Num words = %d' % len(word_dict)) model = DocReader(config.get_model_args(args), word_dict, feature_dict) if args.embedding_file: model.load_embeddings(word_dict.tokens(), args.embedding_file) model = model </DeepExtract> if args.tune_partial > 0: logger.info('-' * 100) logger.info('Counting %d most frequent question words' % args.tune_partial) top_words = utils.top_question_words(args, train_exs, model.word_dict) for word in top_words[:5]: logger.info(word) logger.info('...') for word in top_words[-6:-1]: logger.info(word) model.tune_embeddings([w[0] for w in top_words]) model.init_optimizer() if args.cuda: model.cuda() if args.parallel: model.parallelize() logger.info('-' * 100) logger.info('Make data loaders') train_dataset = data.ReaderDataset(train_exs, model, single_answer=True) if args.sort_by_len: train_sampler = data.SortedBatchSampler(train_dataset.lengths(), args.batch_size, shuffle=True) else: train_sampler = torch.utils.data.sampler.RandomSampler(train_dataset) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=train_sampler, num_workers=args.data_workers, collate_fn=vector.batchify, pin_memory=args.cuda) dev_dataset = data.ReaderDataset(dev_exs, model, single_answer=False) if args.sort_by_len: dev_sampler = data.SortedBatchSampler(dev_dataset.lengths(), args.test_batch_size, shuffle=False) else: dev_sampler = torch.utils.data.sampler.SequentialSampler(dev_dataset) dev_loader = torch.utils.data.DataLoader(dev_dataset, batch_size=args.test_batch_size, sampler=dev_sampler, num_workers=args.data_workers, collate_fn=vector.batchify, pin_memory=args.cuda) logger.info('-' * 100) logger.info('CONFIG:\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) logger.info('-' * 100) logger.info('Starting training...') stats = {'timer': utils.Timer(), 'epoch': 0, 'best_valid': 0} for epoch in range(start_epoch, args.num_epochs): stats['epoch'] = epoch <DeepExtract> train_loss = utils.AverageMeter() epoch_time = utils.Timer() for (idx, ex) in enumerate(train_loader): 
train_loss.update(*model.update(ex)) if idx % args.display_iter == 0: logger.info('train: Epoch = %d | iter = %d/%d | ' % (stats['epoch'], idx, len(train_loader)) + 'loss = %.2f | elapsed time = %.2f (s)' % (train_loss.avg, stats['timer'].time())) train_loss.reset() logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' % (stats['epoch'], epoch_time.time())) if args.checkpoint: model.checkpoint(args.model_file + '.checkpoint', stats['epoch'] + 1) </DeepExtract> <DeepExtract> eval_time = utils.Timer() start_acc = utils.AverageMeter() end_acc = utils.AverageMeter() exact_match = utils.AverageMeter() examples = 0 for ex in train_loader: batch_size = ex[0].size(0) (pred_s, pred_e, _) = model.predict(ex) (target_s, target_e) = ex[-3:-1] accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e) start_acc.update(accuracies[0], batch_size) end_acc.update(accuracies[1], batch_size) exact_match.update(accuracies[2], batch_size) examples += batch_size if 'train' == 'train' and examples >= 10000.0: break logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' % ('train', stats['epoch'], start_acc.avg) + 'end = %.2f | exact = %.2f | examples = %d | ' % (end_acc.avg, exact_match.avg, examples) + 'valid time = %.2f (s)' % eval_time.time()) return {'exact_match': exact_match.avg} </DeepExtract> <DeepExtract> eval_time = utils.Timer() start_acc = utils.AverageMeter() end_acc = utils.AverageMeter() exact_match = utils.AverageMeter() examples = 0 for ex in dev_loader: batch_size = ex[0].size(0) (pred_s, pred_e, _) = model.predict(ex) (target_s, target_e) = ex[-3:-1] accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e) start_acc.update(accuracies[0], batch_size) end_acc.update(accuracies[1], batch_size) exact_match.update(accuracies[2], batch_size) examples += batch_size if 'dev' == 'train' and examples >= 10000.0: break logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' % ('dev', stats['epoch'], start_acc.avg) + 'end = %.2f | exact = %.2f | examples = %d | ' % (end_acc.avg, exact_match.avg, examples) + 'valid time = %.2f (s)' % eval_time.time()) result = {'exact_match': exact_match.avg} </DeepExtract> if args.official_eval: <DeepExtract> eval_time = utils.Timer() f1 = utils.AverageMeter() exact_match = utils.AverageMeter() examples = 0 for ex in dev_loader: (ex_id, batch_size) = (ex[-1], ex[0].size(0)) (pred_s, pred_e, _) = model.predict(ex) for i in range(batch_size): s_offset = dev_offsets[ex_id[i]][pred_s[i][0]][0] e_offset = dev_offsets[ex_id[i]][pred_e[i][0]][1] prediction = dev_texts[ex_id[i]][s_offset:e_offset] ground_truths = dev_answers[ex_id[i]] exact_match.update(utils.metric_max_over_ground_truths(utils.exact_match_score, prediction, ground_truths)) f1.update(utils.metric_max_over_ground_truths(utils.f1_score, prediction, ground_truths)) examples += batch_size logger.info('dev valid official: Epoch = %d | EM = %.2f | ' % (stats['epoch'], exact_match.avg * 100) + 'F1 = %.2f | examples = %d | valid time = %.2f (s)' % (f1.avg * 100, examples, eval_time.time())) result = {'exact_match': exact_match.avg * 100, 'f1': f1.avg * 100} </DeepExtract> if result[args.valid_metric] > stats['best_valid']: logger.info('Best valid: %s = %.2f (epoch %d, %d updates)' % (args.valid_metric, result[args.valid_metric], stats['epoch'], model.updates)) model.save(args.model_file) stats['best_valid'] = result[args.valid_metric]
def main(args): logger.info('-' * 100) logger.info('Load data files') train_exs = utils.load_data(args, args.train_file, skip_no_answer=True) logger.info('Num train examples = %d' % len(train_exs)) dev_exs = utils.load_data(args, args.dev_file) logger.info('Num dev examples = %d' % len(dev_exs)) if args.official_eval: dev_texts = utils.load_text(args.dev_json) dev_offsets = {ex['id']: ex['offsets'] for ex in dev_exs} dev_answers = utils.load_answers(args.dev_json) logger.info('-' * 100) start_epoch = 0 if args.checkpoint and os.path.isfile(args.model_file + '.checkpoint'): logger.info('Found a checkpoint...') checkpoint_file = args.model_file + '.checkpoint' (model, start_epoch) = DocReader.load_checkpoint(checkpoint_file, args) else: if args.pretrained: logger.info('Using pretrained model...') model = DocReader.load(args.pretrained, args) if args.expand_dictionary: logger.info('Expanding dictionary for new data...') words = utils.load_words(args, train_exs + dev_exs) added = model.expand_dictionary(words) if args.embedding_file: model.load_embeddings(added, args.embedding_file) else: logger.info('Training model from scratch...') logger.info('-' * 100) logger.info('Generate features') feature_dict = utils.build_feature_dict(args, train_exs) logger.info('Num features = %d' % len(feature_dict)) logger.info(feature_dict) logger.info('-' * 100) logger.info('Build dictionary') word_dict = utils.build_word_dict(args, train_exs + dev_exs) logger.info('Num words = %d' % len(word_dict)) model = DocReader(config.get_model_args(args), word_dict, feature_dict) if args.embedding_file: model.load_embeddings(word_dict.tokens(), args.embedding_file) model = model if args.tune_partial > 0: logger.info('-' * 100) logger.info('Counting %d most frequent question words' % args.tune_partial) top_words = utils.top_question_words(args, train_exs, model.word_dict) for word in top_words[:5]: logger.info(word) logger.info('...') for word in top_words[-6:-1]: logger.info(word) model.tune_embeddings([w[0] for w in top_words]) model.init_optimizer() if args.cuda: model.cuda() if args.parallel: model.parallelize() logger.info('-' * 100) logger.info('Make data loaders') train_dataset = data.ReaderDataset(train_exs, model, single_answer=True) if args.sort_by_len: train_sampler = data.SortedBatchSampler(train_dataset.lengths(), args.batch_size, shuffle=True) else: train_sampler = torch.utils.data.sampler.RandomSampler(train_dataset) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=train_sampler, num_workers=args.data_workers, collate_fn=vector.batchify, pin_memory=args.cuda) dev_dataset = data.ReaderDataset(dev_exs, model, single_answer=False) if args.sort_by_len: dev_sampler = data.SortedBatchSampler(dev_dataset.lengths(), args.test_batch_size, shuffle=False) else: dev_sampler = torch.utils.data.sampler.SequentialSampler(dev_dataset) dev_loader = torch.utils.data.DataLoader(dev_dataset, batch_size=args.test_batch_size, sampler=dev_sampler, num_workers=args.data_workers, collate_fn=vector.batchify, pin_memory=args.cuda) logger.info('-' * 100) logger.info('CONFIG:\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) logger.info('-' * 100) logger.info('Starting training...') stats = {'timer': utils.Timer(), 'epoch': 0, 'best_valid': 0} for epoch in range(start_epoch, args.num_epochs): stats['epoch'] = epoch train_loss = utils.AverageMeter() epoch_time = utils.Timer() for (idx, ex) in enumerate(train_loader): train_loss.update(*model.update(ex)) if idx % args.display_iter == 0: 
logger.info('train: Epoch = %d | iter = %d/%d | ' % (stats['epoch'], idx, len(train_loader)) + 'loss = %.2f | elapsed time = %.2f (s)' % (train_loss.avg, stats['timer'].time())) train_loss.reset() logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' % (stats['epoch'], epoch_time.time())) if args.checkpoint: model.checkpoint(args.model_file + '.checkpoint', stats['epoch'] + 1) eval_time = utils.Timer() start_acc = utils.AverageMeter() end_acc = utils.AverageMeter() exact_match = utils.AverageMeter() examples = 0 for ex in train_loader: batch_size = ex[0].size(0) (pred_s, pred_e, _) = model.predict(ex) (target_s, target_e) = ex[-3:-1] accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e) start_acc.update(accuracies[0], batch_size) end_acc.update(accuracies[1], batch_size) exact_match.update(accuracies[2], batch_size) examples += batch_size if 'train' == 'train' and examples >= 10000.0: break logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' % ('train', stats['epoch'], start_acc.avg) + 'end = %.2f | exact = %.2f | examples = %d | ' % (end_acc.avg, exact_match.avg, examples) + 'valid time = %.2f (s)' % eval_time.time()) return {'exact_match': exact_match.avg} eval_time = utils.Timer() start_acc = utils.AverageMeter() end_acc = utils.AverageMeter() exact_match = utils.AverageMeter() examples = 0 for ex in dev_loader: batch_size = ex[0].size(0) (pred_s, pred_e, _) = model.predict(ex) (target_s, target_e) = ex[-3:-1] accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e) start_acc.update(accuracies[0], batch_size) end_acc.update(accuracies[1], batch_size) exact_match.update(accuracies[2], batch_size) examples += batch_size if 'dev' == 'train' and examples >= 10000.0: break logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' % ('dev', stats['epoch'], start_acc.avg) + 'end = %.2f | exact = %.2f | examples = %d | ' % (end_acc.avg, exact_match.avg, examples) + 'valid time = %.2f (s)' % eval_time.time()) result = {'exact_match': exact_match.avg} if args.official_eval: eval_time = utils.Timer() f1 = utils.AverageMeter() exact_match = utils.AverageMeter() examples = 0 for ex in dev_loader: (ex_id, batch_size) = (ex[-1], ex[0].size(0)) (pred_s, pred_e, _) = model.predict(ex) for i in range(batch_size): s_offset = dev_offsets[ex_id[i]][pred_s[i][0]][0] e_offset = dev_offsets[ex_id[i]][pred_e[i][0]][1] prediction = dev_texts[ex_id[i]][s_offset:e_offset] ground_truths = dev_answers[ex_id[i]] exact_match.update(utils.metric_max_over_ground_truths(utils.exact_match_score, prediction, ground_truths)) f1.update(utils.metric_max_over_ground_truths(utils.f1_score, prediction, ground_truths)) examples += batch_size logger.info('dev valid official: Epoch = %d | EM = %.2f | ' % (stats['epoch'], exact_match.avg * 100) + 'F1 = %.2f | examples = %d | valid time = %.2f (s)' % (f1.avg * 100, examples, eval_time.time())) result = {'exact_match': exact_match.avg * 100, 'f1': f1.avg * 100} if result[args.valid_metric] > stats['best_valid']: logger.info('Best valid: %s = %.2f (epoch %d, %d updates)' % (args.valid_metric, result[args.valid_metric], stats['epoch'], model.updates)) model.save(args.model_file) stats['best_valid'] = result[args.valid_metric]
DeFactoNLP
positive
def _create_worker_process(i): sentinel = self._ctx.Event() if self.allow_restart else None <DeepExtract> (inq, outq, synq) = (self._inqueue, self._outqueue, None) </DeepExtract> on_ready_counter = self._ctx.Value('i') <DeepExtract> w = self.Worker(inq, outq, synq, self._initializer, self._initargs, self._maxtasksperchild, sentinel, self._on_process_exit, sigprotection=self.threads, wrap_exception=self._wrap_exception, max_memory_per_child=self._max_memory_per_child, on_ready_counter=on_ready_counter).contribute_to_object(self.Process(target=self.Worker(inq, outq, synq, self._initializer, self._initargs, self._maxtasksperchild, sentinel, self._on_process_exit, sigprotection=self.threads, wrap_exception=self._wrap_exception, max_memory_per_child=self._max_memory_per_child, on_ready_counter=on_ready_counter))) </DeepExtract> self._pool.append(w) <DeepExtract> pass </DeepExtract> w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.index = i w.start() self._poolctrl[w.pid] = sentinel self._on_ready_counters[w.pid] = on_ready_counter if self.on_process_up: self.on_process_up(w) return w
def _create_worker_process(i): sentinel = self._ctx.Event() if self.allow_restart else None (inq, outq, synq) = (self._inqueue, self._outqueue, None) on_ready_counter = self._ctx.Value('i') w = self.Worker(inq, outq, synq, self._initializer, self._initargs, self._maxtasksperchild, sentinel, self._on_process_exit, sigprotection=self.threads, wrap_exception=self._wrap_exception, max_memory_per_child=self._max_memory_per_child, on_ready_counter=on_ready_counter).contribute_to_object(self.Process(target=self.Worker(inq, outq, synq, self._initializer, self._initargs, self._maxtasksperchild, sentinel, self._on_process_exit, sigprotection=self.threads, wrap_exception=self._wrap_exception, max_memory_per_child=self._max_memory_per_child, on_ready_counter=on_ready_counter))) self._pool.append(w) pass w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.index = i w.start() self._poolctrl[w.pid] = sentinel self._on_ready_counters[w.pid] = on_ready_counter if self.on_process_up: self.on_process_up(w) return w
billiard
positive
def notify(message, priority='normal', timeout=0, block=False): """ opens notification popup. :param message: message to print :type message: str :param priority: priority string, used to format the popup: currently, 'normal' and 'error' are defined. If you use 'X' here, the attribute 'global_notify_X' is used to format the popup. :type priority: str :param timeout: seconds until message disappears. Defaults to the value of 'notify_timeout' in the general config section. A negative value means never time out. :type timeout: int :param block: this notification blocks until a keypress is made :type block: bool :returns: an urwid widget (this notification) that can be handed to :meth:`clear_notify` for removal """ def build_line(msg, prio): cols = urwid.Columns([urwid.Text(msg)]) att = settings.get_theming_attribute('global', 'notify_' + prio) return urwid.AttrMap(cols, att) msgs = [build_line(message, priority)] if not self._notificationbar: self._notificationbar = urwid.Pile(msgs) else: newpile = self._notificationbar.widget_list + msgs self._notificationbar = urwid.Pile(newpile) <DeepExtract> mainframe = self.root_widget.original_widget if self.current_buffer: mainframe.set_body(self.current_buffer) lines = [] if self._notificationbar: lines.append(self._notificationbar) if self._show_statusbar: lines.append(self.build_statusbar()) if lines: mainframe.set_footer(urwid.Pile(lines)) else: mainframe.set_footer(None) if self.mainloop.screen.started and redraw: self.mainloop.draw_screen() </DeepExtract> def clear(*_): <DeepExtract> newpile = self._notificationbar.widget_list for l in msgs: if l in newpile: newpile.remove(l) if newpile: self._notificationbar = urwid.Pile(newpile) else: self._notificationbar = None self.update() </DeepExtract> if block: <DeepExtract> cols = urwid.Columns([urwid.Text('(escape continues)')]) att = settings.get_theming_attribute('global', 'notify_' + priority) txt = urwid.AttrMap(cols, att) </DeepExtract> overlay = urwid.Overlay(txt, self.root_widget, ('fixed left', 0), ('fixed right', 0), ('fixed bottom', 0), None) <DeepExtract> self.mainloop.widget = overlay self._unlock_key = 'esc' self._unlock_callback = clear self._locked = True </DeepExtract> elif timeout >= 0: if timeout == 0: timeout = settings.get('notify_timeout') self.mainloop.set_alarm_in(timeout, clear) return msgs[0]
def notify(message, priority='normal', timeout=0, block=False): """ opens notification popup. :param message: message to print :type message: str :param priority: priority string, used to format the popup: currently, 'normal' and 'error' are defined. If you use 'X' here, the attribute 'global_notify_X' is used to format the popup. :type priority: str :param timeout: seconds until message disappears. Defaults to the value of 'notify_timeout' in the general config section. A negative value means never time out. :type timeout: int :param block: this notification blocks until a keypress is made :type block: bool :returns: an urwid widget (this notification) that can be handed to :meth:`clear_notify` for removal """ def build_line(msg, prio): cols = urwid.Columns([urwid.Text(msg)]) att = settings.get_theming_attribute('global', 'notify_' + prio) return urwid.AttrMap(cols, att) msgs = [build_line(message, priority)] if not self._notificationbar: self._notificationbar = urwid.Pile(msgs) else: newpile = self._notificationbar.widget_list + msgs self._notificationbar = urwid.Pile(newpile) mainframe = self.root_widget.original_widget if self.current_buffer: mainframe.set_body(self.current_buffer) lines = [] if self._notificationbar: lines.append(self._notificationbar) if self._show_statusbar: lines.append(self.build_statusbar()) if lines: mainframe.set_footer(urwid.Pile(lines)) else: mainframe.set_footer(None) if self.mainloop.screen.started and redraw: self.mainloop.draw_screen() def clear(*_): newpile = self._notificationbar.widget_list for l in msgs: if l in newpile: newpile.remove(l) if newpile: self._notificationbar = urwid.Pile(newpile) else: self._notificationbar = None self.update() if block: cols = urwid.Columns([urwid.Text('(escape continues)')]) att = settings.get_theming_attribute('global', 'notify_' + priority) txt = urwid.AttrMap(cols, att) overlay = urwid.Overlay(txt, self.root_widget, ('fixed left', 0), ('fixed right', 0), ('fixed bottom', 0), None) self.mainloop.widget = overlay self._unlock_key = 'esc' self._unlock_callback = clear self._locked = True elif timeout >= 0: if timeout == 0: timeout = settings.get('notify_timeout') self.mainloop.set_alarm_in(timeout, clear) return msgs[0]
alot
positive
def set_looper_rev(self, track, xclip, ident, value=None): """ Toggles or turns looper reverse on/off """ <DeepExtract> self._looper_data = {} for d in track.devices: if d.class_name == 'Looper': self._looper_data['Looper'] = d for p in d.parameters: if p.name in ('Device On', 'Reverse', 'State'): self._looper_data[p.name] = p break elif not self._looper_data and self._parent._can_have_nested_devices and d.can_have_chains and d.chains: for c in d.chains: self.get_looper(c) </DeepExtract> if self._looper_data and self._looper_data['Looper'] and self._looper_data['Reverse'].is_enabled: if value in KEYWORDS: self._looper_data['Reverse'].value = KEYWORDS[value] else: self._looper_data['Reverse'].value = not self._looper_data['Reverse'].value
def set_looper_rev(self, track, xclip, ident, value=None): """ Toggles or turns looper reverse on/off """ self._looper_data = {} for d in track.devices: if d.class_name == 'Looper': self._looper_data['Looper'] = d for p in d.parameters: if p.name in ('Device On', 'Reverse', 'State'): self._looper_data[p.name] = p break elif not self._looper_data and self._parent._can_have_nested_devices and d.can_have_chains and d.chains: for c in d.chains: self.get_looper(c) if self._looper_data and self._looper_data['Looper'] and self._looper_data['Reverse'].is_enabled: if value in KEYWORDS: self._looper_data['Reverse'].value = KEYWORDS[value] else: self._looper_data['Reverse'].value = not self._looper_data['Reverse'].value
clyphx-live10
positive
def force_complete(self): <DeepExtract> while not self.task_queue.empty(): self.task_queue.get_nowait() self.task_queue.task_done() </DeepExtract> <DeepExtract> for worker in self.threads: worker.terminate() </DeepExtract>
def force_complete(self): while not self.task_queue.empty(): self.task_queue.get_nowait() self.task_queue.task_done() for worker in self.threads: worker.terminate()
assetscan
positive
def get_sequence_term_parameter(self, param_node, position, close): """Validate that sequence parameters are working for items in sequence.""" if not position or close: raise self._error(param_node, 'Unexpected parameters') value = NodeInfo(ast.Boolean(True), TypeHint.Boolean) <DeepExtract> if param_node['name'] is None: key = None if isinstance(param_node['name'], list): key = [self.visit(t) for t in param_node['name']] key = Interpreter.visit(self, param_node['name']) </DeepExtract> if len(param_node.children) > 1: <DeepExtract> if param_node.children[-1] is None: value = None if isinstance(param_node.children[-1], list): value = [self.visit(t) for t in param_node.children[-1]] value = Interpreter.visit(self, param_node.children[-1]) </DeepExtract> if key == 'fork': if not value.validate((TypeHint.Boolean.require_literal(), TypeHint.Numeric.require_literal())): raise self._type_error(param_node, TypeHint.Boolean, 'Expected type {expected_type} value for {k}') if value.node.value not in (True, False, 0, 1): raise self._error(param_node, 'Invalid value for {k}') else: raise self._error(param_node['name'], 'Unknown parameter {NAME}') return (key, ast.Boolean(bool(value.node.value)))
def get_sequence_term_parameter(self, param_node, position, close): """Validate that sequence parameters are working for items in sequence.""" if not position or close: raise self._error(param_node, 'Unexpected parameters') value = NodeInfo(ast.Boolean(True), TypeHint.Boolean) if param_node['name'] is None: key = None if isinstance(param_node['name'], list): key = [self.visit(t) for t in param_node['name']] key = Interpreter.visit(self, param_node['name']) if len(param_node.children) > 1: if param_node.children[-1] is None: value = None if isinstance(param_node.children[-1], list): value = [self.visit(t) for t in param_node.children[-1]] value = Interpreter.visit(self, param_node.children[-1]) if key == 'fork': if not value.validate((TypeHint.Boolean.require_literal(), TypeHint.Numeric.require_literal())): raise self._type_error(param_node, TypeHint.Boolean, 'Expected type {expected_type} value for {k}') if value.node.value not in (True, False, 0, 1): raise self._error(param_node, 'Invalid value for {k}') else: raise self._error(param_node['name'], 'Unknown parameter {NAME}') return (key, ast.Boolean(bool(value.node.value)))
eql
positive
def _handle_op_command(self, msg_header, responseTo): op_command = MongoOpCommand.read_from_bytestream(self.rfile, msg_header.messageLength) print('OP_COMMAND:', dict(**op_command._asdict())) <DeepExtract> payloads = {b'whatsmyuri': {'you': ':'.join(map(str, self.connection.getpeername())), 'ok': 1.0}, b'buildinfo': {'version': '3.4.4', 'gitVersion': '888390515874a9debd1b6c5d36559ca86b44babd', 'targetMinOS': 'Windows 7/Windows Server 2008 R2', 'modules': [], 'allocator': 'tcmalloc', 'javascriptEngine': 'mozjs', 'sysInfo': 'deprecated', 'versionArray': [3, 4, 4, 0], 'openssl': {'running': 'OpenSSL 1.0.1u-fips 22 Sep 2016', 'compiled': 'OpenSSL 1.0.1u-fips 22 Sep 2016'}, 'buildEnvironment': {'distmod': '2008plus-ssl', 'distarch': 'x86_64', 'cc': 'cl: Microsoft (R) C/C++ Optimizing Compiler Version 19.00.24218.1 for x64', 'ccflags': '/nologo /EHsc /W3 /wd4355 /wd4800 /wd4267 /wd4244 /wd4290 /wd4068 /wd4351 /we4013 /we4099 /we4930 /Z7 /errorReport:none /MD /O2 /Oy- /bigobj /Gw /Gy /Zc:inline', 'cxx': 'cl: Microsoft (R) C/C++ Optimizing Compiler Version 19.00.24218.1 for x64', 'cxxflags': '/TP', 'linkflags': '/nologo /DEBUG /INCREMENTAL:NO /LARGEADDRESSAWARE /OPT:REF', 'target_arch': 'x86_64', 'target_os': 'windows'}, 'bits': 64, 'debug': False, 'maxBsonObjectSize': 16777216, 'storageEngines': ['devnull', 'ephemeralForTest', 'mmapv1', 'wiredTiger'], 'ok': 1.0}, b'isMaster': {'ismaster': True, 'maxBsonObjectSize': 16777216, 'maxMessageSizeBytes': 48000000, 'maxWriteBatchSize': 1000, 'localTime': datetime.utcnow(), 'maxWireVersion': 5, 'minWireVersion': 0, 'readOnly': False, 'ok': 1.0}, b'replSetGetStatus': {'ok': 0.0, 'errmsg': 'not running with --replSet', 'code': 76, 'codeName': 'NoReplicationEnabled'}} payloads[b'buildInfo'] = payloads[b'buildinfo'] cmd_reply = payloads[op_command.commandName] </DeepExtract> print('OP_COMMANDREPLY:', cmd_reply) byteresp = bson.dumps(cmd_reply) + bson.dumps({}) <DeepExtract> msg_length = sizeof(MongoMsgHeader) + len(byteresp) self.wfile.write(convert_struct_to_bytes(MongoMsgHeader(messageLength=msg_length, requestID=0, responseTo=responseTo, opCode=OP_COMMANDREPLY)) + byteresp) self.wfile.flush() </DeepExtract>
def _handle_op_command(self, msg_header, responseTo): op_command = MongoOpCommand.read_from_bytestream(self.rfile, msg_header.messageLength) print('OP_COMMAND:', dict(**op_command._asdict())) payloads = {b'whatsmyuri': {'you': ':'.join(map(str, self.connection.getpeername())), 'ok': 1.0}, b'buildinfo': {'version': '3.4.4', 'gitVersion': '888390515874a9debd1b6c5d36559ca86b44babd', 'targetMinOS': 'Windows 7/Windows Server 2008 R2', 'modules': [], 'allocator': 'tcmalloc', 'javascriptEngine': 'mozjs', 'sysInfo': 'deprecated', 'versionArray': [3, 4, 4, 0], 'openssl': {'running': 'OpenSSL 1.0.1u-fips 22 Sep 2016', 'compiled': 'OpenSSL 1.0.1u-fips 22 Sep 2016'}, 'buildEnvironment': {'distmod': '2008plus-ssl', 'distarch': 'x86_64', 'cc': 'cl: Microsoft (R) C/C++ Optimizing Compiler Version 19.00.24218.1 for x64', 'ccflags': '/nologo /EHsc /W3 /wd4355 /wd4800 /wd4267 /wd4244 /wd4290 /wd4068 /wd4351 /we4013 /we4099 /we4930 /Z7 /errorReport:none /MD /O2 /Oy- /bigobj /Gw /Gy /Zc:inline', 'cxx': 'cl: Microsoft (R) C/C++ Optimizing Compiler Version 19.00.24218.1 for x64', 'cxxflags': '/TP', 'linkflags': '/nologo /DEBUG /INCREMENTAL:NO /LARGEADDRESSAWARE /OPT:REF', 'target_arch': 'x86_64', 'target_os': 'windows'}, 'bits': 64, 'debug': False, 'maxBsonObjectSize': 16777216, 'storageEngines': ['devnull', 'ephemeralForTest', 'mmapv1', 'wiredTiger'], 'ok': 1.0}, b'isMaster': {'ismaster': True, 'maxBsonObjectSize': 16777216, 'maxMessageSizeBytes': 48000000, 'maxWriteBatchSize': 1000, 'localTime': datetime.utcnow(), 'maxWireVersion': 5, 'minWireVersion': 0, 'readOnly': False, 'ok': 1.0}, b'replSetGetStatus': {'ok': 0.0, 'errmsg': 'not running with --replSet', 'code': 76, 'codeName': 'NoReplicationEnabled'}} payloads[b'buildInfo'] = payloads[b'buildinfo'] cmd_reply = payloads[op_command.commandName] print('OP_COMMANDREPLY:', cmd_reply) byteresp = bson.dumps(cmd_reply) + bson.dumps({}) msg_length = sizeof(MongoMsgHeader) + len(byteresp) self.wfile.write(convert_struct_to_bytes(MongoMsgHeader(messageLength=msg_length, requestID=0, responseTo=responseTo, opCode=OP_COMMANDREPLY)) + byteresp) self.wfile.flush()
dotfiles_and_notes
positive
@pytest.fixture def http_server(): """Provision a server creator as a fixture.""" def start_srv(): bind_addr = (yield) if bind_addr is None: return <DeepExtract> httpserver = HTTPServer(bind_addr=bind_addr, gateway=Gateway) threading.Thread(target=httpserver.safe_start).start() while not httpserver.ready: time.sleep(0.1) httpserver = httpserver </DeepExtract> yield httpserver yield httpserver srv_creator = iter(start_srv()) next(srv_creator) yield srv_creator try: while True: httpserver = next(srv_creator) if httpserver is not None: httpserver.stop() except StopIteration: pass
@pytest.fixture def http_server(): """Provision a server creator as a fixture.""" def start_srv(): bind_addr = (yield) if bind_addr is None: return httpserver = HTTPServer(bind_addr=bind_addr, gateway=Gateway) threading.Thread(target=httpserver.safe_start).start() while not httpserver.ready: time.sleep(0.1) httpserver = httpserver yield httpserver yield httpserver srv_creator = iter(start_srv()) next(srv_creator) yield srv_creator try: while True: httpserver = next(srv_creator) if httpserver is not None: httpserver.stop() except StopIteration: pass
cheroot
positive
@patch('ciftify.config.verify_msm_available') @patch('ciftify.bidsapp.fmriprep_ciftify.run') def test_ux05_default_all_participants_for_synth(mock_run, mock_vmsm): uargs = [synth_bids, '/output/dir', 'participant'] <DeepExtract> docstring = run_script.__doc__ arguments = docopt(docstring, uargs) settings = run_script.Settings(arguments) if settings.analysis_level == 'group': ret = run_script.run_group_workflow(settings) if settings.analysis_level == 'participant': ret = run_script.run_participant_workflow(settings) ret = ret </DeepExtract> <DeepExtract> cmd_list = [] for i in mock_run.call_args_list: cmd = i[0][0] if type(cmd) is list: cmd = ' '.join(cmd) cmd_list.append(cmd) mock_run.call_args_list = cmd_list </DeepExtract> assert count_calls_to('fmriprep', call_list) == 65 assert count_calls_to('ciftify_recon_all', call_list) == 5
@patch('ciftify.config.verify_msm_available') @patch('ciftify.bidsapp.fmriprep_ciftify.run') def test_ux05_default_all_participants_for_synth(mock_run, mock_vmsm): uargs = [synth_bids, '/output/dir', 'participant'] docstring = run_script.__doc__ arguments = docopt(docstring, uargs) settings = run_script.Settings(arguments) if settings.analysis_level == 'group': ret = run_script.run_group_workflow(settings) if settings.analysis_level == 'participant': ret = run_script.run_participant_workflow(settings) ret = ret cmd_list = [] for i in mock_run.call_args_list: cmd = i[0][0] if type(cmd) is list: cmd = ' '.join(cmd) cmd_list.append(cmd) mock_run.call_args_list = cmd_list assert count_calls_to('fmriprep', call_list) == 65 assert count_calls_to('ciftify_recon_all', call_list) == 5
ciftify
positive
def test_318_ubuntu18_postgres_dockerfile(self): """ WHEN using a dockerfile for systemd-enabled Ubuntu 16.04 and python3, THEN we can create an image with an PostgreSql DB service being installed and enabled. Without a special startup.sh script or container-cmd one can just start the image and in the container expecting that the service is started. Therefore, WHEN we start the image as a docker container THEN we can see a specific role with an SQL query because the test script has created a new user account in the in the database with a known password. """ if not os.path.exists(DOCKER_SOCKET): self.skipTest('docker-based test') if not os.path.exists(PSQL_TOOL): self.skipTest('postgres tools missing on host') docker = _docker curl = _curl python = _python or _python3 <DeepExtract> name = self.caller_testname() if suffix: testname = name + '_' + suffix testname = name </DeepExtract> <DeepExtract> testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) os.makedirs(newdir) testdir = newdir </DeepExtract> dockerfile = 'ubuntu18-postgres.dockerfile' <DeepExtract> image = '' for line in open(dockerfile): m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line) if m: image = m.group(1) break m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line) if m: image = m.group(1).strip() break logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image) if image: addhosts = self.start_mirror(image, extras) addhosts = '' </DeepExtract> <DeepExtract> savename = os.path.splitext(os.path.basename(dockerfile))[0] </DeepExtract> saveto = SAVETO images = IMAGES psql = PSQL_TOOL <DeepExtract> if _password: password = _password out = 'Password.' out += random.choice(string.ascii_uppercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(',.-+') out += random.choice('0123456789') out += random.choice('0123456789') password = out </DeepExtract> testpass = 'Test.' + password cmd = '{docker} build . 
-f {dockerfile} {addhosts} --build-arg PASSWORD={password} --build-arg TESTPASS={testpass} --tag {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rm --force {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} run -d --name {testname} {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> <DeepExtract> docker = _docker cmd = '{docker} inspect {name}' values = output(cmd.format(**locals())) values = json.loads(values) if not values or 'NetworkSettings' not in values[0]: logg.critical(' docker inspect %s => %s ', testname, values) container = values[0]['NetworkSettings']['IPAddress'] </DeepExtract> cmd = 'for i in 1 2 3 4 5 6 7 8 9; do echo -n "[$i] "; pg_isready -h {container} && break; sleep 2; done' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> login = 'export PGUSER=testuser_11; export PGPASSWORD=' + testpass query = 'SELECT rolname FROM pg_roles' cmd = "{login}; {psql} -h {container} -d postgres -c '{query}' > {testdir}/{testname}.txt" <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = 'grep testuser_ok {testdir}/{testname}.txt' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} stop {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rm --force {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rmi {saveto}/{savename}:latest' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest' <DeepExtract> if 
isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rmi {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> <DeepExtract> testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) return newdir </DeepExtract>
def test_318_ubuntu18_postgres_dockerfile(self): """ WHEN using a dockerfile for systemd-enabled Ubuntu 16.04 and python3, THEN we can create an image with an PostgreSql DB service being installed and enabled. Without a special startup.sh script or container-cmd one can just start the image and in the container expecting that the service is started. Therefore, WHEN we start the image as a docker container THEN we can see a specific role with an SQL query because the test script has created a new user account in the in the database with a known password. """ if not os.path.exists(DOCKER_SOCKET): self.skipTest('docker-based test') if not os.path.exists(PSQL_TOOL): self.skipTest('postgres tools missing on host') docker = _docker curl = _curl python = _python or _python3 name = self.caller_testname() if suffix: testname = name + '_' + suffix testname = name testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) os.makedirs(newdir) testdir = newdir dockerfile = 'ubuntu18-postgres.dockerfile' image = '' for line in open(dockerfile): m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line) if m: image = m.group(1) break m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line) if m: image = m.group(1).strip() break logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image) if image: addhosts = self.start_mirror(image, extras) addhosts = '' savename = os.path.splitext(os.path.basename(dockerfile))[0] saveto = SAVETO images = IMAGES psql = PSQL_TOOL if _password: password = _password out = 'Password.' out += random.choice(string.ascii_uppercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(',.-+') out += random.choice('0123456789') out += random.choice('0123456789') password = out testpass = 'Test.' + password cmd = '{docker} build . 
-f {dockerfile} {addhosts} --build-arg PASSWORD={password} --build-arg TESTPASS={testpass} --tag {images}:{testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rm --force {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) cmd = '{docker} run -d --name {testname} {images}:{testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) docker = _docker cmd = '{docker} inspect {name}' values = output(cmd.format(**locals())) values = json.loads(values) if not values or 'NetworkSettings' not in values[0]: logg.critical(' docker inspect %s => %s ', testname, values) container = values[0]['NetworkSettings']['IPAddress'] cmd = 'for i in 1 2 3 4 5 6 7 8 9; do echo -n "[$i] "; pg_isready -h {container} && break; sleep 2; done' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) login = 'export PGUSER=testuser_11; export PGPASSWORD=' + testpass query = 'SELECT rolname FROM pg_roles' cmd = "{login}; {psql} -h {container} -d postgres -c '{query}' > {testdir}/{testname}.txt" if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = 'grep testuser_ok {testdir}/{testname}.txt' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} stop {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rm --force {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rmi {saveto}/{savename}:latest' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rmi {images}:{testname}' if 
isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) return newdir
docker-systemctl-images
positive
def main(): parser = argparse.ArgumentParser() parser.add_argument('black_bot_name') parser.add_argument('white_bot_name') parser.add_argument('--komi', '-k', type=float, default=5.5) args = parser.parse_args() <DeepExtract> model_file = 'model_zoo/' + args.black_bot_name + '_bot.yml' weight_file = 'model_zoo/' + args.black_bot_name + '_weights.hd5' with open(model_file, 'r') as f: yml = yaml.load(f) model = model_from_yaml(yaml.dump(yml)) model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) model.load_weights(weight_file) processor = SevenPlaneProcessor() black_bot = KerasBot(model=model, processor=processor) </DeepExtract> <DeepExtract> model_file = 'model_zoo/' + args.white_bot_name + '_bot.yml' weight_file = 'model_zoo/' + args.white_bot_name + '_weights.hd5' with open(model_file, 'r') as f: yml = yaml.load(f) model = model_from_yaml(yaml.dump(yml)) model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) model.load_weights(weight_file) processor = SevenPlaneProcessor() white_bot = KerasBot(model=model, processor=processor) </DeepExtract> print('Simulating %s vs %s...' % (args.black_bot_name, args.white_bot_name)) board = goboard.GoBoard() simulate_game(board, black_bot=black_bot, white_bot=white_bot) print('Game over!') print(goboard.to_string(board)) print('\nScore (Chinese rules):') status = scoring.evaluate_territory(board) black_area = status.num_black_territory + status.num_black_stones white_area = status.num_white_territory + status.num_white_stones white_score = white_area + args.komi print('Black %d' % black_area) print('White %d + %.1f = %.1f' % (white_area, args.komi, white_score))
def main(): parser = argparse.ArgumentParser() parser.add_argument('black_bot_name') parser.add_argument('white_bot_name') parser.add_argument('--komi', '-k', type=float, default=5.5) args = parser.parse_args() model_file = 'model_zoo/' + args.black_bot_name + '_bot.yml' weight_file = 'model_zoo/' + args.black_bot_name + '_weights.hd5' with open(model_file, 'r') as f: yml = yaml.load(f) model = model_from_yaml(yaml.dump(yml)) model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) model.load_weights(weight_file) processor = SevenPlaneProcessor() black_bot = KerasBot(model=model, processor=processor) model_file = 'model_zoo/' + args.white_bot_name + '_bot.yml' weight_file = 'model_zoo/' + args.white_bot_name + '_weights.hd5' with open(model_file, 'r') as f: yml = yaml.load(f) model = model_from_yaml(yaml.dump(yml)) model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) model.load_weights(weight_file) processor = SevenPlaneProcessor() white_bot = KerasBot(model=model, processor=processor) print('Simulating %s vs %s...' % (args.black_bot_name, args.white_bot_name)) board = goboard.GoBoard() simulate_game(board, black_bot=black_bot, white_bot=white_bot) print('Game over!') print(goboard.to_string(board)) print('\nScore (Chinese rules):') status = scoring.evaluate_territory(board) black_area = status.num_black_territory + status.num_black_stones white_area = status.num_white_territory + status.num_white_stones white_score = white_area + args.komi print('Black %d' % black_area) print('White %d + %.1f = %.1f' % (white_area, args.komi, white_score))
betago
positive
def test_embedded_lambda_decorator(self): """ Test that the lambda decorator flushes metrics correctly and only once """ @datadog_lambda_wrapper def wrapped_function_1(): lambda_metric('lambda.dist.1', 10) @datadog_lambda_wrapper def wrapped_function_2(): <DeepExtract> lambda_metric('lambda.dist.1', 10) </DeepExtract> lambda_metric('lambda.dist.2', 30) _get_lambda_stats().reporter = self.reporter <DeepExtract> wrapped_function_1() lambda_metric('lambda.dist.2', 30) </DeepExtract> assert _get_lambda_stats().reporter.dist_flush_counter == 1 <DeepExtract> def sort(metric): tags = metric['tags'] or [] host = metric['host'] or '' dists = (metric['points'][0][0], metric['metric'], tags, host, metric['points'][0][1]) dists = sorted(_get_lambda_stats().reporter.distributions, key=sort) </DeepExtract> assert len(dists) == 2
def test_embedded_lambda_decorator(self): """ Test that the lambda decorator flushes metrics correctly and only once """ @datadog_lambda_wrapper def wrapped_function_1(): lambda_metric('lambda.dist.1', 10) @datadog_lambda_wrapper def wrapped_function_2(): lambda_metric('lambda.dist.1', 10) lambda_metric('lambda.dist.2', 30) _get_lambda_stats().reporter = self.reporter wrapped_function_1() lambda_metric('lambda.dist.2', 30) assert _get_lambda_stats().reporter.dist_flush_counter == 1 def sort(metric): tags = metric['tags'] or [] host = metric['host'] or '' dists = (metric['points'][0][0], metric['metric'], tags, host, metric['points'][0][1]) dists = sorted(_get_lambda_stats().reporter.distributions, key=sort) assert len(dists) == 2
datadogpy
positive
def __call__(self, class_num=1000, layer_out=None, **configs): <DeepExtract> data = mx.sym.Variable('data') self.config_map.update(configs) first_c = int(round(self.config_map['firstconv_filter_num'] * self.multiplier)) first_layer = mobilenet_unit(data=data, num_filter=first_c, kernel=(3, 3), stride=(2, 2), pad=(1, 1), if_act=True, prefix='first-3x3-conv') last_bottleneck_layer = first_layer in_c = first_c for (i, layer_setting) in enumerate(self.config_map['bottleneck_params_list']): (t, c, n, s) = layer_setting last_bottleneck_layer = inverted_residual_blocks(data=last_bottleneck_layer, in_c=in_c, t=t, c=int(round(c * self.multiplier)), n=n, s=s, prefix='seq-%d' % i) in_c = int(round(c * self.multiplier)) last_fm = mobilenet_unit(data=last_bottleneck_layer, num_filter=int(1280 * self.multiplier) if self.multiplier > 1.0 else 1280, kernel=(1, 1), stride=(1, 1), pad=(0, 0), if_act=True, prefix='last-1x1-conv') pool_size = int(self.data_wh[0] / 32) pool = mx.sym.Pooling(data=last_fm, kernel=(pool_size, pool_size), stride=(1, 1), pool_type='avg', name='global_pool', global_pool=True) flatten = mx.sym.Flatten(data=pool, name='flatten') fc = mx.symbol.FullyConnected(data=flatten, num_hidden=class_num, name='fc') softmax = mx.symbol.SoftmaxOutput(data=fc, name='softmax') sym = softmax </DeepExtract> if layer_out is None: return sym internals = sym.get_internals() if type(layer_out) is list or type(layer_out) is tuple: layers_out = [internals[layer_nm.strip() + '_output'] for layer_nm in layer_out] return layers_out else: layer_out = internals[layer_out.strip() + '_output'] return layer_out
def __call__(self, class_num=1000, layer_out=None, **configs): data = mx.sym.Variable('data') self.config_map.update(configs) first_c = int(round(self.config_map['firstconv_filter_num'] * self.multiplier)) first_layer = mobilenet_unit(data=data, num_filter=first_c, kernel=(3, 3), stride=(2, 2), pad=(1, 1), if_act=True, prefix='first-3x3-conv') last_bottleneck_layer = first_layer in_c = first_c for (i, layer_setting) in enumerate(self.config_map['bottleneck_params_list']): (t, c, n, s) = layer_setting last_bottleneck_layer = inverted_residual_blocks(data=last_bottleneck_layer, in_c=in_c, t=t, c=int(round(c * self.multiplier)), n=n, s=s, prefix='seq-%d' % i) in_c = int(round(c * self.multiplier)) last_fm = mobilenet_unit(data=last_bottleneck_layer, num_filter=int(1280 * self.multiplier) if self.multiplier > 1.0 else 1280, kernel=(1, 1), stride=(1, 1), pad=(0, 0), if_act=True, prefix='last-1x1-conv') pool_size = int(self.data_wh[0] / 32) pool = mx.sym.Pooling(data=last_fm, kernel=(pool_size, pool_size), stride=(1, 1), pool_type='avg', name='global_pool', global_pool=True) flatten = mx.sym.Flatten(data=pool, name='flatten') fc = mx.symbol.FullyConnected(data=flatten, num_hidden=class_num, name='fc') softmax = mx.symbol.SoftmaxOutput(data=fc, name='softmax') sym = softmax if layer_out is None: return sym internals = sym.get_internals() if type(layer_out) is list or type(layer_out) is tuple: layers_out = [internals[layer_nm.strip() + '_output'] for layer_nm in layer_out] return layers_out else: layer_out = internals[layer_out.strip() + '_output'] return layer_out
byteps
positive
def _process_families(self) -> None: families_json = self.api_client.query('get/families') for family_id in families_json: try: if families_json[family_id]['updated'] == '': families_json[family_id]['updated'] = None fam = Family.parse_obj(families_json[family_id]) fam.malpedia_name = family_id except ValidationError as e: self.helper.log_error(f'error parsing family: {family_id} {e} {families_json[family_id]}') continue self.helper.log_info('Processing malware family: ' + fam.malpedia_name) <DeepExtract> self.helper.log_info('Processing malware family: ' + fam.malpedia_name) guessed_malwares = self._guess_malwares_from_tags(fam.all_names) if fam.description == '' or fam.description is None: fam.description = 'Malpedia entry for ' + fam.malpedia_name marking_tlp_white = self.helper.api.marking_definition.read(id=TLP_WHITE['id']) if guessed_malwares == {} or self.update_data: try: malware = self.helper.api.malware.create(name=fam.main_name, is_family=True, createdBy=self.organization['id'], description=fam.description, aliases=fam.alt_names if fam.alt_names != [] else None, objectMarking=[marking_tlp_white['id']], update=self.update_data) except Exception as e: self.helper.log_error(f'error creating malware entity: {e}') malware_id = '' self._add_refs_for_id(fam.urls, malware['id']) malware_id = malware['id'] guessed_malwares_list = list(guessed_malwares.values()) malware_id = guessed_malwares_list[0] </DeepExtract> if malware_id == '': self.helper.log_error('some error occurred during malware creation') continue yara_rules = self.api_client.query('get/yara/' + family_id) self.helper.log_info('importing yara rules for: ' + family_id) <DeepExtract> if not self.import_yara: return for tlp_level in yara_rules: for yara_rule in yara_rules[tlp_level]: try: yr = YaraRule(tlp_level=tlp_level, rule_name=yara_rule, raw_rule=yara_rules[tlp_level][yara_rule]) self.helper.log_info(f'processing yara_rule ({yr.rule_name})') mapped_marking = self._TLP_MAPPING[tlp_level] if mapped_marking == '': continue indicator = self.helper.api.indicator.create(name=yr.rule_name, description='Yara rule from Malpedia library', pattern_type='yara', pattern=yr.raw_rule, objetMarking=[mapped_marking['id']], x_opencti_main_observable_type='StixFile', createdBy=self.organization['id'], valid_from=yr.date, update=self.update_data) except Exception as e: self.helper.log_error(f'error creating yara indicator: {e}') continue self.helper.api.stix_core_relationship.create(fromId=indicator['id'], toId=malware_id, relationship_type='indicates', description='Yara rule for ' + fam.main_name, confidence=self.confidence_level, createdBy=self.organization['id'], update=self.update_data) </DeepExtract> if not self.api_client.unauthenticated: samples = self.api_client.query('list/samples/' + family_id) self.helper.log_info(f'creating hash indicators for {fam.malpedia_name} samples') <DeepExtract> for sample in samples: try: sam = Sample.parse_obj(sample) except ValidationError as e: self.helper.log_error(f'error marshaling sample data for {sample}: {e}') continue self.helper.log_info('Processing sample: ' + sam.sha256) if sam.sha256 == '' or sam.sha256 is None or len(sam.sha256) != 64: continue san_hash = sam.sha256.lower() pattern = "[file:hashes.'SHA-256' = '" + san_hash + "']" obs = None if self.create_observables: try: obs = self.helper.api.stix_cyber_observable.create(observableData={'type': 'file', 'hashes': {'SHA-256': san_hash}}, createdBy=self.organization['id'], objectMarking=[self.default_marking['id']], update=self.update_data) 
except Exception as e: print(obs) self.helper.log_error(f'error storing observable ({sam.sha256}): {e}') continue indicator = None if self.create_indicators: try: indicator = self.helper.api.indicator.create(name=san_hash, description='Sample hash pattern from Malpedia', pattern_type='stix', pattern=pattern, x_opencti_main_observable_type='File', createdBy=self.organization['id'], objectMarking=[self.default_marking['id']], update=self.update_data) except Exception as e: self.helper.log_error(f'error storing indicator: {e}') continue if indicator is not None: try: self.helper.api.stix_core_relationship.create(fromId=indicator['id'], toId=malware_id, relationship_type='indicates', description='Sample in Malpedia database', confidence=self.confidence_level, createdBy=self.organization['id'], objectMarking=[self.default_marking['id']], update=self.update_data) except Exception as e: self.helper.log_error(f'error storing indicator relation: {e}') continue if obs is not None: try: self.helper.api.stix_core_relationship.create(fromId=obs['id'], toId=malware_id, relationship_type='related-to', description='Sample in Malpedia database', confidence=self.confidence_level, createdBy=self.organization['id'], objectMarking=[self.default_marking['id']], update=self.update_data) except Exception as e: self.helper.log_error(f'error storing indicator relation: {e}') continue if indicator is not None and obs is not None: try: self.helper.api.stix_core_relationship.create(fromId=indicator['id'], toId=obs['id'], relationship_type='based-on', createdBy=self.organization['id'], update=self.update_data) except Exception as e: self.helper.log_error(f'error storing indicator relation: {e}') continue </DeepExtract> self.helper.log_info(f'creating intrusion sets for {fam.malpedia_name}') <DeepExtract> for actor in fam.attribution: actor_json = self.api_client.query('get/actor/' + actor.lower().replace(' ', '_')) try: act = Actor.parse_obj(actor_json) except ValidationError as e: self.helper.log_error(f'error marshaling actor data for {actor}: {e}') continue self.helper.log_info('Processing actor: ' + act.value) guessed_intrusion_set = self._guess_intrusion_set_from_tags([actor] + act.meta.synonyms) if act.value == '' or act.value is None: continue if act.description == '': act.description = 'Malpedia library entry for ' + act.value if guessed_intrusion_set == {} and self.import_intrusion_sets or (self.update_data and self.import_intrusion_sets): intrusion_set = self.helper.api.intrusion_set.create(name=act.value, description=act.description, aliases=act.meta.synonyms if act.meta.synonyms else None, createdBy=self.organization['id'], update=self.update_data) self.helper.api.stix_core_relationship.create(fromId=intrusion_set['id'], toId=malware_id, relationship_type='uses', description='Malpedia indicates usage', confidence=self.confidence_level, createdBy=self.organization['id'], update=self.update_data) for act_ref_url in act.meta.refs: reference = self.helper.api.external_reference.create(source_name='Malpedia', url=act_ref_url, description='Reference found in the Malpedia library') self.helper.api.stix_domain_object.add_external_reference(id=intrusion_set['id'], external_reference_id=reference['id']) else: self.helper.log_info(f'not creating intrusion set ({act.value}) based on config') try: guessed_id = list(guessed_intrusion_set.values())[0] except Exception as err: self.helper.log_error(f'error guessing intrusion-set id: {err}') continue if guessed_id is None or guessed_id == '': continue if 
guessed_intrusion_set != {} and self.guess_intrusion_set: self.helper.api.stix_core_relationship.create(fromId=guessed_id, toId=malware_id, relationship_type='uses', description='Malpedia indicates usage', confidence=self.confidence_level, createdByRef=self.organization['id'], update=self.update_data) for act_ref_url in act.meta.refs: reference = self.helper.api.external_reference.create(source_name='Malpedia', url=act_ref_url, description='Reference found in the Malpedia library') self.helper.api.stix_domain_object.add_external_reference(id=guessed_id, external_reference_id=reference['id']) </DeepExtract>
def _process_families(self) -> None: families_json = self.api_client.query('get/families') for family_id in families_json: try: if families_json[family_id]['updated'] == '': families_json[family_id]['updated'] = None fam = Family.parse_obj(families_json[family_id]) fam.malpedia_name = family_id except ValidationError as e: self.helper.log_error(f'error parsing family: {family_id} {e} {families_json[family_id]}') continue self.helper.log_info('Processing malware family: ' + fam.malpedia_name) self.helper.log_info('Processing malware family: ' + fam.malpedia_name) guessed_malwares = self._guess_malwares_from_tags(fam.all_names) if fam.description == '' or fam.description is None: fam.description = 'Malpedia entry for ' + fam.malpedia_name marking_tlp_white = self.helper.api.marking_definition.read(id=TLP_WHITE['id']) if guessed_malwares == {} or self.update_data: try: malware = self.helper.api.malware.create(name=fam.main_name, is_family=True, createdBy=self.organization['id'], description=fam.description, aliases=fam.alt_names if fam.alt_names != [] else None, objectMarking=[marking_tlp_white['id']], update=self.update_data) except Exception as e: self.helper.log_error(f'error creating malware entity: {e}') malware_id = '' self._add_refs_for_id(fam.urls, malware['id']) malware_id = malware['id'] guessed_malwares_list = list(guessed_malwares.values()) malware_id = guessed_malwares_list[0] if malware_id == '': self.helper.log_error('some error occurred during malware creation') continue yara_rules = self.api_client.query('get/yara/' + family_id) self.helper.log_info('importing yara rules for: ' + family_id) if not self.import_yara: return for tlp_level in yara_rules: for yara_rule in yara_rules[tlp_level]: try: yr = YaraRule(tlp_level=tlp_level, rule_name=yara_rule, raw_rule=yara_rules[tlp_level][yara_rule]) self.helper.log_info(f'processing yara_rule ({yr.rule_name})') mapped_marking = self._TLP_MAPPING[tlp_level] if mapped_marking == '': continue indicator = self.helper.api.indicator.create(name=yr.rule_name, description='Yara rule from Malpedia library', pattern_type='yara', pattern=yr.raw_rule, objetMarking=[mapped_marking['id']], x_opencti_main_observable_type='StixFile', createdBy=self.organization['id'], valid_from=yr.date, update=self.update_data) except Exception as e: self.helper.log_error(f'error creating yara indicator: {e}') continue self.helper.api.stix_core_relationship.create(fromId=indicator['id'], toId=malware_id, relationship_type='indicates', description='Yara rule for ' + fam.main_name, confidence=self.confidence_level, createdBy=self.organization['id'], update=self.update_data) if not self.api_client.unauthenticated: samples = self.api_client.query('list/samples/' + family_id) self.helper.log_info(f'creating hash indicators for {fam.malpedia_name} samples') for sample in samples: try: sam = Sample.parse_obj(sample) except ValidationError as e: self.helper.log_error(f'error marshaling sample data for {sample}: {e}') continue self.helper.log_info('Processing sample: ' + sam.sha256) if sam.sha256 == '' or sam.sha256 is None or len(sam.sha256) != 64: continue san_hash = sam.sha256.lower() pattern = "[file:hashes.'SHA-256' = '" + san_hash + "']" obs = None if self.create_observables: try: obs = self.helper.api.stix_cyber_observable.create(observableData={'type': 'file', 'hashes': {'SHA-256': san_hash}}, createdBy=self.organization['id'], objectMarking=[self.default_marking['id']], update=self.update_data) except Exception as e: print(obs) self.helper.log_error(f'error storing 
observable ({sam.sha256}): {e}') continue indicator = None if self.create_indicators: try: indicator = self.helper.api.indicator.create(name=san_hash, description='Sample hash pattern from Malpedia', pattern_type='stix', pattern=pattern, x_opencti_main_observable_type='File', createdBy=self.organization['id'], objectMarking=[self.default_marking['id']], update=self.update_data) except Exception as e: self.helper.log_error(f'error storing indicator: {e}') continue if indicator is not None: try: self.helper.api.stix_core_relationship.create(fromId=indicator['id'], toId=malware_id, relationship_type='indicates', description='Sample in Malpedia database', confidence=self.confidence_level, createdBy=self.organization['id'], objectMarking=[self.default_marking['id']], update=self.update_data) except Exception as e: self.helper.log_error(f'error storing indicator relation: {e}') continue if obs is not None: try: self.helper.api.stix_core_relationship.create(fromId=obs['id'], toId=malware_id, relationship_type='related-to', description='Sample in Malpedia database', confidence=self.confidence_level, createdBy=self.organization['id'], objectMarking=[self.default_marking['id']], update=self.update_data) except Exception as e: self.helper.log_error(f'error storing indicator relation: {e}') continue if indicator is not None and obs is not None: try: self.helper.api.stix_core_relationship.create(fromId=indicator['id'], toId=obs['id'], relationship_type='based-on', createdBy=self.organization['id'], update=self.update_data) except Exception as e: self.helper.log_error(f'error storing indicator relation: {e}') continue self.helper.log_info(f'creating intrusion sets for {fam.malpedia_name}') for actor in fam.attribution: actor_json = self.api_client.query('get/actor/' + actor.lower().replace(' ', '_')) try: act = Actor.parse_obj(actor_json) except ValidationError as e: self.helper.log_error(f'error marshaling actor data for {actor}: {e}') continue self.helper.log_info('Processing actor: ' + act.value) guessed_intrusion_set = self._guess_intrusion_set_from_tags([actor] + act.meta.synonyms) if act.value == '' or act.value is None: continue if act.description == '': act.description = 'Malpedia library entry for ' + act.value if guessed_intrusion_set == {} and self.import_intrusion_sets or (self.update_data and self.import_intrusion_sets): intrusion_set = self.helper.api.intrusion_set.create(name=act.value, description=act.description, aliases=act.meta.synonyms if act.meta.synonyms else None, createdBy=self.organization['id'], update=self.update_data) self.helper.api.stix_core_relationship.create(fromId=intrusion_set['id'], toId=malware_id, relationship_type='uses', description='Malpedia indicates usage', confidence=self.confidence_level, createdBy=self.organization['id'], update=self.update_data) for act_ref_url in act.meta.refs: reference = self.helper.api.external_reference.create(source_name='Malpedia', url=act_ref_url, description='Reference found in the Malpedia library') self.helper.api.stix_domain_object.add_external_reference(id=intrusion_set['id'], external_reference_id=reference['id']) else: self.helper.log_info(f'not creating intrusion set ({act.value}) based on config') try: guessed_id = list(guessed_intrusion_set.values())[0] except Exception as err: self.helper.log_error(f'error guessing intrusion-set id: {err}') continue if guessed_id is None or guessed_id == '': continue if guessed_intrusion_set != {} and self.guess_intrusion_set: 
self.helper.api.stix_core_relationship.create(fromId=guessed_id, toId=malware_id, relationship_type='uses', description='Malpedia indicates usage', confidence=self.confidence_level, createdByRef=self.organization['id'], update=self.update_data) for act_ref_url in act.meta.refs: reference = self.helper.api.external_reference.create(source_name='Malpedia', url=act_ref_url, description='Reference found in the Malpedia library') self.helper.api.stix_domain_object.add_external_reference(id=guessed_id, external_reference_id=reference['id']) </DeepExtract>
connectors
positive
def template(*args, **kwargs):
    """ Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    """
    tpl = args[0] if args else None
    adapter = kwargs.pop('template_adapter', SimpleTemplate)
    lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
    tplid = (id(lookup), tpl)
    if tplid not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        if isinstance(tpl, adapter):
            TEMPLATES[tplid] = tpl
            if settings:
                TEMPLATES[tplid].prepare(**settings)
        elif '\n' in tpl or '{' in tpl or '%' in tpl or ('$' in tpl):
            TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
        else:
            TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
        if not TEMPLATES[tplid]:
            <DeepExtract>
            raise HTTPError(500, 'Template (%s) not found' % tpl)
            </DeepExtract>
    for dictarg in args[1:]:
        kwargs.update(dictarg)
    return TEMPLATES[tplid].render(kwargs)
def template(*args, **kwargs):
    """ Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    """
    tpl = args[0] if args else None
    adapter = kwargs.pop('template_adapter', SimpleTemplate)
    lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
    tplid = (id(lookup), tpl)
    if tplid not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        if isinstance(tpl, adapter):
            TEMPLATES[tplid] = tpl
            if settings:
                TEMPLATES[tplid].prepare(**settings)
        elif '\n' in tpl or '{' in tpl or '%' in tpl or ('$' in tpl):
            TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
        else:
            TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
        if not TEMPLATES[tplid]:
            raise HTTPError(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]:
        kwargs.update(dictarg)
    return TEMPLATES[tplid].render(kwargs)
aws-servicebroker
positive
def ToJSCode(self, name, columns_order=None, order_by=()): """Writes the data table as a JS code string. This method writes a string of JS code that can be run to generate a DataTable with the specified data. Typically used for debugging only. Args: name: The name of the table. The name would be used as the DataTable's variable name in the created JS code. columns_order: Optional. Specifies the order of columns in the output table. Specify a list of all column IDs in the order in which you want the table created. Note that you must list all column IDs in this parameter, if you use it. order_by: Optional. Specifies the name of the column(s) to sort by. Passed as is to _PreparedData. Returns: A string of JS code that, when run, generates a DataTable with the given name and the data stored in the DataTable object. Example result: "var tab1 = new google.visualization.DataTable(); tab1.addColumn("string", "a", "a"); tab1.addColumn("number", "b", "b"); tab1.addColumn("boolean", "c", "c"); tab1.addRows(10); tab1.setCell(0, 0, "a"); tab1.setCell(0, 1, 1, null, {"foo": "bar"}); tab1.setCell(0, 2, true); ... tab1.setCell(9, 0, "c"); tab1.setCell(9, 1, 3, "3$"); tab1.setCell(9, 2, false);" Raises: DataTableException: The data does not match the type. """ encoder = DataTableJSONEncoder() if columns_order is None: columns_order = [col['id'] for col in self.__columns] col_dict = dict([(col['id'], col) for col in self.__columns]) jscode = 'var %s = new google.visualization.DataTable();\n' % name if self.custom_properties: jscode += '%s.setTableProperties(%s);\n' % (name, encoder.encode(self.custom_properties)) for (i, col) in enumerate(columns_order): jscode += '%s.addColumn(%s, %s, %s);\n' % (name, encoder.encode(col_dict[col]['type']), encoder.encode(col_dict[col]['label']), encoder.encode(col_dict[col]['id'])) if col_dict[col]['custom_properties']: jscode += '%s.setColumnProperties(%d, %s);\n' % (name, i, encoder.encode(col_dict[col]['custom_properties'])) jscode += '%s.addRows(%d);\n' % (name, len(self.__data)) for (i, (row, cp)) in enumerate(self._PreparedData(order_by)): for (j, col) in enumerate(columns_order): if col not in row or row[col] is None: continue <DeepExtract> if isinstance(row[col], tuple): if len(row[col]) not in [2, 3] or (len(row[col]) == 3 and (not isinstance(row[col][2], dict))): raise DataTableException('Wrong format for value and formatting - %s.' % str(row[col])) if not isinstance(row[col][1], types.StringTypes + (types.NoneType,)): raise DataTableException('Formatted value is not string, given %s.' 
% type(row[col][1])) js_value = DataTable.CoerceValue(row[col][0], col_dict[col]['type']) row[col] = (js_value,) + row[col][1:] t_value = type(row[col]) if row[col] is None: row[col] = row[col] if col_dict[col]['type'] == 'boolean': row[col] = bool(row[col]) elif col_dict[col]['type'] == 'number': if isinstance(row[col], (int, long, float)): row[col] = row[col] raise DataTableException('Wrong type %s when expected number' % t_value) elif col_dict[col]['type'] == 'string': if isinstance(row[col], unicode): row[col] = row[col] else: row[col] = str(row[col]).decode('utf-8') elif col_dict[col]['type'] == 'date': if isinstance(row[col], datetime.datetime): row[col] = datetime.date(row[col].year, row[col].month, row[col].day) elif isinstance(row[col], datetime.date): row[col] = row[col] else: raise DataTableException('Wrong type %s when expected date' % t_value) elif col_dict[col]['type'] == 'timeofday': if isinstance(row[col], datetime.datetime): row[col] = datetime.time(row[col].hour, row[col].minute, row[col].second) elif isinstance(row[col], datetime.time): row[col] = row[col] else: raise DataTableException('Wrong type %s when expected time' % t_value) elif col_dict[col]['type'] == 'datetime': if isinstance(row[col], datetime.datetime): row[col] = row[col] else: raise DataTableException('Wrong type %s when expected datetime' % t_value) raise DataTableException('Unsupported type %s' % col_dict[col]['type']) </DeepExtract> if isinstance(value, tuple): cell_cp = '' if len(value) == 3: cell_cp = ', %s' % encoder.encode(row[col][2]) jscode += '%s.setCell(%d, %d, %s, %s%s);\n' % (name, i, j, self.EscapeForJSCode(encoder, value[0]), self.EscapeForJSCode(encoder, value[1]), cell_cp) else: jscode += '%s.setCell(%d, %d, %s);\n' % (name, i, j, self.EscapeForJSCode(encoder, value)) if cp: jscode += '%s.setRowProperties(%d, %s);\n' % (name, i, encoder.encode(cp)) return jscode
def ToJSCode(self, name, columns_order=None, order_by=()): """Writes the data table as a JS code string. This method writes a string of JS code that can be run to generate a DataTable with the specified data. Typically used for debugging only. Args: name: The name of the table. The name would be used as the DataTable's variable name in the created JS code. columns_order: Optional. Specifies the order of columns in the output table. Specify a list of all column IDs in the order in which you want the table created. Note that you must list all column IDs in this parameter, if you use it. order_by: Optional. Specifies the name of the column(s) to sort by. Passed as is to _PreparedData. Returns: A string of JS code that, when run, generates a DataTable with the given name and the data stored in the DataTable object. Example result: "var tab1 = new google.visualization.DataTable(); tab1.addColumn("string", "a", "a"); tab1.addColumn("number", "b", "b"); tab1.addColumn("boolean", "c", "c"); tab1.addRows(10); tab1.setCell(0, 0, "a"); tab1.setCell(0, 1, 1, null, {"foo": "bar"}); tab1.setCell(0, 2, true); ... tab1.setCell(9, 0, "c"); tab1.setCell(9, 1, 3, "3$"); tab1.setCell(9, 2, false);" Raises: DataTableException: The data does not match the type. """ encoder = DataTableJSONEncoder() if columns_order is None: columns_order = [col['id'] for col in self.__columns] col_dict = dict([(col['id'], col) for col in self.__columns]) jscode = 'var %s = new google.visualization.DataTable();\n' % name if self.custom_properties: jscode += '%s.setTableProperties(%s);\n' % (name, encoder.encode(self.custom_properties)) for (i, col) in enumerate(columns_order): jscode += '%s.addColumn(%s, %s, %s);\n' % (name, encoder.encode(col_dict[col]['type']), encoder.encode(col_dict[col]['label']), encoder.encode(col_dict[col]['id'])) if col_dict[col]['custom_properties']: jscode += '%s.setColumnProperties(%d, %s);\n' % (name, i, encoder.encode(col_dict[col]['custom_properties'])) jscode += '%s.addRows(%d);\n' % (name, len(self.__data)) for (i, (row, cp)) in enumerate(self._PreparedData(order_by)): for (j, col) in enumerate(columns_order): if col not in row or row[col] is None: continue if isinstance(row[col], tuple): if len(row[col]) not in [2, 3] or (len(row[col]) == 3 and (not isinstance(row[col][2], dict))): raise DataTableException('Wrong format for value and formatting - %s.' % str(row[col])) if not isinstance(row[col][1], types.StringTypes + (types.NoneType,)): raise DataTableException('Formatted value is not string, given %s.' 
% type(row[col][1])) js_value = DataTable.CoerceValue(row[col][0], col_dict[col]['type']) row[col] = (js_value,) + row[col][1:] t_value = type(row[col]) if row[col] is None: row[col] = row[col] if col_dict[col]['type'] == 'boolean': row[col] = bool(row[col]) elif col_dict[col]['type'] == 'number': if isinstance(row[col], (int, long, float)): row[col] = row[col] raise DataTableException('Wrong type %s when expected number' % t_value) elif col_dict[col]['type'] == 'string': if isinstance(row[col], unicode): row[col] = row[col] else: row[col] = str(row[col]).decode('utf-8') elif col_dict[col]['type'] == 'date': if isinstance(row[col], datetime.datetime): row[col] = datetime.date(row[col].year, row[col].month, row[col].day) elif isinstance(row[col], datetime.date): row[col] = row[col] else: raise DataTableException('Wrong type %s when expected date' % t_value) elif col_dict[col]['type'] == 'timeofday': if isinstance(row[col], datetime.datetime): row[col] = datetime.time(row[col].hour, row[col].minute, row[col].second) elif isinstance(row[col], datetime.time): row[col] = row[col] else: raise DataTableException('Wrong type %s when expected time' % t_value) elif col_dict[col]['type'] == 'datetime': if isinstance(row[col], datetime.datetime): row[col] = row[col] else: raise DataTableException('Wrong type %s when expected datetime' % t_value) raise DataTableException('Unsupported type %s' % col_dict[col]['type']) if isinstance(value, tuple): cell_cp = '' if len(value) == 3: cell_cp = ', %s' % encoder.encode(row[col][2]) jscode += '%s.setCell(%d, %d, %s, %s%s);\n' % (name, i, j, self.EscapeForJSCode(encoder, value[0]), self.EscapeForJSCode(encoder, value[1]), cell_cp) else: jscode += '%s.setCell(%d, %d, %s);\n' % (name, i, j, self.EscapeForJSCode(encoder, value)) if cp: jscode += '%s.setRowProperties(%d, %s);\n' % (name, i, encoder.encode(cp)) return jscode
DCMetroMetrics
positive
def summary(self, count=True): """Prints summary of the database. The information that is displayed is: whether it exists, what tables does it have, how many records it contains (if ``count=True``), what are fields held and their types with an example record. Parameters ---------- count: bool Whether to count number of records per table (can be time consuming). Example ------- >>> adapter = SQLiteAdapter("database_24-06-2020_03-51-12_PM.db") >>> with adapter as db: >>> db.summary() Summary for Database database_29-06-2020_09-37-20_AM.db File size: 3.9453MB Tables: triples_table +-----------------------------------------------------------------------------------+ | TRIPLES_TABLE | +---------------------+------------------+---------------+--------------------------+ | | subject (int) | predicate (int) | object (int) | dataset_type (text(50)) | +----+----------------+------------------+---------------+--------------------------+ |e.g.| 34321 | 29218 | 38102 | train | +----+----------------+------------------+---------------+--------------------------+ Records: 59070 """ if os.path.exists(self.db_path): print('Summary for Database {}'.format(self.db_name)) print('Located in {}'.format(self.db_path)) file_size = os.path.getsize(self.db_path) summary = 'File size: {:.5}{}\nTables: {}' <DeepExtract> with self: cursor = self.connection.cursor() output = None try: cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") output = cursor.fetchall() self.connection.commit() if self.verbose: logger.debug(f"""Query executed successfully, {"SELECT name FROM sqlite_master WHERE type='table';"}""") except Error as e: logger.debug(f"Query failed. The error '{e}' occurred") tables = output </DeepExtract> tables_names = ', '.join((table[0] for table in tables)) print(summary.format(*get_human_readable_size(file_size), tables_names)) types = {'integer': 'int', 'float': 'float', 'string': 'str'} for table_name in tables: <DeepExtract> with self: cursor = self.connection.cursor() output = None try: cursor.execute("PRAGMA table_info('%s')" % table_name) output = cursor.fetchall() self.connection.commit() if self.verbose: logger.debug(f"""Query executed successfully, {"PRAGMA table_info('%s')" % table_name}""") except Error as e: logger.debug(f"Query failed. The error '{e}' occurred") result = output </DeepExtract> cols_name_type = ['{} ({}):'.format(x[1], types[x[2]] if x[2] in types else x[2]) for x in result] length = len(cols_name_type) print('-------------\n|' + table_name[0].upper() + '|\n-------------\n') formatted_record = '{:7s}{}\n{:7s}{}'.format(' ', '{:25s}' * length, 'e.g.', '{:<25s}' * length) msg = '' example = ['-'] * length if count: <DeepExtract> query = 'SELECT count(*) from {} {};'.format(table_name[0], condition) count = self._execute_query(query) if count is None: logger.debug('Table is empty or not such table exists.') nb_records = count elif not isinstance(count, list) or not isinstance(count[0], tuple): raise ValueError('Cannot get count for the table with provided condition.') nb_records = count[0][0] </DeepExtract> msg = '\n\nRecords: {}'.format(nb_records) if nb_records != 0: record = self._execute_query(f'SELECT * FROM {table_name[0]} LIMIT {1};')[0] example = [str(rec) for rec in record] else: print('Count is set to False hence no data displayed') print(formatted_record.format(*cols_name_type, *example), msg) else: logger.debug('Database does not exist.')
def summary(self, count=True): """Prints summary of the database. The information that is displayed is: whether it exists, what tables does it have, how many records it contains (if ``count=True``), what are fields held and their types with an example record. Parameters ---------- count: bool Whether to count number of records per table (can be time consuming). Example ------- >>> adapter = SQLiteAdapter("database_24-06-2020_03-51-12_PM.db") >>> with adapter as db: >>> db.summary() Summary for Database database_29-06-2020_09-37-20_AM.db File size: 3.9453MB Tables: triples_table +-----------------------------------------------------------------------------------+ | TRIPLES_TABLE | +---------------------+------------------+---------------+--------------------------+ | | subject (int) | predicate (int) | object (int) | dataset_type (text(50)) | +----+----------------+------------------+---------------+--------------------------+ |e.g.| 34321 | 29218 | 38102 | train | +----+----------------+------------------+---------------+--------------------------+ Records: 59070 """ if os.path.exists(self.db_path): print('Summary for Database {}'.format(self.db_name)) print('Located in {}'.format(self.db_path)) file_size = os.path.getsize(self.db_path) summary = 'File size: {:.5}{}\nTables: {}' with self: cursor = self.connection.cursor() output = None try: cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") output = cursor.fetchall() self.connection.commit() if self.verbose: logger.debug(f"""Query executed successfully, {"SELECT name FROM sqlite_master WHERE type='table';"}""") except Error as e: logger.debug(f"Query failed. The error '{e}' occurred") tables = output tables_names = ', '.join((table[0] for table in tables)) print(summary.format(*get_human_readable_size(file_size), tables_names)) types = {'integer': 'int', 'float': 'float', 'string': 'str'} for table_name in tables: with self: cursor = self.connection.cursor() output = None try: cursor.execute("PRAGMA table_info('%s')" % table_name) output = cursor.fetchall() self.connection.commit() if self.verbose: logger.debug(f"""Query executed successfully, {"PRAGMA table_info('%s')" % table_name}""") except Error as e: logger.debug(f"Query failed. The error '{e}' occurred") result = output cols_name_type = ['{} ({}):'.format(x[1], types[x[2]] if x[2] in types else x[2]) for x in result] length = len(cols_name_type) print('-------------\n|' + table_name[0].upper() + '|\n-------------\n') formatted_record = '{:7s}{}\n{:7s}{}'.format(' ', '{:25s}' * length, 'e.g.', '{:<25s}' * length) msg = '' example = ['-'] * length if count: query = 'SELECT count(*) from {} {};'.format(table_name[0], condition) count = self._execute_query(query) if count is None: logger.debug('Table is empty or not such table exists.') nb_records = count elif not isinstance(count, list) or not isinstance(count[0], tuple): raise ValueError('Cannot get count for the table with provided condition.') nb_records = count[0][0] msg = '\n\nRecords: {}'.format(nb_records) if nb_records != 0: record = self._execute_query(f'SELECT * FROM {table_name[0]} LIMIT {1};')[0] example = [str(rec) for rec in record] else: print('Count is set to False hence no data displayed') print(formatted_record.format(*cols_name_type, *example), msg) else: logger.debug('Database does not exist.')
AmpliGraph
positive
def dfs(adj=None, n=None, sequence=None, previsit=lambda v: None, postvisit=lambda v: None, preexplore=lambda v: None, list_visited=True):
    def explore(v):
        visited[v] = True
        previsit(v)
        for u in adj[v]:
            if not visited[u]:
                <DeepExtract>
                explored.add(u)
                component.append(u)
                for b in adjacency[u]:
                    if not b in explored:
                        explore(b, adjacency, explored, component)
                return sorted(list(set(component)))
                </DeepExtract>
        postvisit(v)
    visited = [False for v in range(n + 1)]
    for v in sequence:
        if not visited[v]:
            preexplore(v)
            <DeepExtract>
            explored.add(v)
            component.append(v)
            for b in adjacency[v]:
                if not b in explored:
                    explore(b, adjacency, explored, component)
            return sorted(list(set(component)))
            </DeepExtract>
    return [i for i in range(1, len(visited)) if visited[i] == list_visited]
def dfs(adj=None, n=None, sequence=None, previsit=lambda v: None, postvisit=lambda v: None, preexplore=lambda v: None, list_visited=True):
    def explore(v):
        visited[v] = True
        previsit(v)
        for u in adj[v]:
            if not visited[u]:
                explored.add(u)
                component.append(u)
                for b in adjacency[u]:
                    if not b in explored:
                        explore(b, adjacency, explored, component)
                return sorted(list(set(component)))
        postvisit(v)
    visited = [False for v in range(n + 1)]
    for v in sequence:
        if not visited[v]:
            preexplore(v)
            explored.add(v)
            component.append(v)
            for b in adjacency[v]:
                if not b in explored:
                    explore(b, adjacency, explored, component)
            return sorted(list(set(component)))
    return [i for i in range(1, len(visited)) if visited[i] == list_visited]
bioinformatics
positive
@common.debuggable
@common.logrpc
@common.require('volume_id', 'volume_path')
@common.Worker.unique('volume_id')
def NodeExpandVolume(self, request, context):
    vol = self._get_vol(request.volume_id, context=context)
    vol_size = vol.size
    if request.HasField('capacity_range'):
        (v_size, min_size, max_size) = self._calculate_size(request, context)
        if not min_size <= vol_size <= max_size:
            context.abort(grpc.StatusCode.OUT_OF_RANGE, "New size requested (%s) doesn't match controller resized volume (%s)" % (v_size, vol.size))
    (device, private_bind) = self._get_vol_device(request.volume_id)
    if not device:
        context.abort(grpc.StatusCode.FAILED_PRECONDITION, 'Volume is not mounted, cannot resize')
    current_size = vol.connections[0].extend()
    <DeepExtract>
    fs_type = self._get_fs_type(vol)
    if not fs_type:
        return
    mounts = self._get_mount(private_bind)
    target = mounts[0][1]
    if os.path.basename(target) != self.STAGED_NAME:
        LOG.warning("target didn't have the /stage ending")
        target = os.path.join(target, self.STAGED_NAME)
    if fs_type in self.EXT_FS:
        command = ('resize2fs', '-f', '-F', private_bind)
    elif fs_type == 'btrfs':
        command = ('btrfs', 'filesystem', 'resize', 'max', target)
    elif fs_type == 'xfs':
        command = ('xfs_growfs', '-d', target)
    else:
        context.abort(grpc.StatusCode.FAILED_PRECONDITION, "Don't know how to extend %s filesystem")
    self.sudo(*command)
    </DeepExtract>
    return types.NodeExpandResp(capacity_bytes=current_size)
@common.debuggable
@common.logrpc
@common.require('volume_id', 'volume_path')
@common.Worker.unique('volume_id')
def NodeExpandVolume(self, request, context):
    vol = self._get_vol(request.volume_id, context=context)
    vol_size = vol.size
    if request.HasField('capacity_range'):
        (v_size, min_size, max_size) = self._calculate_size(request, context)
        if not min_size <= vol_size <= max_size:
            context.abort(grpc.StatusCode.OUT_OF_RANGE, "New size requested (%s) doesn't match controller resized volume (%s)" % (v_size, vol.size))
    (device, private_bind) = self._get_vol_device(request.volume_id)
    if not device:
        context.abort(grpc.StatusCode.FAILED_PRECONDITION, 'Volume is not mounted, cannot resize')
    current_size = vol.connections[0].extend()
    fs_type = self._get_fs_type(vol)
    if not fs_type:
        return
    mounts = self._get_mount(private_bind)
    target = mounts[0][1]
    if os.path.basename(target) != self.STAGED_NAME:
        LOG.warning("target didn't have the /stage ending")
        target = os.path.join(target, self.STAGED_NAME)
    if fs_type in self.EXT_FS:
        command = ('resize2fs', '-f', '-F', private_bind)
    elif fs_type == 'btrfs':
        command = ('btrfs', 'filesystem', 'resize', 'max', target)
    elif fs_type == 'xfs':
        command = ('xfs_growfs', '-d', target)
    else:
        context.abort(grpc.StatusCode.FAILED_PRECONDITION, "Don't know how to extend %s filesystem")
    self.sudo(*command)
    return types.NodeExpandResp(capacity_bytes=current_size)
ember-csi
positive
def test_argrelmin_gpu(self, rand_data_gen, gpubenchmark, dim, num_samps, axis, order, mode):
    (cpu_sig, gpu_sig) = rand_data_gen(num_samps, dim)
    output = gpubenchmark(self.gpu_version, gpu_sig, axis, order, mode)
    <DeepExtract>
    key = signal.argrelmin(cpu_sig, axis, order, mode)
    </DeepExtract>
    array_equal(output, key)
def test_argrelmin_gpu(self, rand_data_gen, gpubenchmark, dim, num_samps, axis, order, mode):
    (cpu_sig, gpu_sig) = rand_data_gen(num_samps, dim)
    output = gpubenchmark(self.gpu_version, gpu_sig, axis, order, mode)
    key = signal.argrelmin(cpu_sig, axis, order, mode)
    array_equal(output, key)
cusignal
positive
def bitmap_to_boxes(self, pred: np.ndarray, bitmap: np.ndarray) -> np.ndarray: """Compute boxes from a bitmap/pred_map: find connected components then filter boxes Args: pred: Pred map from differentiable linknet output bitmap: Bitmap map computed from pred (binarized) angle_tol: Comparison tolerance of the angle with the median angle across the page ratio_tol: Under this limit aspect ratio, we cannot resolve the direction of the crop Returns: np tensor boxes for the bitmap, each box is a 6-element list containing x, y, w, h, alpha, score for the box """ (height, width) = bitmap.shape[:2] boxes: List[Union[np.ndarray, List[float]]] = [] (contours, _) = cv2.findContours(bitmap.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for contour in contours: if np.any(contour[:, 0].max(axis=0) - contour[:, 0].min(axis=0) < 2): continue if self.assume_straight_pages: (x, y, w, h) = cv2.boundingRect(contour) points: np.ndarray = np.array([[x, y], [x, y + h], [x + w, y + h], [x + w, y]]) score = self.box_score(pred, points, assume_straight_pages=True) else: score = self.box_score(pred, contour, assume_straight_pages=False) if score < self.box_thresh: continue if self.assume_straight_pages: <DeepExtract> if not self.assume_straight_pages: rect = cv2.minAreaRect(points) points = cv2.boxPoints(rect) area = (rect[1][0] + 1) * (1 + rect[1][1]) length = 2 * (rect[1][0] + rect[1][1]) + 2 else: poly = Polygon(points) area = poly.area length = poly.length distance = area * self.unclip_ratio / length offset = pyclipper.PyclipperOffset() offset.AddPath(points, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) _points = offset.Execute(distance) idx = 0 if len(_points) > 1: max_size = 0 for (_idx, p) in enumerate(_points): if len(p) > max_size: idx = _idx max_size = len(p) _points = [_points[idx]] expanded_points: np.ndarray = np.asarray(_points) if len(expanded_points) < 1: _box = None _box = cv2.boundingRect(expanded_points) if self.assume_straight_pages else np.roll(cv2.boxPoints(cv2.minAreaRect(expanded_points)), -1, axis=0) </DeepExtract> else: <DeepExtract> if not self.assume_straight_pages: rect = cv2.minAreaRect(np.squeeze(contour)) np.squeeze(contour) = cv2.boxPoints(rect) area = (rect[1][0] + 1) * (1 + rect[1][1]) length = 2 * (rect[1][0] + rect[1][1]) + 2 else: poly = Polygon(np.squeeze(contour)) area = poly.area length = poly.length distance = area * self.unclip_ratio / length offset = pyclipper.PyclipperOffset() offset.AddPath(np.squeeze(contour), pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) _points = offset.Execute(distance) idx = 0 if len(_points) > 1: max_size = 0 for (_idx, p) in enumerate(_points): if len(p) > max_size: idx = _idx max_size = len(p) _points = [_points[idx]] expanded_points: np.ndarray = np.asarray(_points) if len(expanded_points) < 1: _box = None _box = cv2.boundingRect(expanded_points) if self.assume_straight_pages else np.roll(cv2.boxPoints(cv2.minAreaRect(expanded_points)), -1, axis=0) </DeepExtract> if self.assume_straight_pages: (x, y, w, h) = _box (xmin, ymin, xmax, ymax) = (x / width, y / height, (x + w) / width, (y + h) / height) boxes.append([xmin, ymin, xmax, ymax, score]) else: _box[:, 0] /= width _box[:, 1] /= height boxes.append(_box) if not self.assume_straight_pages: return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 4, 2), dtype=pred.dtype) else: return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 5), dtype=pred.dtype)
def bitmap_to_boxes(self, pred: np.ndarray, bitmap: np.ndarray) -> np.ndarray: """Compute boxes from a bitmap/pred_map: find connected components then filter boxes Args: pred: Pred map from differentiable linknet output bitmap: Bitmap map computed from pred (binarized) angle_tol: Comparison tolerance of the angle with the median angle across the page ratio_tol: Under this limit aspect ratio, we cannot resolve the direction of the crop Returns: np tensor boxes for the bitmap, each box is a 6-element list containing x, y, w, h, alpha, score for the box """ (height, width) = bitmap.shape[:2] boxes: List[Union[np.ndarray, List[float]]] = [] (contours, _) = cv2.findContours(bitmap.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for contour in contours: if np.any(contour[:, 0].max(axis=0) - contour[:, 0].min(axis=0) < 2): continue if self.assume_straight_pages: (x, y, w, h) = cv2.boundingRect(contour) points: np.ndarray = np.array([[x, y], [x, y + h], [x + w, y + h], [x + w, y]]) score = self.box_score(pred, points, assume_straight_pages=True) else: score = self.box_score(pred, contour, assume_straight_pages=False) if score < self.box_thresh: continue if self.assume_straight_pages: if not self.assume_straight_pages: rect = cv2.minAreaRect(points) points = cv2.boxPoints(rect) area = (rect[1][0] + 1) * (1 + rect[1][1]) length = 2 * (rect[1][0] + rect[1][1]) + 2 else: poly = Polygon(points) area = poly.area length = poly.length distance = area * self.unclip_ratio / length offset = pyclipper.PyclipperOffset() offset.AddPath(points, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) _points = offset.Execute(distance) idx = 0 if len(_points) > 1: max_size = 0 for (_idx, p) in enumerate(_points): if len(p) > max_size: idx = _idx max_size = len(p) _points = [_points[idx]] expanded_points: np.ndarray = np.asarray(_points) if len(expanded_points) < 1: _box = None _box = cv2.boundingRect(expanded_points) if self.assume_straight_pages else np.roll(cv2.boxPoints(cv2.minAreaRect(expanded_points)), -1, axis=0) else: if not self.assume_straight_pages: rect = cv2.minAreaRect(np.squeeze(contour)) np.squeeze(contour) = cv2.boxPoints(rect) area = (rect[1][0] + 1) * (1 + rect[1][1]) length = 2 * (rect[1][0] + rect[1][1]) + 2 else: poly = Polygon(np.squeeze(contour)) area = poly.area length = poly.length distance = area * self.unclip_ratio / length offset = pyclipper.PyclipperOffset() offset.AddPath(np.squeeze(contour), pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) _points = offset.Execute(distance) idx = 0 if len(_points) > 1: max_size = 0 for (_idx, p) in enumerate(_points): if len(p) > max_size: idx = _idx max_size = len(p) _points = [_points[idx]] expanded_points: np.ndarray = np.asarray(_points) if len(expanded_points) < 1: _box = None _box = cv2.boundingRect(expanded_points) if self.assume_straight_pages else np.roll(cv2.boxPoints(cv2.minAreaRect(expanded_points)), -1, axis=0) if self.assume_straight_pages: (x, y, w, h) = _box (xmin, ymin, xmax, ymax) = (x / width, y / height, (x + w) / width, (y + h) / height) boxes.append([xmin, ymin, xmax, ymax, score]) else: _box[:, 0] /= width _box[:, 1] /= height boxes.append(_box) if not self.assume_straight_pages: return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 4, 2), dtype=pred.dtype) else: return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 5), dtype=pred.dtype)
doctr
positive
def mnasnet0_5(pretrained=False, progress=True, **kwargs):
    """MNASNet with depth multiplier of 0.5 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MNASNet(0.5, **kwargs)
    if pretrained:
        <DeepExtract>
        if 'mnasnet0_5' not in _MODEL_URLS or _MODEL_URLS['mnasnet0_5'] is None:
            raise ValueError('No checkpoint is available for model type {}'.format('mnasnet0_5'))
        checkpoint_url = _MODEL_URLS['mnasnet0_5']
        model.load_state_dict(load_state_dict_from_url(checkpoint_url, progress=progress))
        </DeepExtract>
    return model
def mnasnet0_5(pretrained=False, progress=True, **kwargs):
    """MNASNet with depth multiplier of 0.5 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MNASNet(0.5, **kwargs)
    if pretrained:
        if 'mnasnet0_5' not in _MODEL_URLS or _MODEL_URLS['mnasnet0_5'] is None:
            raise ValueError('No checkpoint is available for model type {}'.format('mnasnet0_5'))
        checkpoint_url = _MODEL_URLS['mnasnet0_5']
        model.load_state_dict(load_state_dict_from_url(checkpoint_url, progress=progress))
    return model
dtr-prototype
positive
def _all_clients():
    <DeepExtract>
    global boto3_session
    if boto3_session is None:
        boto3_session = Session(aws_access_key_id='dummy', aws_secret_access_key='dummy', region_name='us-east-1')
    session = boto3_session
    </DeepExtract>
    for service_name in session.get_available_services():
        yield (session, service_name)
def _all_clients():
    global boto3_session
    if boto3_session is None:
        boto3_session = Session(aws_access_key_id='dummy', aws_secret_access_key='dummy', region_name='us-east-1')
    session = boto3_session
    for service_name in session.get_available_services():
        yield (session, service_name)
boto3
positive
def saveToStats(self): """ Save the statistics in a whitespace separate file. """ <DeepExtract> summary = '' summary += 'tracking evaluation summary'.center(80, '=') + '\n' summary += self.printEntry('Multiple Object Tracking Accuracy (MOTA)', self.MOTA) + '\n' summary += self.printEntry('Multiple Object Tracking Precision (MOTP)', self.MOTP) + '\n' summary += self.printEntry('Multiple Object Tracking Accuracy (MOTAL)', self.MOTAL) + '\n' summary += self.printEntry('Multiple Object Detection Accuracy (MODA)', self.MODA) + '\n' summary += self.printEntry('Multiple Object Detection Precision (MODP)', self.MODP) + '\n' summary += '\n' summary += self.printEntry('Recall', self.recall) + '\n' summary += self.printEntry('Precision', self.precision) + '\n' summary += self.printEntry('F1', self.F1) + '\n' summary += self.printEntry('False Alarm Rate', self.FAR) + '\n' summary += '\n' summary += self.printEntry('Mostly Tracked', self.MT) + '\n' summary += self.printEntry('Partly Tracked', self.PT) + '\n' summary += self.printEntry('Mostly Lost', self.ML) + '\n' summary += '\n' summary += self.printEntry('True Positives', self.tp) + '\n' summary += self.printEntry('Ignored True Positives', self.itp) + '\n' summary += self.printEntry('False Positives', self.fp) + '\n' summary += self.printEntry('False Negatives', self.fn) + '\n' summary += self.printEntry('ID-switches', self.id_switches) + '\n' self.fp = self.fp / self.n_gt self.fn = self.fn / self.n_gt self.id_switches = self.id_switches / self.n_gt summary += self.printEntry('False Positives Ratio', self.fp) + '\n' summary += self.printEntry('False Negatives Ratio', self.fn) + '\n' summary += self.printEntry('Ignored False Negatives Ratio', self.ifn) + '\n' summary += self.printEntry('Missed Targets', self.fn) + '\n' summary += self.printEntry('ID-switches', self.id_switches) + '\n' summary += self.printEntry('Fragmentations', self.fragments) + '\n' summary += '\n' summary += self.printEntry('Ground Truth Objects (Total)', self.n_gt + self.n_igt) + '\n' summary += self.printEntry('Ignored Ground Truth Objects', self.n_igt) + '\n' summary += self.printEntry('Ground Truth Trajectories', self.n_gt_trajectories) + '\n' summary += '\n' summary += self.printEntry('Tracker Objects (Total)', self.n_tr) + '\n' summary += self.printEntry('Ignored Tracker Objects', self.n_itr) + '\n' summary += self.printEntry('Tracker Trajectories', self.n_tr_trajectories) + '\n' summary += '=' * 80 summary = summary </DeepExtract> mail.msg(summary) filename = os.path.join(self.t_sha, 'summary_%s.txt' % self.cls) dump = open(filename, 'w+') dump.write(summary) dump.close() filename = os.path.join(self.t_sha, 'stats_%s.txt' % self.cls) dump = open(filename, 'w+') dump.write('%.6f ' * 21 % (self.MOTA, self.MOTP, self.MOTAL, self.MODA, self.MODP, self.recall, self.precision, self.F1, self.FAR, self.MT, self.PT, self.ML, self.tp, self.fp, self.fn, self.id_switches, self.fragments, self.n_gt, self.n_gt_trajectories, self.n_tr, self.n_tr_trajectories)) dump.close() filename = os.path.join(self.t_sha, 'description.txt') dump = open(filename, 'w+') dump.write('MOTA' + 'MOTP' + 'MOTAL' + 'MODA' + 'MODP' + 'recall' + 'precision' + 'F1' + 'FAR') dump.write('MT' + 'PT' + 'ML' + 'tp' + 'fp' + 'fn' + 'id_switches' + 'fragments') dump.write('n_gt' + 'n_gt_trajectories' + 'n_tr' + 'n_tr_trajectories')
def saveToStats(self): """ Save the statistics in a whitespace separate file. """ summary = '' summary += 'tracking evaluation summary'.center(80, '=') + '\n' summary += self.printEntry('Multiple Object Tracking Accuracy (MOTA)', self.MOTA) + '\n' summary += self.printEntry('Multiple Object Tracking Precision (MOTP)', self.MOTP) + '\n' summary += self.printEntry('Multiple Object Tracking Accuracy (MOTAL)', self.MOTAL) + '\n' summary += self.printEntry('Multiple Object Detection Accuracy (MODA)', self.MODA) + '\n' summary += self.printEntry('Multiple Object Detection Precision (MODP)', self.MODP) + '\n' summary += '\n' summary += self.printEntry('Recall', self.recall) + '\n' summary += self.printEntry('Precision', self.precision) + '\n' summary += self.printEntry('F1', self.F1) + '\n' summary += self.printEntry('False Alarm Rate', self.FAR) + '\n' summary += '\n' summary += self.printEntry('Mostly Tracked', self.MT) + '\n' summary += self.printEntry('Partly Tracked', self.PT) + '\n' summary += self.printEntry('Mostly Lost', self.ML) + '\n' summary += '\n' summary += self.printEntry('True Positives', self.tp) + '\n' summary += self.printEntry('Ignored True Positives', self.itp) + '\n' summary += self.printEntry('False Positives', self.fp) + '\n' summary += self.printEntry('False Negatives', self.fn) + '\n' summary += self.printEntry('ID-switches', self.id_switches) + '\n' self.fp = self.fp / self.n_gt self.fn = self.fn / self.n_gt self.id_switches = self.id_switches / self.n_gt summary += self.printEntry('False Positives Ratio', self.fp) + '\n' summary += self.printEntry('False Negatives Ratio', self.fn) + '\n' summary += self.printEntry('Ignored False Negatives Ratio', self.ifn) + '\n' summary += self.printEntry('Missed Targets', self.fn) + '\n' summary += self.printEntry('ID-switches', self.id_switches) + '\n' summary += self.printEntry('Fragmentations', self.fragments) + '\n' summary += '\n' summary += self.printEntry('Ground Truth Objects (Total)', self.n_gt + self.n_igt) + '\n' summary += self.printEntry('Ignored Ground Truth Objects', self.n_igt) + '\n' summary += self.printEntry('Ground Truth Trajectories', self.n_gt_trajectories) + '\n' summary += '\n' summary += self.printEntry('Tracker Objects (Total)', self.n_tr) + '\n' summary += self.printEntry('Ignored Tracker Objects', self.n_itr) + '\n' summary += self.printEntry('Tracker Trajectories', self.n_tr_trajectories) + '\n' summary += '=' * 80 summary = summary mail.msg(summary) filename = os.path.join(self.t_sha, 'summary_%s.txt' % self.cls) dump = open(filename, 'w+') dump.write(summary) dump.close() filename = os.path.join(self.t_sha, 'stats_%s.txt' % self.cls) dump = open(filename, 'w+') dump.write('%.6f ' * 21 % (self.MOTA, self.MOTP, self.MOTAL, self.MODA, self.MODP, self.recall, self.precision, self.F1, self.FAR, self.MT, self.PT, self.ML, self.tp, self.fp, self.fn, self.id_switches, self.fragments, self.n_gt, self.n_gt_trajectories, self.n_tr, self.n_tr_trajectories)) dump.close() filename = os.path.join(self.t_sha, 'description.txt') dump = open(filename, 'w+') dump.write('MOTA' + 'MOTP' + 'MOTAL' + 'MODA' + 'MODP' + 'recall' + 'precision' + 'F1' + 'FAR') dump.write('MT' + 'PT' + 'ML' + 'tp' + 'fp' + 'fn' + 'id_switches' + 'fragments') dump.write('n_gt' + 'n_gt_trajectories' + 'n_tr' + 'n_tr_trajectories')
CenterTrack
positive
def _make_deconv_layer(num_layers, num_filters, num_kernels):
    assert num_layers == len(num_filters), 'ERROR: num_deconv_layers is different len(num_deconv_filters)'
    assert num_layers == len(num_kernels), 'ERROR: num_deconv_layers is different len(num_deconv_filters)'
    layers = []
    for i in range(num_layers):
        <DeepExtract>
        if num_kernels[i] == 4:
            padding = 1
            output_padding = 0
        elif num_kernels[i] == 3:
            padding = 1
            output_padding = 1
        elif num_kernels[i] == 2:
            padding = 0
            output_padding = 0
        (kernel, padding, output_padding) = (num_kernels[i], padding, output_padding)
        </DeepExtract>
        planes = num_filters[i]
        fc = DCN(self.inplanes, planes, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1)
        up = nn.ConvTranspose2d(in_channels=planes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias)
        <DeepExtract>
        w = up.weight.data
        f = math.ceil(w.size(2) / 2)
        c = (2 * f - 1 - f % 2) / (2.0 * f)
        for i in range(w.size(2)):
            for j in range(w.size(3)):
                w[0, 0, i, j] = (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
        for c in range(1, w.size(0)):
            w[c, 0, :, :] = w[0, 0, :, :]
        </DeepExtract>
        layers.append(fc)
        layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
        layers.append(nn.ReLU(inplace=True))
        layers.append(up)
        layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
        layers.append(nn.ReLU(inplace=True))
        self.inplanes = planes
    return nn.Sequential(*layers)
def _make_deconv_layer(num_layers, num_filters, num_kernels):
    assert num_layers == len(num_filters), 'ERROR: num_deconv_layers is different len(num_deconv_filters)'
    assert num_layers == len(num_kernels), 'ERROR: num_deconv_layers is different len(num_deconv_filters)'
    layers = []
    for i in range(num_layers):
        if num_kernels[i] == 4:
            padding = 1
            output_padding = 0
        elif num_kernels[i] == 3:
            padding = 1
            output_padding = 1
        elif num_kernels[i] == 2:
            padding = 0
            output_padding = 0
        (kernel, padding, output_padding) = (num_kernels[i], padding, output_padding)
        planes = num_filters[i]
        fc = DCN(self.inplanes, planes, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1)
        up = nn.ConvTranspose2d(in_channels=planes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias)
        w = up.weight.data
        f = math.ceil(w.size(2) / 2)
        c = (2 * f - 1 - f % 2) / (2.0 * f)
        for i in range(w.size(2)):
            for j in range(w.size(3)):
                w[0, 0, i, j] = (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
        for c in range(1, w.size(0)):
            w[c, 0, :, :] = w[0, 0, :, :]
        layers.append(fc)
        layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
        layers.append(nn.ReLU(inplace=True))
        layers.append(up)
        layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
        layers.append(nn.ReLU(inplace=True))
        self.inplanes = planes
    return nn.Sequential(*layers)
CenterFace.pytorch
positive
def test_gauge_labels(self):
    cmf = GaugeMetricFamily('g', 'help', labels=['a'])
    cmf.add_metric(['b'], 2)
    <DeepExtract>
    class CustomCollector:
        def collect(self):
            return [cmf]
    self.registry.register(CustomCollector())
    </DeepExtract>
    self.assertEqual(2, self.registry.get_sample_value('g', {'a': 'b'}))
def test_gauge_labels(self):
    cmf = GaugeMetricFamily('g', 'help', labels=['a'])
    cmf.add_metric(['b'], 2)
    class CustomCollector:
        def collect(self):
            return [cmf]
    self.registry.register(CustomCollector())
    self.assertEqual(2, self.registry.get_sample_value('g', {'a': 'b'}))
client_python
positive
def fetch_order(self, id, symbol=None):
    <DeepExtract>
    curr_date = self._timeframe.date()
    while True:
        private_order = self._next_private_order_to_update
        if private_order is None:
            return
        fillable_date = private_order['fillable_date']
        if fillable_date > curr_date:
            return
        order_id = private_order['id']
        timestamp = int(fillable_date.value / 1000000.0)
        order = self._open_orders[order_id]
        amount = order['amount']
        price = private_order['price']
        base = private_order['base']
        quote = private_order['quote']
        buy = private_order['buy']
        fee_percentage = private_order['fee_percentage']
        self._remove_used_balance(price, amount, base, quote, buy)
        self._update_balance(price, amount, base, quote, buy, fee_percentage)
        self._fill_order(order, buy, price, timestamp, fee_percentage)
        self._move_to_closed_orders(order_id)
        self._update_next_private_order_to_update()
    </DeepExtract>
    order = self._closed_orders.get(id)
    if order is None:
        order = self._open_orders.get(id)
    if order is None:
        raise OrderNotFound('ExchangeAccount: order {} does not exist'.format(id))
    return self._return_decimal_to_float(deepcopy(order))
def fetch_order(self, id, symbol=None):
    curr_date = self._timeframe.date()
    while True:
        private_order = self._next_private_order_to_update
        if private_order is None:
            return
        fillable_date = private_order['fillable_date']
        if fillable_date > curr_date:
            return
        order_id = private_order['id']
        timestamp = int(fillable_date.value / 1000000.0)
        order = self._open_orders[order_id]
        amount = order['amount']
        price = private_order['price']
        base = private_order['base']
        quote = private_order['quote']
        buy = private_order['buy']
        fee_percentage = private_order['fee_percentage']
        self._remove_used_balance(price, amount, base, quote, buy)
        self._update_balance(price, amount, base, quote, buy, fee_percentage)
        self._fill_order(order, buy, price, timestamp, fee_percentage)
        self._move_to_closed_orders(order_id)
        self._update_next_private_order_to_update()
    order = self._closed_orders.get(id)
    if order is None:
        order = self._open_orders.get(id)
    if order is None:
        raise OrderNotFound('ExchangeAccount: order {} does not exist'.format(id))
    return self._return_decimal_to_float(deepcopy(order))
btrccts
positive
def nim_maintenance(module, params): """ Apply a maintenance operation (commit) on nim clients (targets). arguments: module (dict): The Ansible module params (dict): The module parameters for the command. """ module.log('NIM - maintenance operation on {0}'.format(params['targets'])) <DeepExtract> clients = [] for target in params['targets']: if target.upper() == 'ALL' or target == '*': clients = list(results['nim_node']['standalone']) clients.extend(list(results['nim_node']['vios'])) continue if target.lower() == 'standalone' or target.lower() == 'vios': clients.extend(list(results['nim_node'][target.lower()])) continue rmatch = re.match('(\\w+)\\[(\\d+):(\\d+)\\]', target) if rmatch: name = rmatch.group(1) start = rmatch.group(2) end = rmatch.group(3) for i in range(int(start), int(end) + 1): curr_name = name + str(i) if curr_name in results['nim_node']['standalone']: clients.append(curr_name) continue rmatch = re.match('(\\w+)\\*$', target) if rmatch: name = rmatch.group(1) for curr_name in results['nim_node']['standalone']: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) for curr_name in results['nim_node']['vios']: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) continue if target.upper() == 'ALL' or target == '*': clients = list(results['nim_node']['standalone']) continue if target in results['nim_node']['standalone'] or target in results['nim_node']['vios'] or target == 'master': clients.append(target) results['targets'] = list(set(clients)) </DeepExtract> if not results['targets']: results['msg'] = "No matching target found for targets '{0}'.".format(params['targets']) module.log('NIM - Error: ' + results['msg']) module.fail_json(**results) module.debug('NIM - Target list: {0}'.format(results['targets'])) flag = '-c' for target in results['targets']: module.log('NIM - perform maintenance operation for client {0}'.format(target)) results['meta'][target] = {'messages': []} results['status'][target] = '' if target in results['nim_node']['vios']: msg = 'maintenance operation is not supported on VIOS.' 
results['meta'][target]['messages'].append(msg) module.log('NIM - Error: ' + msg) results['status'][target] = 'FAILURE' continue if target in results['nim_node']['standalone']: cmd = ['nim', '-o', 'maint', '-a', 'installp_flags=' + flag, '-a', 'filesets=ALL', target] (rc, stdout, stderr) = module.run_command(cmd) else: cmd = ['/usr/sbin/installp', '-c', 'all'] <DeepExtract> target = get_target_ipaddr(module, target) rcmd = '( LC_ALL=C {0} ); echo rc=$?'.format(' '.join(cmd)) cmd = ['/usr/lpp/bos.sysmgt/nim/methods/c_rsh', target, rcmd] module.debug('exec command:{0}'.format(cmd)) (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: (rc, stdout, stderr) = (rc, stdout, stderr) s = re.search('rc=([-\\d]+)$', stdout) if s: rc = int(s.group(1)) stdout = re.sub('rc=[-\\d]+\\n$', '', stdout) module.debug('exec command rc:{0}, output:{1}, stderr:{2}'.format(rc, stdout, stderr)) (rc, stdout, stderr) = (rc, stdout, stderr) </DeepExtract> results['meta'][target]['cmd'] = ' '.join(cmd) results['meta'][target]['rc'] = rc results['meta'][target]['stdout'] = stdout results['meta'][target]['stderr'] = stderr if rc != 0: msg = 'maintenance operation failed on {0}.'.format(target) results['meta']['messages'].append(msg) module.log('NIM - Error: ' + msg) results['status'][target] = 'FAILURE' else: msg = 'maintenance operation successfull on {0}.'.format(target) results['meta']['messages'].append(msg) module.log('NIM - ' + msg) results['changed'] = True results['status'][target] = 'SUCCESS' module.log('cmd: {0}'.format(' '.join(cmd))) module.log('rc: {0}'.format(rc)) module.log('stdout: {0}'.format(stdout)) module.log('stderr: {0}'.format(stderr))
def nim_maintenance(module, params): """ Apply a maintenance operation (commit) on nim clients (targets). arguments: module (dict): The Ansible module params (dict): The module parameters for the command. """ module.log('NIM - maintenance operation on {0}'.format(params['targets'])) clients = [] for target in params['targets']: if target.upper() == 'ALL' or target == '*': clients = list(results['nim_node']['standalone']) clients.extend(list(results['nim_node']['vios'])) continue if target.lower() == 'standalone' or target.lower() == 'vios': clients.extend(list(results['nim_node'][target.lower()])) continue rmatch = re.match('(\\w+)\\[(\\d+):(\\d+)\\]', target) if rmatch: name = rmatch.group(1) start = rmatch.group(2) end = rmatch.group(3) for i in range(int(start), int(end) + 1): curr_name = name + str(i) if curr_name in results['nim_node']['standalone']: clients.append(curr_name) continue rmatch = re.match('(\\w+)\\*$', target) if rmatch: name = rmatch.group(1) for curr_name in results['nim_node']['standalone']: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) for curr_name in results['nim_node']['vios']: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) continue if target.upper() == 'ALL' or target == '*': clients = list(results['nim_node']['standalone']) continue if target in results['nim_node']['standalone'] or target in results['nim_node']['vios'] or target == 'master': clients.append(target) results['targets'] = list(set(clients)) if not results['targets']: results['msg'] = "No matching target found for targets '{0}'.".format(params['targets']) module.log('NIM - Error: ' + results['msg']) module.fail_json(**results) module.debug('NIM - Target list: {0}'.format(results['targets'])) flag = '-c' for target in results['targets']: module.log('NIM - perform maintenance operation for client {0}'.format(target)) results['meta'][target] = {'messages': []} results['status'][target] = '' if target in results['nim_node']['vios']: msg = 'maintenance operation is not supported on VIOS.' 
results['meta'][target]['messages'].append(msg) module.log('NIM - Error: ' + msg) results['status'][target] = 'FAILURE' continue if target in results['nim_node']['standalone']: cmd = ['nim', '-o', 'maint', '-a', 'installp_flags=' + flag, '-a', 'filesets=ALL', target] (rc, stdout, stderr) = module.run_command(cmd) else: cmd = ['/usr/sbin/installp', '-c', 'all'] target = get_target_ipaddr(module, target) rcmd = '( LC_ALL=C {0} ); echo rc=$?'.format(' '.join(cmd)) cmd = ['/usr/lpp/bos.sysmgt/nim/methods/c_rsh', target, rcmd] module.debug('exec command:{0}'.format(cmd)) (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: (rc, stdout, stderr) = (rc, stdout, stderr) s = re.search('rc=([-\\d]+)$', stdout) if s: rc = int(s.group(1)) stdout = re.sub('rc=[-\\d]+\\n$', '', stdout) module.debug('exec command rc:{0}, output:{1}, stderr:{2}'.format(rc, stdout, stderr)) (rc, stdout, stderr) = (rc, stdout, stderr) results['meta'][target]['cmd'] = ' '.join(cmd) results['meta'][target]['rc'] = rc results['meta'][target]['stdout'] = stdout results['meta'][target]['stderr'] = stderr if rc != 0: msg = 'maintenance operation failed on {0}.'.format(target) results['meta']['messages'].append(msg) module.log('NIM - Error: ' + msg) results['status'][target] = 'FAILURE' else: msg = 'maintenance operation successfull on {0}.'.format(target) results['meta']['messages'].append(msg) module.log('NIM - ' + msg) results['changed'] = True results['status'][target] = 'SUCCESS' module.log('cmd: {0}'.format(' '.join(cmd))) module.log('rc: {0}'.format(rc)) module.log('stdout: {0}'.format(stdout)) module.log('stderr: {0}'.format(stderr))
ansible-power-aix
positive
def TopLevelParent(self):
    """Return the top level window of this control

    The TopLevel parent is different from the parent in that the Parent
    is the window that owns this window - but it may not be a dialog/main
    window. For example most Comboboxes have an Edit. The ComboBox is the
    parent of the Edit control.

    This will always return a valid window handle (if the control has
    no top level parent then the control itself is returned - as it is
    a top level window already!)
    """
    if 'top_level_parent' not in self._cache:
        <DeepExtract>
        if 'parent' not in self._cache:
            parent_hwnd = handleprops.parent(self)
            if parent_hwnd:
                self._cache['parent'] = HwndWrapper(parent_hwnd)
            else:
                self._cache['parent'] = None
        parent = self._cache['parent']
        </DeepExtract>
        if self.IsDialog():
            self._cache['top_level_parent'] = self
        elif not parent:
            self._cache['top_level_parent'] = self
        elif not parent.IsDialog():
            self._cache['top_level_parent'] = parent.TopLevelParent()
        else:
            self._cache['top_level_parent'] = parent
    return self._cache['top_level_parent']
def TopLevelParent(self):
    """Return the top level window of this control

    The TopLevel parent is different from the parent in that the Parent
    is the window that owns this window - but it may not be a dialog/main
    window. For example most Comboboxes have an Edit. The ComboBox is the
    parent of the Edit control.

    This will always return a valid window handle (if the control has
    no top level parent then the control itself is returned - as it is
    a top level window already!)
    """
    if 'top_level_parent' not in self._cache:
        if 'parent' not in self._cache:
            parent_hwnd = handleprops.parent(self)
            if parent_hwnd:
                self._cache['parent'] = HwndWrapper(parent_hwnd)
            else:
                self._cache['parent'] = None
        parent = self._cache['parent']
        if self.IsDialog():
            self._cache['top_level_parent'] = self
        elif not parent:
            self._cache['top_level_parent'] = self
        elif not parent.IsDialog():
            self._cache['top_level_parent'] = parent.TopLevelParent()
        else:
            self._cache['top_level_parent'] = parent
    return self._cache['top_level_parent']
BrowserRefresh-Sublime
positive
def test_asinh(self):
    <DeepExtract>
    self = Mpfr_t()
    mpfr_init2(self, 53)
    x = self
    </DeepExtract>
    <DeepExtract>
    self = Mpfr_t()
    mpfr_init2(self, 53)
    y = self
    </DeepExtract>
    mpfr_set_d(x, 1.3, MPFR_RNDN)
    mpfr_asinh(y, x, MPFR_RNDN)
    self.assertEqual(mpfr_get_d(y, MPFR_RNDN), 1.078451058954897)
def test_asinh(self):
    self = Mpfr_t()
    mpfr_init2(self, 53)
    x = self
    self = Mpfr_t()
    mpfr_init2(self, 53)
    y = self
    mpfr_set_d(x, 1.3, MPFR_RNDN)
    mpfr_asinh(y, x, MPFR_RNDN)
    self.assertEqual(mpfr_get_d(y, MPFR_RNDN), 1.078451058954897)
bigfloat
positive
def attach(vobj): vobj.addExtension('Gui::ViewProviderGeoFeatureGroupExtensionPython', None) self.ViewObject = vobj <DeepExtract> pl = vobj.PropertiesList d = 0 if 'DisplaySize' in pl: d = vobj.DisplaySize.Value vobj.removeProperty('DisplaySize') if not 'DisplayLength' in pl: vobj.addProperty('App::PropertyLength', 'DisplayLength', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The display length of this section plane')) if d: vobj.DisplayLength = d else: vobj.DisplayLength = 1000 if not 'DisplayHeight' in pl: vobj.addProperty('App::PropertyLength', 'DisplayHeight', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The display height of this section plane')) if d: vobj.DisplayHeight = d else: vobj.DisplayHeight = 1000 if not 'ArrowSize' in pl: vobj.addProperty('App::PropertyLength', 'ArrowSize', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The size of the arrows of this section plane')) vobj.ArrowSize = 50 if not 'Transparency' in pl: vobj.addProperty('App::PropertyPercent', 'Transparency', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The transparency of this object')) vobj.Transparency = 85 if not 'LineWidth' in pl: vobj.addProperty('App::PropertyFloat', 'LineWidth', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The line width of this object')) vobj.LineWidth = 1 if not 'CutDistance' in pl: vobj.addProperty('App::PropertyLength', 'CutDistance', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'Show the cut in the 3D view')) if not 'LineColor' in pl: vobj.addProperty('App::PropertyColor', 'LineColor', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The color of this object')) if not 'CutView' in pl: vobj.addProperty('App::PropertyBool', 'CutView', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'Show the cut in the 3D view')) if not 'CutMargin' in pl: vobj.addProperty('App::PropertyLength', 'CutMargin', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The distance between the cut plane and the actual view cut (keep this a very small value but not zero)')) vobj.CutMargin = 1 </DeepExtract> <DeepExtract> self.clip = None self.mat1 = coin.SoMaterial() self.mat2 = coin.SoMaterial() self.fcoords = coin.SoCoordinate3() fs = coin.SoIndexedFaceSet() fs.coordIndex.setValues(0, 7, [0, 1, 2, -1, 0, 2, 3]) self.drawstyle = coin.SoDrawStyle() self.drawstyle.style = coin.SoDrawStyle.LINES self.lcoords = coin.SoCoordinate3() ls = coin.SoType.fromName('SoBrepEdgeSet').createInstance() ls.coordIndex.setValues(0, 57, [0, 1, -1, 2, 3, 4, 5, -1, 6, 7, 8, 9, -1, 10, 11, -1, 12, 13, 14, 15, -1, 16, 17, 18, 19, -1, 20, 21, -1, 22, 23, 24, 25, -1, 26, 27, 28, 29, -1, 30, 31, -1, 32, 33, 34, 35, -1, 36, 37, 38, 39, -1, 40, 41, 42, 43, 44]) sep = coin.SoSeparator() psep = coin.SoSeparator() fsep = coin.SoSeparator() fsep.addChild(self.mat2) fsep.addChild(self.fcoords) fsep.addChild(fs) psep.addChild(self.mat1) psep.addChild(self.drawstyle) psep.addChild(self.lcoords) psep.addChild(ls) sep.addChild(fsep) sep.addChild(psep) vobj.addDisplayMode(sep, 'Default') </DeepExtract> <DeepExtract> if 'DisplayLength' == 'LineColor': if hasattr(vobj, 'LineColor'): l = vobj.LineColor self.mat1.diffuseColor.setValue([l[0], l[1], l[2]]) self.mat2.diffuseColor.setValue([l[0], l[1], l[2]]) elif 'DisplayLength' == 'Transparency': if hasattr(vobj, 'Transparency'): self.mat2.transparency.setValue(vobj.Transparency / 100.0) elif 'DisplayLength' in ['DisplayLength', 'DisplayHeight', 'ArrowSize']: self.setup_sectionplane_marker(vobj) elif 'DisplayLength' == 'LineWidth': self.drawstyle.lineWidth = 
vobj.LineWidth elif 'DisplayLength' in ['CutView', 'CutMargin']: if hasattr(vobj, 'CutView') and Gui.ActiveDocument.ActiveView: if vobj.CutView: self.setup_clipping_plane(vobj) else: self.remove_clipping_plane(vobj) return </DeepExtract> <DeepExtract> if 'LineColor' == 'LineColor': if hasattr(vobj, 'LineColor'): l = vobj.LineColor self.mat1.diffuseColor.setValue([l[0], l[1], l[2]]) self.mat2.diffuseColor.setValue([l[0], l[1], l[2]]) elif 'LineColor' == 'Transparency': if hasattr(vobj, 'Transparency'): self.mat2.transparency.setValue(vobj.Transparency / 100.0) elif 'LineColor' in ['DisplayLength', 'DisplayHeight', 'ArrowSize']: self.setup_sectionplane_marker(vobj) elif 'LineColor' == 'LineWidth': self.drawstyle.lineWidth = vobj.LineWidth elif 'LineColor' in ['CutView', 'CutMargin']: if hasattr(vobj, 'CutView') and Gui.ActiveDocument.ActiveView: if vobj.CutView: self.setup_clipping_plane(vobj) else: self.remove_clipping_plane(vobj) return </DeepExtract> <DeepExtract> if 'CutView' == 'LineColor': if hasattr(vobj, 'LineColor'): l = vobj.LineColor self.mat1.diffuseColor.setValue([l[0], l[1], l[2]]) self.mat2.diffuseColor.setValue([l[0], l[1], l[2]]) elif 'CutView' == 'Transparency': if hasattr(vobj, 'Transparency'): self.mat2.transparency.setValue(vobj.Transparency / 100.0) elif 'CutView' in ['DisplayLength', 'DisplayHeight', 'ArrowSize']: self.setup_sectionplane_marker(vobj) elif 'CutView' == 'LineWidth': self.drawstyle.lineWidth = vobj.LineWidth elif 'CutView' in ['CutView', 'CutMargin']: if hasattr(vobj, 'CutView') and Gui.ActiveDocument.ActiveView: if vobj.CutView: self.setup_clipping_plane(vobj) else: self.remove_clipping_plane(vobj) return </DeepExtract> vobj.Transparency = 85 self.is_active = False
def attach(vobj): vobj.addExtension('Gui::ViewProviderGeoFeatureGroupExtensionPython', None) self.ViewObject = vobj pl = vobj.PropertiesList d = 0 if 'DisplaySize' in pl: d = vobj.DisplaySize.Value vobj.removeProperty('DisplaySize') if not 'DisplayLength' in pl: vobj.addProperty('App::PropertyLength', 'DisplayLength', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The display length of this section plane')) if d: vobj.DisplayLength = d else: vobj.DisplayLength = 1000 if not 'DisplayHeight' in pl: vobj.addProperty('App::PropertyLength', 'DisplayHeight', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The display height of this section plane')) if d: vobj.DisplayHeight = d else: vobj.DisplayHeight = 1000 if not 'ArrowSize' in pl: vobj.addProperty('App::PropertyLength', 'ArrowSize', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The size of the arrows of this section plane')) vobj.ArrowSize = 50 if not 'Transparency' in pl: vobj.addProperty('App::PropertyPercent', 'Transparency', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The transparency of this object')) vobj.Transparency = 85 if not 'LineWidth' in pl: vobj.addProperty('App::PropertyFloat', 'LineWidth', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The line width of this object')) vobj.LineWidth = 1 if not 'CutDistance' in pl: vobj.addProperty('App::PropertyLength', 'CutDistance', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'Show the cut in the 3D view')) if not 'LineColor' in pl: vobj.addProperty('App::PropertyColor', 'LineColor', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The color of this object')) if not 'CutView' in pl: vobj.addProperty('App::PropertyBool', 'CutView', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'Show the cut in the 3D view')) if not 'CutMargin' in pl: vobj.addProperty('App::PropertyLength', 'CutMargin', 'SectionPlane', QT_TRANSLATE_NOOP('App::Property', 'The distance between the cut plane and the actual view cut (keep this a very small value but not zero)')) vobj.CutMargin = 1 self.clip = None self.mat1 = coin.SoMaterial() self.mat2 = coin.SoMaterial() self.fcoords = coin.SoCoordinate3() fs = coin.SoIndexedFaceSet() fs.coordIndex.setValues(0, 7, [0, 1, 2, -1, 0, 2, 3]) self.drawstyle = coin.SoDrawStyle() self.drawstyle.style = coin.SoDrawStyle.LINES self.lcoords = coin.SoCoordinate3() ls = coin.SoType.fromName('SoBrepEdgeSet').createInstance() ls.coordIndex.setValues(0, 57, [0, 1, -1, 2, 3, 4, 5, -1, 6, 7, 8, 9, -1, 10, 11, -1, 12, 13, 14, 15, -1, 16, 17, 18, 19, -1, 20, 21, -1, 22, 23, 24, 25, -1, 26, 27, 28, 29, -1, 30, 31, -1, 32, 33, 34, 35, -1, 36, 37, 38, 39, -1, 40, 41, 42, 43, 44]) sep = coin.SoSeparator() psep = coin.SoSeparator() fsep = coin.SoSeparator() fsep.addChild(self.mat2) fsep.addChild(self.fcoords) fsep.addChild(fs) psep.addChild(self.mat1) psep.addChild(self.drawstyle) psep.addChild(self.lcoords) psep.addChild(ls) sep.addChild(fsep) sep.addChild(psep) vobj.addDisplayMode(sep, 'Default') if 'DisplayLength' == 'LineColor': if hasattr(vobj, 'LineColor'): l = vobj.LineColor self.mat1.diffuseColor.setValue([l[0], l[1], l[2]]) self.mat2.diffuseColor.setValue([l[0], l[1], l[2]]) elif 'DisplayLength' == 'Transparency': if hasattr(vobj, 'Transparency'): self.mat2.transparency.setValue(vobj.Transparency / 100.0) elif 'DisplayLength' in ['DisplayLength', 'DisplayHeight', 'ArrowSize']: self.setup_sectionplane_marker(vobj) elif 'DisplayLength' == 'LineWidth': self.drawstyle.lineWidth = vobj.LineWidth elif 'DisplayLength' in ['CutView', 'CutMargin']: if 
hasattr(vobj, 'CutView') and Gui.ActiveDocument.ActiveView: if vobj.CutView: self.setup_clipping_plane(vobj) else: self.remove_clipping_plane(vobj) return if 'LineColor' == 'LineColor': if hasattr(vobj, 'LineColor'): l = vobj.LineColor self.mat1.diffuseColor.setValue([l[0], l[1], l[2]]) self.mat2.diffuseColor.setValue([l[0], l[1], l[2]]) elif 'LineColor' == 'Transparency': if hasattr(vobj, 'Transparency'): self.mat2.transparency.setValue(vobj.Transparency / 100.0) elif 'LineColor' in ['DisplayLength', 'DisplayHeight', 'ArrowSize']: self.setup_sectionplane_marker(vobj) elif 'LineColor' == 'LineWidth': self.drawstyle.lineWidth = vobj.LineWidth elif 'LineColor' in ['CutView', 'CutMargin']: if hasattr(vobj, 'CutView') and Gui.ActiveDocument.ActiveView: if vobj.CutView: self.setup_clipping_plane(vobj) else: self.remove_clipping_plane(vobj) return if 'CutView' == 'LineColor': if hasattr(vobj, 'LineColor'): l = vobj.LineColor self.mat1.diffuseColor.setValue([l[0], l[1], l[2]]) self.mat2.diffuseColor.setValue([l[0], l[1], l[2]]) elif 'CutView' == 'Transparency': if hasattr(vobj, 'Transparency'): self.mat2.transparency.setValue(vobj.Transparency / 100.0) elif 'CutView' in ['DisplayLength', 'DisplayHeight', 'ArrowSize']: self.setup_sectionplane_marker(vobj) elif 'CutView' == 'LineWidth': self.drawstyle.lineWidth = vobj.LineWidth elif 'CutView' in ['CutView', 'CutMargin']: if hasattr(vobj, 'CutView') and Gui.ActiveDocument.ActiveView: if vobj.CutView: self.setup_clipping_plane(vobj) else: self.remove_clipping_plane(vobj) return vobj.Transparency = 85 self.is_active = False
BIM_Workbench
positive
def test_stop_is_based_on_start_and_xlabel_shows_start(self): argv = ['cantools', 'plot', '--start', '6.2.', '--stop', '13:00:', '--break-time', '-1', self.DBC_FILE, '*FL'] input_data = ' (2021-02-05 12:00:00.833823) vcan0 00000343 [8] 50 05 65 05 65 05 6C 05\n (2021-02-05 18:00:00.835761) vcan0 00000343 [8] D5 05 D5 05 B2 05 AB 05\n (2021-02-06 00:00:00.837663) vcan0 00000343 [8] 07 06 1C 06 07 06 2B 06\n (2021-02-06 06:00:00.838797) vcan0 00000343 [8] 6E 06 6E 06 75 06 60 06\n (2021-02-06 12:00:00.840644) vcan0 00000343 [8] C4 06 CB 06 A7 06 AE 06\n (2021-02-06 18:00:00.842506) vcan0 00000343 [8] 05 07 05 07 05 07 29 07\n (2021-02-07 00:00:00.844313) vcan0 00000343 [8] A0 07 84 07 99 07 99 07\n (2021-02-07 06:00:00.846156) vcan0 00000343 [8] E5 07 DE 07 D0 07 E5 07\n (2021-02-07 12:00:00.847953) vcan0 00000343 [8] 17 08 FB 07 09 08 10 08\n (2021-02-07 18:00:00.849762) vcan0 00000343 [8] 1E 08 25 08 10 08 1E 08\n' <DeepExtract> out = [] ln_num = 0 for ln in input_data.splitlines(): if not ln: continue if ln_num % mod == offset: m = self.REO_TIMESTAMP.search(ln) timestamp = m.group(1) out.append(self.parse_absolute_time(timestamp)) ln_num += 1 xs = out </DeepExtract> ys_whlspeed_fl = [21.25, 23.328125, 24.109375, 25.71875, 27.0625, 28.078125, 30.5, 31.578125, 32.359375, 32.46875] ys_whlspeed_fl = ys_whlspeed_fl[2:5] xs = xs[2:5] expected_calls = [mock.call.subplot(1, 1, 1, sharex=None), mock.call.subplot().plot(xs, ys_whlspeed_fl, '', label='BREMSE_33.whlspeed_FL'), mock.call.subplot().set(ylabel='*FL'), mock.call.subplot().set_xlabel(self.XLABEL_tA % self.parse_start_time(xs[0])), mock.call.show()] stdout = StringIO() expected_output = '' with mock.patch('sys.stdin', StringIO(input_data)): with mock.patch('sys.stdout', stdout): with mock.patch('sys.argv', argv): with PyplotMock() as plt: cantools._main() self.assertListEqual(plt.mock_calls, expected_calls) self.assertEqual(stdout.getvalue(), expected_output)
def test_stop_is_based_on_start_and_xlabel_shows_start(self): argv = ['cantools', 'plot', '--start', '6.2.', '--stop', '13:00:', '--break-time', '-1', self.DBC_FILE, '*FL'] input_data = ' (2021-02-05 12:00:00.833823) vcan0 00000343 [8] 50 05 65 05 65 05 6C 05\n (2021-02-05 18:00:00.835761) vcan0 00000343 [8] D5 05 D5 05 B2 05 AB 05\n (2021-02-06 00:00:00.837663) vcan0 00000343 [8] 07 06 1C 06 07 06 2B 06\n (2021-02-06 06:00:00.838797) vcan0 00000343 [8] 6E 06 6E 06 75 06 60 06\n (2021-02-06 12:00:00.840644) vcan0 00000343 [8] C4 06 CB 06 A7 06 AE 06\n (2021-02-06 18:00:00.842506) vcan0 00000343 [8] 05 07 05 07 05 07 29 07\n (2021-02-07 00:00:00.844313) vcan0 00000343 [8] A0 07 84 07 99 07 99 07\n (2021-02-07 06:00:00.846156) vcan0 00000343 [8] E5 07 DE 07 D0 07 E5 07\n (2021-02-07 12:00:00.847953) vcan0 00000343 [8] 17 08 FB 07 09 08 10 08\n (2021-02-07 18:00:00.849762) vcan0 00000343 [8] 1E 08 25 08 10 08 1E 08\n' out = [] ln_num = 0 for ln in input_data.splitlines(): if not ln: continue if ln_num % mod == offset: m = self.REO_TIMESTAMP.search(ln) timestamp = m.group(1) out.append(self.parse_absolute_time(timestamp)) ln_num += 1 xs = out ys_whlspeed_fl = [21.25, 23.328125, 24.109375, 25.71875, 27.0625, 28.078125, 30.5, 31.578125, 32.359375, 32.46875] ys_whlspeed_fl = ys_whlspeed_fl[2:5] xs = xs[2:5] expected_calls = [mock.call.subplot(1, 1, 1, sharex=None), mock.call.subplot().plot(xs, ys_whlspeed_fl, '', label='BREMSE_33.whlspeed_FL'), mock.call.subplot().set(ylabel='*FL'), mock.call.subplot().set_xlabel(self.XLABEL_tA % self.parse_start_time(xs[0])), mock.call.show()] stdout = StringIO() expected_output = '' with mock.patch('sys.stdin', StringIO(input_data)): with mock.patch('sys.stdout', stdout): with mock.patch('sys.argv', argv): with PyplotMock() as plt: cantools._main() self.assertListEqual(plt.mock_calls, expected_calls) self.assertEqual(stdout.getvalue(), expected_output)
cantools
positive
@controller_data(class_=TimeoutController) def test_timeout(self, plain_controller, client): <DeepExtract> (code, mesg) = client.ehlo(domain) assert code == 250 return mesg </DeepExtract> time.sleep(0.1 + TimeoutController.Delay) with pytest.raises(SMTPServerDisconnected): client.mail('anne@example.com')
@controller_data(class_=TimeoutController) def test_timeout(self, plain_controller, client): (code, mesg) = client.ehlo(domain) assert code == 250 return mesg time.sleep(0.1 + TimeoutController.Delay) with pytest.raises(SMTPServerDisconnected): client.mail('anne@example.com')
aiosmtpd
positive
@property(cached='_encoding') def encoding(self): """the character encoding of the request, usually only set in POST type requests""" encoding = None <DeepExtract> ct = self.headers.get('content-type', default_val) </DeepExtract> if ct: ah = AcceptHeader(ct) if ah.media_types: encoding = ah.media_types[0][2].get('charset', None) return encoding
@property(cached='_encoding') def encoding(self): """the character encoding of the request, usually only set in POST type requests""" encoding = None ct = self.headers.get('content-type', default_val) if ct: ah = AcceptHeader(ct) if ah.media_types: encoding = ah.media_types[0][2].get('charset', None) return encoding
endpoints
positive
def step(self): """Update parameters and rate""" self._step += 1 <DeepExtract> if step is None: step = self._step rate = self.factor * (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5))) </DeepExtract> for p in self.optimizer.param_groups: p['lr'] = rate self._rate = rate self.optimizer.step()
def step(self): """Update parameters and rate""" self._step += 1 if step is None: step = self._step rate = self.factor * (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5))) for p in self.optimizer.param_groups: p['lr'] = rate self._rate = rate self.optimizer.step()
connect-caption-and-trace
positive
def make_strat_adjoint_dynamics(flat_f, flat_g, unpack): def aug_f(augmented_state, t, flat_args): (y, y_adj, args_adj) = unpack(augmented_state) (fval, vjp_all) = vjp(flat_f, y, t, flat_args) (f_vjp_a, f_vjp_t, f_vjp_args) = vjp_all(-y_adj) return np.concatenate([fval, f_vjp_a, f_vjp_args]) def aug_flat_g_prod(augmented_state, t, args, v): (y, y_adj, arg_adj) = unpack(augmented_state) (gval, vjp_g) = vjp(flat_g, y, t, args) (vjp_a, vjp_t, vjp_args) = vjp_g(-y_adj * v) return np.concatenate([gval * v, vjp_a, vjp_args]) <DeepExtract> def adjoint_milstein_prod(aug_state, t, args, v): (y, adj, adj_args) = unpack(aug_state) (gval, vjp_all) = vjp(flat_g, y, t, args) (gdg_times_v, _, _) = vjp_all(gval * v) dgdx = diag_jac(flat_g, y, t, args) (prod_partials_adj, _, prod_partials_args) = vjp_all(adj * v * dgdx) def gdg(y, t, args, v): g_y_only = lambda y: flat_g(y, t, args) (gval, tangent) = jvp(g_y_only, (y,), (v,)) adj_flat_gdg = np.sum(tangent) gdg_v = lambda y, t, args: gdg(y, t, args, adj * v * gval) (mixed_partials_adj, mixed_partials_args) = grad(gdg_v, argnums=(0, 2))(y, t, args) adj_flat_gdg = np.concatenate([gdg_times_v, prod_partials_adj - mixed_partials_adj, prod_partials_args - mixed_partials_args]) adj_flat_gdg = adjoint_milstein_prod </DeepExtract> return (aug_f, aug_flat_g_prod, adj_flat_gdg)
def make_strat_adjoint_dynamics(flat_f, flat_g, unpack): def aug_f(augmented_state, t, flat_args): (y, y_adj, args_adj) = unpack(augmented_state) (fval, vjp_all) = vjp(flat_f, y, t, flat_args) (f_vjp_a, f_vjp_t, f_vjp_args) = vjp_all(-y_adj) return np.concatenate([fval, f_vjp_a, f_vjp_args]) def aug_flat_g_prod(augmented_state, t, args, v): (y, y_adj, arg_adj) = unpack(augmented_state) (gval, vjp_g) = vjp(flat_g, y, t, args) (vjp_a, vjp_t, vjp_args) = vjp_g(-y_adj * v) return np.concatenate([gval * v, vjp_a, vjp_args]) def adjoint_milstein_prod(aug_state, t, args, v): (y, adj, adj_args) = unpack(aug_state) (gval, vjp_all) = vjp(flat_g, y, t, args) (gdg_times_v, _, _) = vjp_all(gval * v) dgdx = diag_jac(flat_g, y, t, args) (prod_partials_adj, _, prod_partials_args) = vjp_all(adj * v * dgdx) def gdg(y, t, args, v): g_y_only = lambda y: flat_g(y, t, args) (gval, tangent) = jvp(g_y_only, (y,), (v,)) adj_flat_gdg = np.sum(tangent) gdg_v = lambda y, t, args: gdg(y, t, args, adj * v * gval) (mixed_partials_adj, mixed_partials_args) = grad(gdg_v, argnums=(0, 2))(y, t, args) adj_flat_gdg = np.concatenate([gdg_times_v, prod_partials_adj - mixed_partials_adj, prod_partials_args - mixed_partials_args]) adj_flat_gdg = adjoint_milstein_prod return (aug_f, aug_flat_g_prod, adj_flat_gdg)
bayesian-sde
positive
def load_dict(self, obj: dict, path: Optional[Path]=None) -> Union[TOMLRule, DeprecatedRule]: if obj.get('metadata', {}).get('maturity', '') == 'deprecated': contents = DeprecatedRuleContents.from_dict(obj) contents.set_version_lock(self._version_lock) deprecated_rule = DeprecatedRule(path, contents) <DeepExtract> self._assert_new(deprecated_rule, is_deprecated=True) self.deprecated.id_map[deprecated_rule.id] = deprecated_rule self.deprecated.name_map[deprecated_rule.name] = deprecated_rule self.deprecated.rules.append(deprecated_rule) </DeepExtract> return deprecated_rule else: contents = TOMLRuleContents.from_dict(obj) contents.set_version_lock(self._version_lock) rule = TOMLRule(path=path, contents=contents) <DeepExtract> self._assert_new(rule) self.id_map[rule.id] = rule self.name_map[rule.name] = rule self.rules.append(rule) </DeepExtract> return rule
def load_dict(self, obj: dict, path: Optional[Path]=None) -> Union[TOMLRule, DeprecatedRule]: if obj.get('metadata', {}).get('maturity', '') == 'deprecated': contents = DeprecatedRuleContents.from_dict(obj) contents.set_version_lock(self._version_lock) deprecated_rule = DeprecatedRule(path, contents) self._assert_new(deprecated_rule, is_deprecated=True) self.deprecated.id_map[deprecated_rule.id] = deprecated_rule self.deprecated.name_map[deprecated_rule.name] = deprecated_rule self.deprecated.rules.append(deprecated_rule) return deprecated_rule else: contents = TOMLRuleContents.from_dict(obj) contents.set_version_lock(self._version_lock) rule = TOMLRule(path=path, contents=contents) self._assert_new(rule) self.id_map[rule.id] = rule self.name_map[rule.name] = rule self.rules.append(rule) return rule
detection-rules
positive
def import_stage_from_module(module): <DeepExtract> try: init = module.init except: init = dinit work = module.work (init, work) = (init, work) </DeepExtract> <DeepExtract> ret = {} dict_ = {'init': init, 'work': work} try: code = {'init': init.__code__, 'work': work.__code__} except AttributeError: code = {'init': init.func_code, 'work': work.func_code} ret['object'] = dict_ ret['python'] = code try: marshaled = marshal.dumps(code) except ValueError: marshaled = None try: import dill dilled = dill.dumps(code) except ImportError: dilled = None ret['dill'] = dilled ret['marshal'] = marshaled ret = ret </DeepExtract> try: shell_class = module.shell except Exception as e: shell_class = None ret['shell'] = shell_class return ret
def import_stage_from_module(module): try: init = module.init except: init = dinit work = module.work (init, work) = (init, work) ret = {} dict_ = {'init': init, 'work': work} try: code = {'init': init.__code__, 'work': work.__code__} except AttributeError: code = {'init': init.func_code, 'work': work.func_code} ret['object'] = dict_ ret['python'] = code try: marshaled = marshal.dumps(code) except ValueError: marshaled = None try: import dill dilled = dill.dumps(code) except ImportError: dilled = None ret['dill'] = dilled ret['marshal'] = marshaled ret = ret try: shell_class = module.shell except Exception as e: shell_class = None ret['shell'] = shell_class return ret
covertutils
positive
def test_connection(self): self.error_message = '' self.error_details = '' <DeepExtract> server = ldap3.Server(self.host, get_info=ldap3.ALL, use_ssl=self.ssl) ldap_user = '{0}\\{1}'.format(self.domain, self.bind_user) auth_type = ldap3.SIMPLE if self.auth_type == self.AUTH_METHOD_SIMPLE else ldap3.NTLM connection = ldap3.Connection(server, user=ldap_user, password=self.bind_pass, authentication=auth_type) try: self.last_result = None result = connection.bind() self.last_result = connection.result except (LDAPSocketOpenError, LDAPSocketSendError) as e: self.error_message = 'Internal Error: Could not connect to the LDAP Server.' self.error_details = str(e) connection = False except Exception as e: self.error_message = 'Internal Error: Something is wrong with the LDAP Server.' self.error_details = str(e) connection = False connection = connection if result else False </DeepExtract> if connection: connection.unbind() return True return False
def test_connection(self): self.error_message = '' self.error_details = '' server = ldap3.Server(self.host, get_info=ldap3.ALL, use_ssl=self.ssl) ldap_user = '{0}\\{1}'.format(self.domain, self.bind_user) auth_type = ldap3.SIMPLE if self.auth_type == self.AUTH_METHOD_SIMPLE else ldap3.NTLM connection = ldap3.Connection(server, user=ldap_user, password=self.bind_pass, authentication=auth_type) try: self.last_result = None result = connection.bind() self.last_result = connection.result except (LDAPSocketOpenError, LDAPSocketSendError) as e: self.error_message = 'Internal Error: Could not connect to the LDAP Server.' self.error_details = str(e) connection = False except Exception as e: self.error_message = 'Internal Error: Something is wrong with the LDAP Server.' self.error_details = str(e) connection = False connection = connection if result else False if connection: connection.unbind() return True return False
crackerjack
positive
def post(self): """ Instruct a service to start or stop by posting a command. Sample request body { "command": <start|stop> } :return: {"success": True|False, "message": <message>} """ try: msg = json.loads(self.request.body) command = msg['command'] if command == 'start': <DeepExtract> shared_memory_locks['data_worker'].acquire() if not self.shared_memory_manager_dict['data_worker_configured']: shared_memory_locks['data_worker'].release() message = 'not configured, will not start' if self.shared_memory_manager_dict['data_worker_running']: log.info('data worker already running') shared_memory_locks['data_worker'].release() message = 'already running' shared_memory_locks['data_worker'].release() data_worker_process = mp.Process(target=run_data_worker_process, args=(self.shared_memory_manager_dict,)) data_worker_process.start() shared_memory_locks['data_worker'].acquire() self.shared_memory_manager_dict['data_worker_process'] = data_worker_process.pid shared_memory_locks['data_worker'].release() message = 'instructed to start' </DeepExtract> self.write({'success': True, 'message': message}) elif command == 'stop': <DeepExtract> shared_memory_locks['data_worker'].acquire() self.shared_memory_manager_dict['data_worker_should_run'] = False shared_memory_locks['data_worker'].release() time_waiting = 0 while True: if not self.shared_memory_manager_dict['data_worker_running']: break time.sleep(1) time_waiting += 1 if time_waiting == MAX_DATA_WORKER_WAIT_TIMEOUT: log.error('timeout expired during stop-waiting, will kill process non-gracefully') if self.shared_memory_manager_dict['data_worker_process'] is not None: os.kill(self.shared_memory_manager_dict['data_worker_process'], signal.SIGKILL) shared_memory_locks['data_worker'].acquire() self.shared_memory_manager_dict['data_worker_process'] = None shared_memory_locks['data_worker'].release() shared_memory_locks['data_worker'].acquire() self.shared_memory_manager_dict['data_worker_running'] = False shared_memory_locks['data_worker'].release() message = 'killed' message = 'instructed to stop' message = message </DeepExtract> self.write({'success': True, 'message': message}) else: self.write({'success': False, 'message': 'unknown command'}) except Exception: log.exception('Exception') self.write({'success': False, 'message': 'error during control'})
def post(self): """ Instruct a service to start or stop by posting a command. Sample request body { "command": <start|stop> } :return: {"success": True|False, "message": <message>} """ try: msg = json.loads(self.request.body) command = msg['command'] if command == 'start': shared_memory_locks['data_worker'].acquire() if not self.shared_memory_manager_dict['data_worker_configured']: shared_memory_locks['data_worker'].release() message = 'not configured, will not start' if self.shared_memory_manager_dict['data_worker_running']: log.info('data worker already running') shared_memory_locks['data_worker'].release() message = 'already running' shared_memory_locks['data_worker'].release() data_worker_process = mp.Process(target=run_data_worker_process, args=(self.shared_memory_manager_dict,)) data_worker_process.start() shared_memory_locks['data_worker'].acquire() self.shared_memory_manager_dict['data_worker_process'] = data_worker_process.pid shared_memory_locks['data_worker'].release() message = 'instructed to start' self.write({'success': True, 'message': message}) elif command == 'stop': shared_memory_locks['data_worker'].acquire() self.shared_memory_manager_dict['data_worker_should_run'] = False shared_memory_locks['data_worker'].release() time_waiting = 0 while True: if not self.shared_memory_manager_dict['data_worker_running']: break time.sleep(1) time_waiting += 1 if time_waiting == MAX_DATA_WORKER_WAIT_TIMEOUT: log.error('timeout expired during stop-waiting, will kill process non-gracefully') if self.shared_memory_manager_dict['data_worker_process'] is not None: os.kill(self.shared_memory_manager_dict['data_worker_process'], signal.SIGKILL) shared_memory_locks['data_worker'].acquire() self.shared_memory_manager_dict['data_worker_process'] = None shared_memory_locks['data_worker'].release() shared_memory_locks['data_worker'].acquire() self.shared_memory_manager_dict['data_worker_running'] = False shared_memory_locks['data_worker'].release() message = 'killed' message = 'instructed to stop' message = message self.write({'success': True, 'message': message}) else: self.write({'success': False, 'message': 'unknown command'}) except Exception: log.exception('Exception') self.write({'success': False, 'message': 'error during control'})
artemis
positive
def __call__(self, clip, target, transform_randoms): if self.to_bgr: clip = clip[[2, 1, 0]] <DeepExtract> for (t, m, s) in zip(clip, self.mean, self.std): t.sub_(m).div_(s) clip = clip </DeepExtract> return (clip, target, transform_randoms)
def __call__(self, clip, target, transform_randoms): if self.to_bgr: clip = clip[[2, 1, 0]] for (t, m, s) in zip(clip, self.mean, self.std): t.sub_(m).div_(s) clip = clip return (clip, target, transform_randoms)
AlphAction
positive
@copy_docstring(Geometric.randomise) def randomise(self, value): <DeepExtract> super()._check_all(value) self._check_sensitivity(self.sensitivity) if not isinstance(value, Integral): raise TypeError('Value to be randomised must be an integer') </DeepExtract> noisy_value = super().randomise(value) return int(np.round(self._fold(noisy_value)))
@copy_docstring(Geometric.randomise) def randomise(self, value): super()._check_all(value) self._check_sensitivity(self.sensitivity) if not isinstance(value, Integral): raise TypeError('Value to be randomised must be an integer') noisy_value = super().randomise(value) return int(np.round(self._fold(noisy_value)))
differential-privacy-library
positive
def write_exclude_deletes(self): """Write out deletes for excluded files.""" if self.excludes: <DeepExtract> self.specfile.write_strip('## Remove excluded files') </DeepExtract> for exclude in self.excludes: <DeepExtract> self.specfile.write_strip(f'rm -f %{{buildroot}}*{exclude}') </DeepExtract>
def write_exclude_deletes(self): """Write out deletes for excluded files.""" if self.excludes: self.specfile.write_strip('## Remove excluded files') for exclude in self.excludes: self.specfile.write_strip(f'rm -f %{{buildroot}}*{exclude}')
autospec
positive
def fit(self, corpus): """Train language model on ``corpus`` Arguments: corpus (list of list): Each inner list is a sentence (a sequence of words) """ self.word2cnt = Counter(chain(*corpus)) self.word2cnt[self.left_pad_symbol] = len(corpus) * (self.order - 1) self.word2cnt[self.right_pad_symbol] = len(corpus) * (self.order - 1) padding_fn = partial(pad_both_ends, n=self.order, left_pad_symbol=self.left_pad_symbol, right_pad_symbol=self.right_pad_symbol) train_data = (everygrams(list(padding_fn(sent)), max_len=self.order) for sent in corpus) for sent in train_data: for ngram in sent: <DeepExtract> res = [] for word in ngram: if self.word2cnt.get(word, 0) < self.cutoff and word not in [self.left_pad_symbol, self.right_pad_symbol]: res.append(self.unk_symbol) else: res.append(word) ngram = tuple(res) </DeepExtract> order = len(ngram) self.n2ngram2cnt[order][ngram] = self.n2ngram2cnt[order].get(ngram, 0) + 1 if len(ngram) >= 2: suffix = ngram[1:] midseq = ngram[1:-1] if suffix not in self.suffix2concnt: self.suffix2concnt[suffix] = set() self.suffix2concnt[suffix].add(ngram[0]) if midseq not in self.midseq2concnt: self.midseq2concnt[midseq] = set() self.midseq2concnt[midseq].add((ngram[0], ngram[-1])) for stat in [self.suffix2concnt, self.midseq2concnt]: for (key, val) in stat.items(): stat[key] = len(val) <DeepExtract> for order in range(1, self.order + 1): n = [0] * 4 for (_, cnt) in self.n2ngram2cnt[order].items(): if cnt < 5: n[cnt - 1] += 1 self.n2cnt2discount[order][0] = 0 try: Y = n[0] / (n[0] + 2 * n[1]) self.n2cnt2discount[order][1] = max(1 - 2 * Y * n[1] / n[0], 0) self.n2cnt2discount[order][2] = max(2 - 3 * Y * n[2] / n[1], 0) self.n2cnt2discount[order][3] = max(3 - 4 * Y * n[3] / n[2], 0) except ZeroDivisionError: self.n2cnt2discount[order][1] = self.default_delta_1 self.n2cnt2discount[order][2] = self.default_delta_2 self.n2cnt2discount[order][3] = self.default_delta_3 </DeepExtract>
def fit(self, corpus): """Train language model on ``corpus`` Arguments: corpus (list of list): Each inner list is a sentence (a sequence of words) """ self.word2cnt = Counter(chain(*corpus)) self.word2cnt[self.left_pad_symbol] = len(corpus) * (self.order - 1) self.word2cnt[self.right_pad_symbol] = len(corpus) * (self.order - 1) padding_fn = partial(pad_both_ends, n=self.order, left_pad_symbol=self.left_pad_symbol, right_pad_symbol=self.right_pad_symbol) train_data = (everygrams(list(padding_fn(sent)), max_len=self.order) for sent in corpus) for sent in train_data: for ngram in sent: res = [] for word in ngram: if self.word2cnt.get(word, 0) < self.cutoff and word not in [self.left_pad_symbol, self.right_pad_symbol]: res.append(self.unk_symbol) else: res.append(word) ngram = tuple(res) order = len(ngram) self.n2ngram2cnt[order][ngram] = self.n2ngram2cnt[order].get(ngram, 0) + 1 if len(ngram) >= 2: suffix = ngram[1:] midseq = ngram[1:-1] if suffix not in self.suffix2concnt: self.suffix2concnt[suffix] = set() self.suffix2concnt[suffix].add(ngram[0]) if midseq not in self.midseq2concnt: self.midseq2concnt[midseq] = set() self.midseq2concnt[midseq].add((ngram[0], ngram[-1])) for stat in [self.suffix2concnt, self.midseq2concnt]: for (key, val) in stat.items(): stat[key] = len(val) for order in range(1, self.order + 1): n = [0] * 4 for (_, cnt) in self.n2ngram2cnt[order].items(): if cnt < 5: n[cnt - 1] += 1 self.n2cnt2discount[order][0] = 0 try: Y = n[0] / (n[0] + 2 * n[1]) self.n2cnt2discount[order][1] = max(1 - 2 * Y * n[1] / n[0], 0) self.n2cnt2discount[order][2] = max(2 - 3 * Y * n[2] / n[1], 0) self.n2cnt2discount[order][3] = max(3 - 4 * Y * n[3] / n[2], 0) except ZeroDivisionError: self.n2cnt2discount[order][1] = self.default_delta_1 self.n2cnt2discount[order][2] = self.default_delta_2 self.n2cnt2discount[order][3] = self.default_delta_3
cotk
positive
def generate_new(self, project_id: str, project_name: str, app_name: str, project_dir: str, database_user: str, database_password: str, cloud_sql_proxy_port: Optional[int]=None, cloud_storage_bucket_name: Optional[str]=None, file_storage_bucket_name: Optional[str]=None, cloudsql_secrets: Optional[List[str]]=None, django_secrets: Optional[List[str]]=None, instance_name: Optional[str]=None, database_name: Optional[str]=None, region: Optional[str]='us-west1', image_tag: Optional[str]=None, service_name: Optional[str]=None): """Generate all source files of a Django app to be deployed to GCP. Args: project_id: Your GCP project id. This can be got from your GCP console. project_name: Name of your Django project. app_name: The app that you want to create in your project. project_dir: The destination directory path to put your Django project. database_user: The name of the database user. By default it is "postgres". This is required for Django app to access database. database_password: The database password to set. cloud_sql_proxy_port: The port being forwarded by cloud sql proxy. cloud_storage_bucket_name: Google Cloud Storage bucket name to serve static content. file_storage_bucket_name: Google Cloud Storage bucket name to serve static content. cloudsql_secrets: A list of secrets needed by cloud sql proxy container. django_secrets: A list of secrets needed by Django app container. instance_name: The name of cloud sql instance for database or the Django project. The default value for instance_name should be the project name. database_name: Name of your cloud database. region: Where to host the Django project. image_tag: A customized docker image tag used in integration tests. service_name: Name of App engine services. This is helpful in e2e test. See https://cloud.google.com/appengine/docs/standard/python/an-overview-of-app-engine#services """ project_dir = os.path.abspath(os.path.expanduser(project_dir)) os.makedirs(project_dir, exist_ok=True) <DeepExtract> for the_file in os.listdir(project_dir): file_path = os.path.join(project_dir, the_file) if os.path.isfile(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) </DeepExtract> instance_name = instance_name or project_name + '-instance' cloud_sql_connection_string = '{}:{}:{}'.format(project_id, region, instance_name) self.django_project_generator.generate_new(project_name, project_dir, app_name) self.django_app_generator.generate_new(app_name, project_dir) self.settings_file_generator.generate_new(project_id, project_name, project_dir, cloud_sql_connection_string, database_name, cloud_storage_bucket_name, file_storage_bucket_name) self.docker_file_generator.generate_new(project_name, project_dir) self.dependency_file_generator.generate_new(project_dir) self.yaml_file_generator.generate_new(project_dir, project_name, project_id, instance_name, region, image_tag, cloudsql_secrets, django_secrets) self.app_engine_file_generator.generate_new(project_name, project_dir, service_name) django_settings_path = os.path.join(project_dir, project_name, 'cloud_settings.py') <DeepExtract> requirements_path = os.path.join(project_dir, 'requirements.txt') try: subprocess.call(['python3', '-m', 'pip', 'install', '-r', requirements_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) except (SystemExit, subprocess.CalledProcessError): print('Failed to install some packages listed in {}. This may or may not cause failures in deployment. 
If deployment fails, please try running "python3 -m pip install -r {}" and fix any errors.'.format(requirements_path, requirements_path)) </DeepExtract> <DeepExtract> os.environ['DATABASE_USER'] = database_user os.environ['DATABASE_PASSWORD'] = database_password if cloud_sql_proxy_port: os.environ['CLOUD_SQL_PROXY_PORT'] = str(cloud_sql_proxy_port) sys.path.append(project_dir) relative_settings_path = os.path.relpath(django_settings_path, project_dir) cloud_settings_module = '.'.join(relative_settings_path.split('/')[:-1] + ['cloud_settings']) os.environ['DJANGO_SETTINGS_MODULE'] = cloud_settings_module try: django.setup() except Exception as e: raise crash_handling.UserError('Not able to import Django settings file.') from e </DeepExtract>
def generate_new(self, project_id: str, project_name: str, app_name: str, project_dir: str, database_user: str, database_password: str, cloud_sql_proxy_port: Optional[int]=None, cloud_storage_bucket_name: Optional[str]=None, file_storage_bucket_name: Optional[str]=None, cloudsql_secrets: Optional[List[str]]=None, django_secrets: Optional[List[str]]=None, instance_name: Optional[str]=None, database_name: Optional[str]=None, region: Optional[str]='us-west1', image_tag: Optional[str]=None, service_name: Optional[str]=None): """Generate all source files of a Django app to be deployed to GCP. Args: project_id: Your GCP project id. This can be got from your GCP console. project_name: Name of your Django project. app_name: The app that you want to create in your project. project_dir: The destination directory path to put your Django project. database_user: The name of the database user. By default it is "postgres". This is required for Django app to access database. database_password: The database password to set. cloud_sql_proxy_port: The port being forwarded by cloud sql proxy. cloud_storage_bucket_name: Google Cloud Storage bucket name to serve static content. file_storage_bucket_name: Google Cloud Storage bucket name to serve static content. cloudsql_secrets: A list of secrets needed by cloud sql proxy container. django_secrets: A list of secrets needed by Django app container. instance_name: The name of cloud sql instance for database or the Django project. The default value for instance_name should be the project name. database_name: Name of your cloud database. region: Where to host the Django project. image_tag: A customized docker image tag used in integration tests. service_name: Name of App engine services. This is helpful in e2e test. See https://cloud.google.com/appengine/docs/standard/python/an-overview-of-app-engine#services """ project_dir = os.path.abspath(os.path.expanduser(project_dir)) os.makedirs(project_dir, exist_ok=True) for the_file in os.listdir(project_dir): file_path = os.path.join(project_dir, the_file) if os.path.isfile(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) instance_name = instance_name or project_name + '-instance' cloud_sql_connection_string = '{}:{}:{}'.format(project_id, region, instance_name) self.django_project_generator.generate_new(project_name, project_dir, app_name) self.django_app_generator.generate_new(app_name, project_dir) self.settings_file_generator.generate_new(project_id, project_name, project_dir, cloud_sql_connection_string, database_name, cloud_storage_bucket_name, file_storage_bucket_name) self.docker_file_generator.generate_new(project_name, project_dir) self.dependency_file_generator.generate_new(project_dir) self.yaml_file_generator.generate_new(project_dir, project_name, project_id, instance_name, region, image_tag, cloudsql_secrets, django_secrets) self.app_engine_file_generator.generate_new(project_name, project_dir, service_name) django_settings_path = os.path.join(project_dir, project_name, 'cloud_settings.py') requirements_path = os.path.join(project_dir, 'requirements.txt') try: subprocess.call(['python3', '-m', 'pip', 'install', '-r', requirements_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) except (SystemExit, subprocess.CalledProcessError): print('Failed to install some packages listed in {}. This may or may not cause failures in deployment. 
If deployment fails, please try running "python3 -m pip install -r {}" and fix any errors.'.format(requirements_path, requirements_path)) os.environ['DATABASE_USER'] = database_user os.environ['DATABASE_PASSWORD'] = database_password if cloud_sql_proxy_port: os.environ['CLOUD_SQL_PROXY_PORT'] = str(cloud_sql_proxy_port) sys.path.append(project_dir) relative_settings_path = os.path.relpath(django_settings_path, project_dir) cloud_settings_module = '.'.join(relative_settings_path.split('/')[:-1] + ['cloud_settings']) os.environ['DJANGO_SETTINGS_MODULE'] = cloud_settings_module try: django.setup() except Exception as e: raise crash_handling.UserError('Not able to import Django settings file.') from e
django-cloud-deploy
positive
def __init__(self, configs, time, test_dataset_id, query): logger.info('dtlpy version: ' + str(dl.__version__)) logger.info('dtlpy info: ' + str(dl.info())) time = int(time) dl.setenv('prod') configs = json.loads(configs) query = json.loads(query) self.configs_input = dl.FunctionIO(type='Json', name='configs', value=configs) self.service = dl.services.get('zazu') project_name = configs['dataloop']['project'] self.project = dl.projects.get(project_name) test_dataset = self.project.datasets.get(dataset_id=test_dataset_id) maybe_download_pred_data(dataset_obj=test_dataset, val_query=query) filters = dl.Filters() filters.custom_filter = query dataset_name = test_dataset.name path_to_dataset = os.path.join(os.getcwd(), dataset_name) if not os.path.exists(path_to_dataset): download_and_organize(path_to_dataset=path_to_dataset, dataset_obj=test_dataset, filters=filters) json_file_path = os.path.join(path_to_dataset, 'json') self.model_obj = self.project.models.get(model_name='object_detection') self.adapter = self.model_obj.build(local_path=os.getcwd()) logger.info('model built') while 1: self.compute = precision_recall_compute() self.compute.add_dataloop_local_annotations(json_file_path) logger.info('running new execution') execution_obj = self.service.execute(function_name='search', execution_input=[self.configs_input], project_id='72bb623f-517f-472b-ad69-104fed8ee94a') while execution_obj.latest_status['status'] != 'success': sleep(5) execution_obj = dl.executions.get(execution_id=execution_obj.id) if execution_obj.latest_status['status'] == 'failed': raise Exception('plugin execution failed') logger.info('execution object status is successful') self.project.artifacts.download(package_name='zazuml', execution_id=execution_obj.id, local_path=os.getcwd()) logs_file_name = 'timer_logs_' + str(execution_obj.id) + '.conf' graph_file_name = 'precision_recall_' + str(execution_obj.id) + '.png' self.cycle_logger = init_logging(__name__, filename=logs_file_name) logger.info('artifact download finished') logger.info(str(os.listdir('.'))) new_checkpoint_name = 'checkpoint_' + str(execution_obj.id) + '.pt' logger.info(str(os.listdir('.'))) os.rename('checkpoint0.pt', new_checkpoint_name) new_model_name = new_checkpoint_name[:-3] logger.info(str(os.listdir('.'))) new_checkpoint = torch.load(new_checkpoint_name, map_location=torch.device('cpu')) self.new_home_path = new_checkpoint['model_specs']['data']['home_path'] <DeepExtract> self.adapter.load_inference(checkpoint_path=new_checkpoint_name) logger.info('checkpoint loaded') output_path = self.adapter.predict(output_dir=new_model_name, home_path=self.new_home_path) logger.info('predictions in : ' + output_path) logger.info(os.listdir(output_path)) self.compute.add_path_detections(output_path, model_name=new_model_name) logger.info(str(self.compute.by_model_name.keys())) </DeepExtract> if len(self.compute.by_model_name.keys()) < 2: logger.info("model couldn't make any predictions, trying to train again") continue if 'check0' not in [checkp.name for checkp in self.model_obj.checkpoints.list()]: logger.info('there is no check0, will add upload new checkpoint as check0 and deploy prediction service') new_checkpoint_obj = self.model_obj.checkpoints.upload(checkpoint_name='check0', local_path=new_checkpoint_name) logger.info('uploaded this checkpoint as the new check0 : ' + new_checkpoint_name[:-3]) <DeepExtract> if 'predict' not in [s.name for s in dl.services.list()]: logger.info('predict service doesnt exist, about to deploy prediction service') package_obj = 
dl.packages.get('zazuml') deploy_predict_item(package=package_obj, model_id=self.model_obj.id, checkpoint_id=new_checkpoint_obj.id) logger.info('service deployed') logger.info('deployed prediction service') create_trigger() logger.info('created prediction trigger') else: logger.info('predict service exists, no reason to relaunch') </DeepExtract> continue logger.info('i guess check0 does exist') best_checkpoint = self.model_obj.checkpoints.get('check0') check0_path = best_checkpoint.download(local_path=os.getcwd()) logger.info('downloading best checkpoint') logger.info(str(os.listdir('.'))) logger.info('check0 path is: ' + str(check0_path)) <DeepExtract> self.adapter.load_inference(checkpoint_path=check0_path) logger.info('checkpoint loaded') output_path = self.adapter.predict(output_dir=best_checkpoint.name, home_path=self.new_home_path) logger.info('predictions in : ' + output_path) logger.info(os.listdir(output_path)) self.compute.add_path_detections(output_path, model_name=best_checkpoint.name) logger.info(str(self.compute.by_model_name.keys())) </DeepExtract> new_checkpoint_mAP = self.compute.get_metric(model_name=new_model_name, precision_to_recall_ratio=1.0) best_checkpoint_mAP = self.compute.get_metric(model_name=best_checkpoint.name, precision_to_recall_ratio=1.0) logger.info('best checkpoint: ' + str(best_checkpoint_mAP)) logger.info('new checkpoint: ' + str(new_checkpoint_mAP)) if new_checkpoint_mAP > best_checkpoint_mAP: logger.info('new checkpoint is better') logger.info('uploading old best checkpoint under new name') self.model_obj.checkpoints.upload(checkpoint_name='checkpoint_' + check0_path.split('_')[-1][:-3], local_path=check0_path) logger.info('deleting old best checkpoint') best_checkpoint.delete() logger.info('uploading new best checkpoint as check0') new_best_checkpoint_obj = self.model_obj.checkpoints.upload(checkpoint_name='check0', local_path=new_checkpoint_name) if 'predict' not in [s.name for s in dl.services.list()]: <DeepExtract> if 'predict' not in [s.name for s in dl.services.list()]: logger.info('predict service doesnt exist, about to deploy prediction service') package_obj = dl.packages.get('zazuml') deploy_predict_item(package=package_obj, model_id=self.model_obj.id, checkpoint_id=new_best_checkpoint_obj.id) logger.info('service deployed') logger.info('deployed prediction service') create_trigger() logger.info('created prediction trigger') else: logger.info('predict service exists, no reason to relaunch') </DeepExtract> else: <DeepExtract> logger.info('update predict service') predict_service = dl.services.get('predict') logger.info('service: ' + str(predict_service)) predict_service.input_params = {'model_id': self.model_obj.id, 'checkpoint_id': new_best_checkpoint_obj.id} predict_service.update() logger.info('service: ' + str(predict_service)) </DeepExtract> logger.info('switched with new checkpoint') self.compute.save_plot_metrics(save_path=graph_file_name) self.project.artifacts.upload(filepath=logs_file_name, package_name='zazuml', execution_id=execution_obj.id) self.project.artifacts.upload(filepath=graph_file_name, package_name='zazuml', execution_id=execution_obj.id) logger.info('waiting ' + str(time) + ' seconds for next execution . . . .') sleep(time)
def __init__(self, configs, time, test_dataset_id, query): logger.info('dtlpy version: ' + str(dl.__version__)) logger.info('dtlpy info: ' + str(dl.info())) time = int(time) dl.setenv('prod') configs = json.loads(configs) query = json.loads(query) self.configs_input = dl.FunctionIO(type='Json', name='configs', value=configs) self.service = dl.services.get('zazu') project_name = configs['dataloop']['project'] self.project = dl.projects.get(project_name) test_dataset = self.project.datasets.get(dataset_id=test_dataset_id) maybe_download_pred_data(dataset_obj=test_dataset, val_query=query) filters = dl.Filters() filters.custom_filter = query dataset_name = test_dataset.name path_to_dataset = os.path.join(os.getcwd(), dataset_name) if not os.path.exists(path_to_dataset): download_and_organize(path_to_dataset=path_to_dataset, dataset_obj=test_dataset, filters=filters) json_file_path = os.path.join(path_to_dataset, 'json') self.model_obj = self.project.models.get(model_name='object_detection') self.adapter = self.model_obj.build(local_path=os.getcwd()) logger.info('model built') while 1: self.compute = precision_recall_compute() self.compute.add_dataloop_local_annotations(json_file_path) logger.info('running new execution') execution_obj = self.service.execute(function_name='search', execution_input=[self.configs_input], project_id='72bb623f-517f-472b-ad69-104fed8ee94a') while execution_obj.latest_status['status'] != 'success': sleep(5) execution_obj = dl.executions.get(execution_id=execution_obj.id) if execution_obj.latest_status['status'] == 'failed': raise Exception('plugin execution failed') logger.info('execution object status is successful') self.project.artifacts.download(package_name='zazuml', execution_id=execution_obj.id, local_path=os.getcwd()) logs_file_name = 'timer_logs_' + str(execution_obj.id) + '.conf' graph_file_name = 'precision_recall_' + str(execution_obj.id) + '.png' self.cycle_logger = init_logging(__name__, filename=logs_file_name) logger.info('artifact download finished') logger.info(str(os.listdir('.'))) new_checkpoint_name = 'checkpoint_' + str(execution_obj.id) + '.pt' logger.info(str(os.listdir('.'))) os.rename('checkpoint0.pt', new_checkpoint_name) new_model_name = new_checkpoint_name[:-3] logger.info(str(os.listdir('.'))) new_checkpoint = torch.load(new_checkpoint_name, map_location=torch.device('cpu')) self.new_home_path = new_checkpoint['model_specs']['data']['home_path'] self.adapter.load_inference(checkpoint_path=new_checkpoint_name) logger.info('checkpoint loaded') output_path = self.adapter.predict(output_dir=new_model_name, home_path=self.new_home_path) logger.info('predictions in : ' + output_path) logger.info(os.listdir(output_path)) self.compute.add_path_detections(output_path, model_name=new_model_name) logger.info(str(self.compute.by_model_name.keys())) if len(self.compute.by_model_name.keys()) < 2: logger.info("model couldn't make any predictions, trying to train again") continue if 'check0' not in [checkp.name for checkp in self.model_obj.checkpoints.list()]: logger.info('there is no check0, will add upload new checkpoint as check0 and deploy prediction service') new_checkpoint_obj = self.model_obj.checkpoints.upload(checkpoint_name='check0', local_path=new_checkpoint_name) logger.info('uploaded this checkpoint as the new check0 : ' + new_checkpoint_name[:-3]) if 'predict' not in [s.name for s in dl.services.list()]: logger.info('predict service doesnt exist, about to deploy prediction service') package_obj = dl.packages.get('zazuml') 
deploy_predict_item(package=package_obj, model_id=self.model_obj.id, checkpoint_id=new_checkpoint_obj.id) logger.info('service deployed') logger.info('deployed prediction service') create_trigger() logger.info('created prediction trigger') else: logger.info('predict service exists, no reason to relaunch') continue logger.info('i guess check0 does exist') best_checkpoint = self.model_obj.checkpoints.get('check0') check0_path = best_checkpoint.download(local_path=os.getcwd()) logger.info('downloading best checkpoint') logger.info(str(os.listdir('.'))) logger.info('check0 path is: ' + str(check0_path)) self.adapter.load_inference(checkpoint_path=check0_path) logger.info('checkpoint loaded') output_path = self.adapter.predict(output_dir=best_checkpoint.name, home_path=self.new_home_path) logger.info('predictions in : ' + output_path) logger.info(os.listdir(output_path)) self.compute.add_path_detections(output_path, model_name=best_checkpoint.name) logger.info(str(self.compute.by_model_name.keys())) new_checkpoint_mAP = self.compute.get_metric(model_name=new_model_name, precision_to_recall_ratio=1.0) best_checkpoint_mAP = self.compute.get_metric(model_name=best_checkpoint.name, precision_to_recall_ratio=1.0) logger.info('best checkpoint: ' + str(best_checkpoint_mAP)) logger.info('new checkpoint: ' + str(new_checkpoint_mAP)) if new_checkpoint_mAP > best_checkpoint_mAP: logger.info('new checkpoint is better') logger.info('uploading old best checkpoint under new name') self.model_obj.checkpoints.upload(checkpoint_name='checkpoint_' + check0_path.split('_')[-1][:-3], local_path=check0_path) logger.info('deleting old best checkpoint') best_checkpoint.delete() logger.info('uploading new best checkpoint as check0') new_best_checkpoint_obj = self.model_obj.checkpoints.upload(checkpoint_name='check0', local_path=new_checkpoint_name) if 'predict' not in [s.name for s in dl.services.list()]: if 'predict' not in [s.name for s in dl.services.list()]: logger.info('predict service doesnt exist, about to deploy prediction service') package_obj = dl.packages.get('zazuml') deploy_predict_item(package=package_obj, model_id=self.model_obj.id, checkpoint_id=new_best_checkpoint_obj.id) logger.info('service deployed') logger.info('deployed prediction service') create_trigger() logger.info('created prediction trigger') else: logger.info('predict service exists, no reason to relaunch') else: logger.info('update predict service') predict_service = dl.services.get('predict') logger.info('service: ' + str(predict_service)) predict_service.input_params = {'model_id': self.model_obj.id, 'checkpoint_id': new_best_checkpoint_obj.id} predict_service.update() logger.info('service: ' + str(predict_service)) logger.info('switched with new checkpoint') self.compute.save_plot_metrics(save_path=graph_file_name) self.project.artifacts.upload(filepath=logs_file_name, package_name='zazuml', execution_id=execution_obj.id) self.project.artifacts.upload(filepath=graph_file_name, package_name='zazuml', execution_id=execution_obj.id) logger.info('waiting ' + str(time) + ' seconds for next execution . . . .') sleep(time)
AutoML
positive
def test_event_with_container_field(self): self.statsd._container_id = 'fake-container-id' self.statsd.event('Title', 'L1\nL2', priority='low', date_happened=1375296969) event2 = u'_e{5,6}:Title|L1\\nL2|d:1375296969|p:low|c:fake-container-id\n' <DeepExtract> if telemetry_metrics(metrics=0, events=1, bytes_sent=len(event2)) is None: telemetry_metrics(metrics=0, events=1, bytes_sent=len(event2)) = telemetry_metrics(bytes_sent=len(event2)) if event2: event2 = '\n'.join([event2, telemetry_metrics(metrics=0, events=1, bytes_sent=len(event2))]) else: event2 = telemetry_metrics(metrics=0, events=1, bytes_sent=len(event2)) return self.assertEqual(event2, self.recv(2)) </DeepExtract> self.statsd._reset_telemetry() self.statsd.event('Title', u'♬ â€\xa0øU â€\xa0øU ¥ºu T0µ ♪', aggregation_key='key', tags=['t1', 't2:v2']) event3 = u'_e{5,32}:Title|♬ â€\xa0øU â€\xa0øU ¥ºu T0µ ♪|k:key|#t1,t2:v2|c:fake-container-id\n' <DeepExtract> if telemetry_metrics(metrics=0, events=1, bytes_sent=len(event3)) is None: telemetry_metrics(metrics=0, events=1, bytes_sent=len(event3)) = telemetry_metrics(bytes_sent=len(event3)) if event3: event3 = '\n'.join([event3, telemetry_metrics(metrics=0, events=1, bytes_sent=len(event3))]) else: event3 = telemetry_metrics(metrics=0, events=1, bytes_sent=len(event3)) return self.assertEqual(event3, self.recv(2, reset_wait=True)) </DeepExtract> self.statsd._container_id = None
def test_event_with_container_field(self): self.statsd._container_id = 'fake-container-id' self.statsd.event('Title', 'L1\nL2', priority='low', date_happened=1375296969) event2 = u'_e{5,6}:Title|L1\\nL2|d:1375296969|p:low|c:fake-container-id\n' if telemetry_metrics(metrics=0, events=1, bytes_sent=len(event2)) is None: telemetry_metrics(metrics=0, events=1, bytes_sent=len(event2)) = telemetry_metrics(bytes_sent=len(event2)) if event2: event2 = '\n'.join([event2, telemetry_metrics(metrics=0, events=1, bytes_sent=len(event2))]) else: event2 = telemetry_metrics(metrics=0, events=1, bytes_sent=len(event2)) return self.assertEqual(event2, self.recv(2)) self.statsd._reset_telemetry() self.statsd.event('Title', u'♬ â€\xa0øU â€\xa0øU ¥ºu T0µ ♪', aggregation_key='key', tags=['t1', 't2:v2']) event3 = u'_e{5,32}:Title|♬ â€\xa0øU â€\xa0øU ¥ºu T0µ ♪|k:key|#t1,t2:v2|c:fake-container-id\n' if telemetry_metrics(metrics=0, events=1, bytes_sent=len(event3)) is None: telemetry_metrics(metrics=0, events=1, bytes_sent=len(event3)) = telemetry_metrics(bytes_sent=len(event3)) if event3: event3 = '\n'.join([event3, telemetry_metrics(metrics=0, events=1, bytes_sent=len(event3))]) else: event3 = telemetry_metrics(metrics=0, events=1, bytes_sent=len(event3)) return self.assertEqual(event3, self.recv(2, reset_wait=True)) self.statsd._container_id = None
datadogpy
positive
def indent_xml(node, level=0, indentstring=' '): text2indent = '\n' + level * indentstring if len(node): if not node.text or not node.text.strip(): node.text = text2indent + indentstring for subnode in node: <DeepExtract> text2indent = '\n' + level + 1 * indentstring if len(subnode): if not subnode.text or not subnode.text.strip(): subnode.text = text2indent + indentstring for subnode in subnode: indent_xml(subnode, level + 1 + 1) if not subnode.tail or not subnode.tail.strip(): subnode.tail = text2indent + indentstring if not subnode.tail or not subnode.tail.strip(): subnode.tail = text2indent elif level + 1 and (not subnode.tail or not subnode.tail.strip()): subnode.tail = text2indent </DeepExtract> if not subnode.tail or not subnode.tail.strip(): subnode.tail = text2indent + indentstring if not subnode.tail or not subnode.tail.strip(): subnode.tail = text2indent elif level and (not node.tail or not node.tail.strip()): node.tail = text2indent
def indent_xml(node, level=0, indentstring=' '): text2indent = '\n' + level * indentstring if len(node): if not node.text or not node.text.strip(): node.text = text2indent + indentstring for subnode in node: text2indent = '\n' + level + 1 * indentstring if len(subnode): if not subnode.text or not subnode.text.strip(): subnode.text = text2indent + indentstring for subnode in subnode: indent_xml(subnode, level + 1 + 1) if not subnode.tail or not subnode.tail.strip(): subnode.tail = text2indent + indentstring if not subnode.tail or not subnode.tail.strip(): subnode.tail = text2indent elif level + 1 and (not subnode.tail or not subnode.tail.strip()): subnode.tail = text2indent if not subnode.tail or not subnode.tail.strip(): subnode.tail = text2indent + indentstring if not subnode.tail or not subnode.tail.strip(): subnode.tail = text2indent elif level and (not node.tail or not node.tail.strip()): node.tail = text2indent
bots
positive
def _get_tally_report(plaintext_ballot: PlaintextTally, selection_names: dict[str, str], contest_names: dict[str, str], selection_write_ins: dict[str, bool], parties: dict[str, str]) -> list:
    tally_report = []
    contests = plaintext_ballot.contests.values()
    for tally_contest in contests:
        selections = list(tally_contest.selections.values())
        <DeepExtract>
        non_write_in_selections = [selection for selection in selections if not selection_write_ins[selection.object_id]]
        non_write_in_total = sum([selection.tally for selection in non_write_in_selections])
        non_write_in_selections_report = _get_selections_report(non_write_in_selections, selection_names, parties, non_write_in_total)
        write_ins = [selection.tally for selection in selections if selection_write_ins[selection.object_id]]
        any_write_ins = len(write_ins) > 0
        write_ins_total = sum(write_ins) if any_write_ins else None
        contest_details = {'selections': non_write_in_selections_report, 'nonWriteInTotal': non_write_in_total, 'writeInTotal': write_ins_total}
        </DeepExtract>
        contest_name = contest_names.get(tally_contest.object_id, 'n/a')
        tally_report.append({'name': contest_name, 'details': contest_details})
    return tally_report
def _get_tally_report(plaintext_ballot: PlaintextTally, selection_names: dict[str, str], contest_names: dict[str, str], selection_write_ins: dict[str, bool], parties: dict[str, str]) -> list:
    tally_report = []
    contests = plaintext_ballot.contests.values()
    for tally_contest in contests:
        selections = list(tally_contest.selections.values())
        non_write_in_selections = [selection for selection in selections if not selection_write_ins[selection.object_id]]
        non_write_in_total = sum([selection.tally for selection in non_write_in_selections])
        non_write_in_selections_report = _get_selections_report(non_write_in_selections, selection_names, parties, non_write_in_total)
        write_ins = [selection.tally for selection in selections if selection_write_ins[selection.object_id]]
        any_write_ins = len(write_ins) > 0
        write_ins_total = sum(write_ins) if any_write_ins else None
        contest_details = {'selections': non_write_in_selections_report, 'nonWriteInTotal': non_write_in_total, 'writeInTotal': write_ins_total}
        contest_name = contest_names.get(tally_contest.object_id, 'n/a')
        tally_report.append({'name': contest_name, 'details': contest_details})
    return tally_report
electionguard-python
positive
def testIntegrationFixedReplayREM(self):
    """Test the FixedReplayMultiHeadDQN agent."""
    assert FLAGS.replay_dir is not None, 'Please provide a replay directory'
    tf.logging.info('####### Training the REM agent #####')
    tf.logging.info('####### REM base_dir: {}'.format(FLAGS.base_dir))
    tf.logging.info('####### replay_dir: {}'.format(FLAGS.replay_dir))
    <DeepExtract>
    FLAGS.gin_bindings = ["create_runner.schedule='continuous_train_and_eval'", 'FixedReplayRunner.training_steps=100', 'FixedReplayRunner.evaluation_steps=10', 'FixedReplayRunner.num_iterations=1', 'FixedReplayRunner.max_steps_per_episode=100']
    FLAGS.alsologtostderr = True
    FLAGS.gin_files = ['batch_rl/fixed_replay/configs/rem.gin']
    FLAGS.agent_name = 'multi_head_dqn'
    </DeepExtract>
    train.main([])
    <DeepExtract>
    self.assertTrue(os.path.exists(os.path.join(self._checkpoint_dir, 'ckpt.0')))
    self.assertTrue(os.path.exists(os.path.join(self._checkpoint_dir, 'checkpoint')))
    self.assertTrue(os.path.exists(os.path.join(self._checkpoint_dir, 'sentinel_checkpoint_complete.0')))
    self.assertTrue(os.path.exists(os.path.join(self._logging_dir, 'log_0')))
    </DeepExtract>
    shutil.rmtree(FLAGS.base_dir)
def testIntegrationFixedReplayREM(self):
    """Test the FixedReplayMultiHeadDQN agent."""
    assert FLAGS.replay_dir is not None, 'Please provide a replay directory'
    tf.logging.info('####### Training the REM agent #####')
    tf.logging.info('####### REM base_dir: {}'.format(FLAGS.base_dir))
    tf.logging.info('####### replay_dir: {}'.format(FLAGS.replay_dir))
    FLAGS.gin_bindings = ["create_runner.schedule='continuous_train_and_eval'", 'FixedReplayRunner.training_steps=100', 'FixedReplayRunner.evaluation_steps=10', 'FixedReplayRunner.num_iterations=1', 'FixedReplayRunner.max_steps_per_episode=100']
    FLAGS.alsologtostderr = True
    FLAGS.gin_files = ['batch_rl/fixed_replay/configs/rem.gin']
    FLAGS.agent_name = 'multi_head_dqn'
    train.main([])
    self.assertTrue(os.path.exists(os.path.join(self._checkpoint_dir, 'ckpt.0')))
    self.assertTrue(os.path.exists(os.path.join(self._checkpoint_dir, 'checkpoint')))
    self.assertTrue(os.path.exists(os.path.join(self._checkpoint_dir, 'sentinel_checkpoint_complete.0')))
    self.assertTrue(os.path.exists(os.path.join(self._logging_dir, 'log_0')))
    shutil.rmtree(FLAGS.base_dir)
CQL
positive
def get_rendered(topics, user):
    <DeepExtract>
    request = self.request_factory.get('/')
    middleware = SessionMiddleware(lambda r: HttpResponse('Response'))
    middleware.process_request(request)
    request.session.save()
    </DeepExtract>
    request.user = user
    ForumPermissionMiddleware(lambda r: HttpResponse('Response')).process_request(request)
    t = Template(self.loadstatement + '{% get_unread_topics topics request.user as unread_topics %}')
    c = Context({'topics': topics, 'request': request})
    rendered = t.render(c)
    return (c, rendered)
def get_rendered(topics, user):
    request = self.request_factory.get('/')
    middleware = SessionMiddleware(lambda r: HttpResponse('Response'))
    middleware.process_request(request)
    request.session.save()
    request.user = user
    ForumPermissionMiddleware(lambda r: HttpResponse('Response')).process_request(request)
    t = Template(self.loadstatement + '{% get_unread_topics topics request.user as unread_topics %}')
    c = Context({'topics': topics, 'request': request})
    rendered = t.render(c)
    return (c, rendered)
django-machina
positive
def test_failure(self):
    validator1 = DummyValidator('msg1')
    validator2 = DummyValidator('msg2')
    <DeepExtract>
    from colander import All
    validator = All(validator1, validator2)
    </DeepExtract>
    <DeepExtract>
    from colander import Invalid
    try:
        validator(None, None)
    except Invalid as exc:
        e = exc
    else:
        raise AssertionError('Invalid not raised')
    </DeepExtract>
    self.assertEqual(e.msg, ['msg1', 'msg2'])
def test_failure(self):
    validator1 = DummyValidator('msg1')
    validator2 = DummyValidator('msg2')
    from colander import All
    validator = All(validator1, validator2)
    from colander import Invalid
    try:
        validator(None, None)
    except Invalid as exc:
        e = exc
    else:
        raise AssertionError('Invalid not raised')
    self.assertEqual(e.msg, ['msg1', 'msg2'])
colander
positive