Columns:
  before - string, lengths 0 to 955k
  after  - string, lengths 0 to 877k
  repo   - string, lengths 1 to 74
  type   - string, 1 class ("positive")
def _calc_maximun_ious(self): <DeepExtract> ious_for_each_gt = [] for (truth_box, truth_label) in zip(self._boxes2, true_labels): x1 = self._boxes1[:, 0] y1 = self._boxes1[:, 1] x2 = self._boxes1[:, 2] y2 = self._boxes1[:, 3] x1_gt = truth_box[0] y1_gt = truth_box[1] x2_gt = truth_box[2] y2_gt = truth_box[3] xx1 = np.maximum(x1, x1_gt) yy1 = np.maximum(y1, y1_gt) xx2 = np.minimum(x2, x2_gt) yy2 = np.minimum(y2, y2_gt) w = np.maximum(0, xx2 - xx1 + 1) h = np.maximum(0, yy2 - yy1 + 1) intersections = w * h As = (x2 - x1 + 1) * (y2 - y1 + 1) B = (x2_gt - x1_gt + 1) * (y2_gt - y1_gt + 1) label_score = (labels == truth_label).astype(np.float) ious = label_score * intersections.astype(float) / (As + B - intersections) ious_for_each_gt.append(ious) ious_for_each_gt = np.array(ious_for_each_gt) ious_for_each_gt = ious_for_each_gt.T </DeepExtract> ious = np.max(ious_for_each_gt, axis=0) return ious
def _calc_maximun_ious(self):
    ious_for_each_gt = []
    for (truth_box, truth_label) in zip(self._boxes2, true_labels):
        x1 = self._boxes1[:, 0]
        y1 = self._boxes1[:, 1]
        x2 = self._boxes1[:, 2]
        y2 = self._boxes1[:, 3]
        x1_gt = truth_box[0]
        y1_gt = truth_box[1]
        x2_gt = truth_box[2]
        y2_gt = truth_box[3]
        xx1 = np.maximum(x1, x1_gt)
        yy1 = np.maximum(y1, y1_gt)
        xx2 = np.minimum(x2, x2_gt)
        yy2 = np.minimum(y2, y2_gt)
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        intersections = w * h
        As = (x2 - x1 + 1) * (y2 - y1 + 1)
        B = (x2_gt - x1_gt + 1) * (y2_gt - y1_gt + 1)
        label_score = (labels == truth_label).astype(np.float)
        ious = label_score * intersections.astype(float) / (As + B - intersections)
        ious_for_each_gt.append(ious)
    ious_for_each_gt = np.array(ious_for_each_gt)
    ious_for_each_gt = ious_for_each_gt.T
    ious = np.max(ious_for_each_gt, axis=0)
    return ious
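For reference, a minimal standalone sketch of the IoU formula used above (same +1 pixel convention); the box coordinates below are made-up illustration data, not part of the dataset:

import numpy as np

boxes = np.array([[0, 0, 9, 9], [5, 5, 14, 14]], dtype=float)  # predicted boxes (x1, y1, x2, y2)
gt = np.array([0, 0, 9, 9], dtype=float)                       # one ground-truth box

xx1 = np.maximum(boxes[:, 0], gt[0])
yy1 = np.maximum(boxes[:, 1], gt[1])
xx2 = np.minimum(boxes[:, 2], gt[2])
yy2 = np.minimum(boxes[:, 3], gt[3])
inter = np.maximum(0, xx2 - xx1 + 1) * np.maximum(0, yy2 - yy1 + 1)
area_pred = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
area_gt = (gt[2] - gt[0] + 1) * (gt[3] - gt[1] + 1)
iou = inter / (area_pred + area_gt - inter)
# iou[0] == 1.0 (identical box); iou[1] == 25 / (100 + 100 - 25) == 0.142857...
print(iou)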
aXeleRate
positive
def run_riscv_test_code(self, filnm): riscv_machine.re_init() riscv_machine.base = 'hex' riscv_machine.flavor = 'riscv' <DeepExtract> with open(TEST_DIR_NAME + filnm, 'r') as prog: test_code = prog.read() </DeepExtract> assemble(test_code, riscv_machine)
def run_riscv_test_code(self, filnm):
    riscv_machine.re_init()
    riscv_machine.base = 'hex'
    riscv_machine.flavor = 'riscv'
    with open(TEST_DIR_NAME + filnm, 'r') as prog:
        test_code = prog.read()
    assemble(test_code, riscv_machine)
Emu86
positive
def get_current_arm_state(self): h_min = ARM_MIN_HEIGHT h_max = ARM_MAX_HEIGHT agent_base_location = 0.9009995460510254 event = self.controller.last_event offset = event.metadata['agent']['position']['y'] - agent_base_location h_max += offset h_min += offset joints = event.metadata['arm']['joints'] arm = joints[-1] assert arm['name'] == 'robot_arm_4_jnt' xyz_dict = copy.deepcopy(arm['rootRelativePosition']) height_arm = joints[0]['position']['y'] xyz_dict['h'] = (height_arm - h_min) / (h_max - h_min) <DeepExtract> corrected_dict = copy.deepcopy('realtive hand') for (k, v) in corrected_dict.items(): if math.isnan(v) or math.isinf(v): corrected_dict[k] = 0 xyz_dict = corrected_dict </DeepExtract> return xyz_dict
def get_current_arm_state(self):
    h_min = ARM_MIN_HEIGHT
    h_max = ARM_MAX_HEIGHT
    agent_base_location = 0.9009995460510254
    event = self.controller.last_event
    offset = event.metadata['agent']['position']['y'] - agent_base_location
    h_max += offset
    h_min += offset
    joints = event.metadata['arm']['joints']
    arm = joints[-1]
    assert arm['name'] == 'robot_arm_4_jnt'
    xyz_dict = copy.deepcopy(arm['rootRelativePosition'])
    height_arm = joints[0]['position']['y']
    xyz_dict['h'] = (height_arm - h_min) / (h_max - h_min)
    corrected_dict = copy.deepcopy('realtive hand')
    for (k, v) in corrected_dict.items():
        if math.isnan(v) or math.isinf(v):
            corrected_dict[k] = 0
    xyz_dict = corrected_dict
    return xyz_dict
allenact
positive
def __init__(self, distort_limit=0.05, shift_limit=0.05, prob=0.5): super().__init__(prob) <DeepExtract> if isinstance(shift_limit, tuple): self.shift_limit = shift_limit else: self.shift_limit = (-shift_limit if low is None else low, shift_limit) </DeepExtract> <DeepExtract> if isinstance(distort_limit, tuple): self.distort_limit = distort_limit else: self.distort_limit = (-distort_limit if low is None else low, distort_limit) </DeepExtract> <DeepExtract> if isinstance(shift_limit, tuple): self.shift_limit = shift_limit else: self.shift_limit = (-shift_limit if low is None else low, shift_limit) </DeepExtract>
def __init__(self, distort_limit=0.05, shift_limit=0.05, prob=0.5):
    super().__init__(prob)
    if isinstance(shift_limit, tuple):
        self.shift_limit = shift_limit
    else:
        self.shift_limit = (-shift_limit if low is None else low, shift_limit)
    if isinstance(distort_limit, tuple):
        self.distort_limit = distort_limit
    else:
        self.distort_limit = (-distort_limit if low is None else low, distort_limit)
    if isinstance(shift_limit, tuple):
        self.shift_limit = shift_limit
    else:
        self.shift_limit = (-shift_limit if low is None else low, shift_limit)
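The scalar-or-tuple pattern above can be factored into a small helper; a minimal sketch, where to_range is an illustrative name and low mirrors the optional lower-bound argument of the extracted helper:

def to_range(limit, low=None):
    # Accept either an explicit (lo, hi) tuple or a scalar x meaning (-x, x);
    # an optional `low` overrides the lower bound.
    if isinstance(limit, tuple):
        return limit
    return (-limit if low is None else low, limit)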
dsb2018_topcoders
positive
def find_unmatched(self): <DeepExtract> db = self.get_db() cur = db.cursor() </DeepExtract> try: sql = 'select name, address from functions' cur.execute(sql) rows = cur.fetchall() if len(rows) > 0: choose = self.chooser('Unmatched in secondary', self, False) for row in rows: name = row['name'] if name not in self.matched1: ea = row[1] choose.add_item(CChooser.Item(ea, name)) self.unmatched_second = choose sql = 'select name, address from diff.functions' cur.execute(sql) rows = cur.fetchall() if len(rows) > 0: choose = self.chooser('Unmatched in primary', self, False) for row in rows: name = row['name'] if name not in self.matched2: ea = row['address'] choose.add_item(CChooser.Item(ea, name)) self.unmatched_primary = choose finally: cur.close()
def find_unmatched(self):
    db = self.get_db()
    cur = db.cursor()
    try:
        sql = 'select name, address from functions'
        cur.execute(sql)
        rows = cur.fetchall()
        if len(rows) > 0:
            choose = self.chooser('Unmatched in secondary', self, False)
            for row in rows:
                name = row['name']
                if name not in self.matched1:
                    ea = row[1]
                    choose.add_item(CChooser.Item(ea, name))
            self.unmatched_second = choose
        sql = 'select name, address from diff.functions'
        cur.execute(sql)
        rows = cur.fetchall()
        if len(rows) > 0:
            choose = self.chooser('Unmatched in primary', self, False)
            for row in rows:
                name = row['name']
                if name not in self.matched2:
                    ea = row['address']
                    choose.add_item(CChooser.Item(ea, name))
            self.unmatched_primary = choose
    finally:
        cur.close()
diaphora
positive
def de_bruijn(k, n): """ De Bruijn sequence for alphabet k and subsequences of length n. """ try: _ = int(k) alphabet = list(map(str, range(k))) except (ValueError, TypeError): alphabet = k k = len(k) a = [0] * k * n sequence = [] def db(t, p): if t > n: if n % p == 0: sequence.extend(a[1:p + 1]) else: a[t] = a[t - p] <DeepExtract> if t + 1 > n: if n % p == 0: sequence.extend(a[1:p + 1]) else: a[t + 1] = a[t + 1 - p] db(t + 1 + 1, p) for j in range(a[t + 1 - p] + 1, k): a[t + 1] = j db(t + 1 + 1, t + 1) </DeepExtract> for j in range(a[t - p] + 1, k): a[t] = j <DeepExtract> if t + 1 > n: if n % t == 0: sequence.extend(a[1:t + 1]) else: a[t + 1] = a[t + 1 - t] db(t + 1 + 1, t) for j in range(a[t + 1 - t] + 1, k): a[t + 1] = j db(t + 1 + 1, t + 1) </DeepExtract> <DeepExtract> if 1 > n: if n % 1 == 0: sequence.extend(a[1:1 + 1]) else: a[1] = a[1 - 1] db(1 + 1, 1) for j in range(a[1 - 1] + 1, k): a[1] = j db(1 + 1, 1) </DeepExtract> return ''.join((alphabet[i] for i in sequence))
def de_bruijn(k, n):
    """ De Bruijn sequence for alphabet k and subsequences of length n. """
    try:
        _ = int(k)
        alphabet = list(map(str, range(k)))
    except (ValueError, TypeError):
        alphabet = k
        k = len(k)
    a = [0] * k * n
    sequence = []

    def db(t, p):
        if t > n:
            if n % p == 0:
                sequence.extend(a[1:p + 1])
        else:
            a[t] = a[t - p]
            if t + 1 > n:
                if n % p == 0:
                    sequence.extend(a[1:p + 1])
            else:
                a[t + 1] = a[t + 1 - p]
                db(t + 1 + 1, p)
                for j in range(a[t + 1 - p] + 1, k):
                    a[t + 1] = j
                    db(t + 1 + 1, t + 1)
            for j in range(a[t - p] + 1, k):
                a[t] = j
                if t + 1 > n:
                    if n % t == 0:
                        sequence.extend(a[1:t + 1])
                else:
                    a[t + 1] = a[t + 1 - t]
                    db(t + 1 + 1, t)
                    for j in range(a[t + 1 - t] + 1, k):
                        a[t + 1] = j
                        db(t + 1 + 1, t + 1)
    if 1 > n:
        if n % 1 == 0:
            sequence.extend(a[1:1 + 1])
    else:
        a[1] = a[1 - 1]
        db(1 + 1, 1)
        for j in range(a[1 - 1] + 1, k):
            a[1] = j
            db(1 + 1, 1)
    return ''.join((alphabet[i] for i in sequence))
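For orientation, a compact sketch of the standard recursive De Bruijn construction that the helper above is derived from; de_bruijn_ref is an illustrative name, not part of the repository:

def de_bruijn_ref(alphabet, n):
    # Classic recursive construction (Lyndon-word based).
    k = len(alphabet)
    a = [0] * (k * n)
    sequence = []

    def db(t, p):
        if t > n:
            if n % p == 0:
                sequence.extend(a[1:p + 1])
        else:
            a[t] = a[t - p]
            db(t + 1, p)
            for j in range(a[t - p] + 1, k):
                a[t] = j
                db(t + 1, t)

    db(1, 1)
    return ''.join(alphabet[i] for i in sequence)

# de_bruijn_ref('01', 3) -> '00010111' (every 3-bit string appears once, cyclically)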
Coursera-Bioinformatics
positive
def end_class(self, cls: ClassDefinition) -> None: if self.shape.expression is None: <DeepExtract> if not self.shape.expression: self.shape.expression = TripleConstraint(predicate=RDF.type, min=0, max=-1) elif not isinstance(self.shape.expression, EachOf): self.shape.expression = EachOf(expressions=[self.shape.expression, TripleConstraint(predicate=RDF.type, min=0, max=-1)]) else: self.shape.expression.expressions.append(TripleConstraint(predicate=RDF.type, min=0, max=-1)) </DeepExtract> <DeepExtract> if isinstance(cls, (TypeDefinition, ClassDefinition, EnumDefinition)): cls_or_type = cls else: cls_or_type = self.class_or_type_for(cls) self.shape.expression.id = self.namespaces.uri_for(self.namespaces.uri_or_curie_for(self.schema_defaults[cls_or_type.from_schema], camelcase(cls_or_type.name) + '_tes')) </DeepExtract> self.shape.expression = EachOf(expressions=[self.shape.expression, self._type_arc(cls.class_uri, not bool(self.class_identifier(cls)))]) self.shape.closed = not (cls.abstract or cls.mixin) if cls.name in self.synopsis.isarefs: childrenExprs = [] for child_classname in sorted(list(self.synopsis.isarefs[cls.name].classrefs)): childrenExprs.append(self._class_or_type_uri(child_classname)) if not (cls.mixin or cls.abstract) or len(childrenExprs) == 1: childrenExprs.insert(0, self.shape) self.shapes.append(ShapeOr(id=self._class_or_type_uri(cls), shapeExprs=childrenExprs)) else: self.shapes.append(ShapeOr(id=self._class_or_type_uri(cls), shapeExprs=childrenExprs)) <DeepExtract> if isinstance(cls, (TypeDefinition, ClassDefinition, EnumDefinition)): cls_or_type = cls else: cls_or_type = self.class_or_type_for(cls) self.shape.id = self.namespaces.uri_for(self.namespaces.uri_or_curie_for(self.schema_defaults[cls_or_type.from_schema], camelcase(cls_or_type.name) + '_struct')) </DeepExtract> self.shapes.append(self.shape) else: <DeepExtract> if isinstance(cls, (TypeDefinition, ClassDefinition, EnumDefinition)): cls_or_type = cls else: cls_or_type = self.class_or_type_for(cls) self.shape.id = self.namespaces.uri_for(self.namespaces.uri_or_curie_for(self.schema_defaults[cls_or_type.from_schema], camelcase(cls_or_type.name) + suffix)) </DeepExtract> self.shapes.append(self.shape)
def end_class(self, cls: ClassDefinition) -> None: if self.shape.expression is None: if not self.shape.expression: self.shape.expression = TripleConstraint(predicate=RDF.type, min=0, max=-1) elif not isinstance(self.shape.expression, EachOf): self.shape.expression = EachOf(expressions=[self.shape.expression, TripleConstraint(predicate=RDF.type, min=0, max=-1)]) else: self.shape.expression.expressions.append(TripleConstraint(predicate=RDF.type, min=0, max=-1)) if isinstance(cls, (TypeDefinition, ClassDefinition, EnumDefinition)): cls_or_type = cls else: cls_or_type = self.class_or_type_for(cls) self.shape.expression.id = self.namespaces.uri_for(self.namespaces.uri_or_curie_for(self.schema_defaults[cls_or_type.from_schema], camelcase(cls_or_type.name) + '_tes')) self.shape.expression = EachOf(expressions=[self.shape.expression, self._type_arc(cls.class_uri, not bool(self.class_identifier(cls)))]) self.shape.closed = not (cls.abstract or cls.mixin) if cls.name in self.synopsis.isarefs: childrenExprs = [] for child_classname in sorted(list(self.synopsis.isarefs[cls.name].classrefs)): childrenExprs.append(self._class_or_type_uri(child_classname)) if not (cls.mixin or cls.abstract) or len(childrenExprs) == 1: childrenExprs.insert(0, self.shape) self.shapes.append(ShapeOr(id=self._class_or_type_uri(cls), shapeExprs=childrenExprs)) else: self.shapes.append(ShapeOr(id=self._class_or_type_uri(cls), shapeExprs=childrenExprs)) if isinstance(cls, (TypeDefinition, ClassDefinition, EnumDefinition)): cls_or_type = cls else: cls_or_type = self.class_or_type_for(cls) self.shape.id = self.namespaces.uri_for(self.namespaces.uri_or_curie_for(self.schema_defaults[cls_or_type.from_schema], camelcase(cls_or_type.name) + '_struct')) self.shapes.append(self.shape) else: if isinstance(cls, (TypeDefinition, ClassDefinition, EnumDefinition)): cls_or_type = cls else: cls_or_type = self.class_or_type_for(cls) self.shape.id = self.namespaces.uri_for(self.namespaces.uri_or_curie_for(self.schema_defaults[cls_or_type.from_schema], camelcase(cls_or_type.name) + suffix)) self.shapes.append(self.shape)
biolinkml
positive
def test_decorator_steal(self): """ If the lock is already acquired, but is older than our limit then the decorator should steal it. """ @lock('x', wait=True, steal_after_ms=10) def do_something(): return True <DeepExtract> identifier_hash = hashlib.md5('x'.encode()).hexdigest() return DatastoreLock.objects.create(identifier_hash=identifier_hash, **kwargs) </DeepExtract> self.assertTrue(do_something())
def test_decorator_steal(self):
    """ If the lock is already acquired, but is older than our limit
        then the decorator should steal it.
    """

    @lock('x', wait=True, steal_after_ms=10)
    def do_something():
        return True
    identifier_hash = hashlib.md5('x'.encode()).hexdigest()
    return DatastoreLock.objects.create(identifier_hash=identifier_hash, **kwargs)
    self.assertTrue(do_something())
djangae
positive
def post(self, request, *args, **kwargs): if self.flight and self.source_advertisement: <DeepExtract> instance = self.source_advertisement.__copy__() instance.flight = self.flight instance.save() new_ad = instance </DeepExtract> messages.success(self.request, _('Successfully created %(ad)s from a copy.') % {'ad': new_ad}) return redirect(reverse('advertisement_update', kwargs={'advertiser_slug': self.advertiser.slug, 'flight_slug': self.flight.slug, 'advertisement_slug': new_ad.slug})) return redirect(reverse('flight_detail', kwargs={'advertiser_slug': self.advertiser.slug, 'flight_slug': self.flight.slug}))
def post(self, request, *args, **kwargs):
    if self.flight and self.source_advertisement:
        instance = self.source_advertisement.__copy__()
        instance.flight = self.flight
        instance.save()
        new_ad = instance
        messages.success(self.request, _('Successfully created %(ad)s from a copy.') % {'ad': new_ad})
        return redirect(reverse('advertisement_update', kwargs={'advertiser_slug': self.advertiser.slug, 'flight_slug': self.flight.slug, 'advertisement_slug': new_ad.slug}))
    return redirect(reverse('flight_detail', kwargs={'advertiser_slug': self.advertiser.slug, 'flight_slug': self.flight.slug}))
ethical-ad-server
positive
def _read_n(length): """Read n bytes from the socket.""" if self._socket is None: raise TCPConnectionError(self._logprefix + 'not connected') buf = bytes() while length > 0: try: data = self._socket.recv(length) except socket.error as exception: <DeepExtract> raise TCPConnectionError('%s%s: %s' % (self._logprefix, 'failed to read data', exception)) </DeepExtract> if not data: raise TCPConnectionError(self._logprefix + 'connection closed') buf += data length -= len(data) return buf
def _read_n(length):
    """Read n bytes from the socket."""
    if self._socket is None:
        raise TCPConnectionError(self._logprefix + 'not connected')
    buf = bytes()
    while length > 0:
        try:
            data = self._socket.recv(length)
        except socket.error as exception:
            raise TCPConnectionError('%s%s: %s' % (self._logprefix, 'failed to read data', exception))
        if not data:
            raise TCPConnectionError(self._logprefix + 'connection closed')
        buf += data
        length -= len(data)
    return buf
carla-rl
positive
def test_convtbc(self): conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1) conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1) conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2)) conv_tbc.bias.data.copy_(conv1d.bias.data) input_tbc = torch.randn(7, 2, 4, requires_grad=True) input1d = input_tbc.data.transpose(0, 1).transpose(1, 2) input1d.requires_grad = True output_tbc = conv_tbc(input_tbc) output1d = conv1d(input1d) <DeepExtract> self.assertEqual(output_tbc.data.transpose(0, 1).transpose(1, 2).size(), output1d.data.size(), 'size mismatch') self.assertLess((output_tbc.data.transpose(0, 1).transpose(1, 2) - output1d.data).abs().max(), 0.0001) </DeepExtract> grad_tbc = torch.randn(output_tbc.size()) grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous() output_tbc.backward(grad_tbc) output1d.backward(grad1d) <DeepExtract> self.assertEqual(conv_tbc.weight.grad.data.transpose(0, 2).size(), conv1d.weight.grad.data.size(), 'size mismatch') self.assertLess((conv_tbc.weight.grad.data.transpose(0, 2) - conv1d.weight.grad.data).abs().max(), 0.0001) </DeepExtract> <DeepExtract> self.assertEqual(conv_tbc.bias.grad.data.size(), conv1d.bias.grad.data.size(), 'size mismatch') self.assertLess((conv_tbc.bias.grad.data - conv1d.bias.grad.data).abs().max(), 0.0001) </DeepExtract> <DeepExtract> self.assertEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2).size(), input1d.grad.data.size(), 'size mismatch') self.assertLess((input_tbc.grad.data.transpose(0, 1).transpose(1, 2) - input1d.grad.data).abs().max(), 0.0001) </DeepExtract>
def test_convtbc(self):
    conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
    conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
    conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
    conv_tbc.bias.data.copy_(conv1d.bias.data)
    input_tbc = torch.randn(7, 2, 4, requires_grad=True)
    input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
    input1d.requires_grad = True
    output_tbc = conv_tbc(input_tbc)
    output1d = conv1d(input1d)
    self.assertEqual(output_tbc.data.transpose(0, 1).transpose(1, 2).size(), output1d.data.size(), 'size mismatch')
    self.assertLess((output_tbc.data.transpose(0, 1).transpose(1, 2) - output1d.data).abs().max(), 0.0001)
    grad_tbc = torch.randn(output_tbc.size())
    grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
    output_tbc.backward(grad_tbc)
    output1d.backward(grad1d)
    self.assertEqual(conv_tbc.weight.grad.data.transpose(0, 2).size(), conv1d.weight.grad.data.size(), 'size mismatch')
    self.assertLess((conv_tbc.weight.grad.data.transpose(0, 2) - conv1d.weight.grad.data).abs().max(), 0.0001)
    self.assertEqual(conv_tbc.bias.grad.data.size(), conv1d.bias.grad.data.size(), 'size mismatch')
    self.assertLess((conv_tbc.bias.grad.data - conv1d.bias.grad.data).abs().max(), 0.0001)
    self.assertEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2).size(), input1d.grad.data.size(), 'size mismatch')
    self.assertLess((input_tbc.grad.data.transpose(0, 1).transpose(1, 2) - input1d.grad.data).abs().max(), 0.0001)
control-length
positive
def set_neighborlist(self, neighborlist): """Function to write neighborlist to ase atoms object. This function provides a uniform way in which to attach a neighbor list to an atoms object. Can be used in conjunction with the `get_neighborlist` function. Parameters ---------- self : class An ase atoms object to attach feature vector to. neighborlist : dict The neighbor list dict to attach. """ <DeepExtract> try: self.catlearn except AttributeError: self.catlearn = {} </DeepExtract> self.catlearn['neighborlist'] = neighborlist
def set_neighborlist(self, neighborlist):
    """Function to write neighborlist to ase atoms object.

    This function provides a uniform way in which to attach a neighbor list
    to an atoms object. Can be used in conjunction with the `get_neighborlist`
    function.

    Parameters
    ----------
    self : class
        An ase atoms object to attach feature vector to.
    neighborlist : dict
        The neighbor list dict to attach.
    """
    try:
        self.catlearn
    except AttributeError:
        self.catlearn = {}
    self.catlearn['neighborlist'] = neighborlist
CatLearn
positive
def _get_content_words_in_sentence(self, sentence): <DeepExtract> normalized_words = [self.normalize_word(w) for w in sentence.words] </DeepExtract> <DeepExtract> normalized_content_words = [w for w in normalized_words if w not in self.stop_words] </DeepExtract> return normalized_content_words
def _get_content_words_in_sentence(self, sentence):
    normalized_words = [self.normalize_word(w) for w in sentence.words]
    normalized_content_words = [w for w in normalized_words if w not in self.stop_words]
    return normalized_content_words
acl2017-interactive_summarizer
positive
def f1_score(tp: int, fp: int, fn: int, tn: int) -> float: <DeepExtract> p = tp / (tp + fp) </DeepExtract> <DeepExtract> r = tp / (tp + fn) </DeepExtract> return 2 * p * r / (p + r)
def f1_score(tp: int, fp: int, fn: int, tn: int) -> float:
    p = tp / (tp + fp)
    r = tp / (tp + fn)
    return 2 * p * r / (p + r)
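A quick worked example of the formula above (counts chosen for illustration):

# precision = 70 / (70 + 10) = 0.875
# recall    = 70 / (70 + 20) = 0.778 (approx.)
# f1        = 2 * 0.875 * 0.778 / (0.875 + 0.778) = 0.824 (approx.)
print(f1_score(tp=70, fp=10, fn=20, tn=900))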
data-science-from-scratch
positive
def main(_): if FLAGS.do_train: if os.path.exists(FLAGS.output_dir): try: os.removedirs(FLAGS.output_dir) os.makedirs(FLAGS.output_dir) except: tf.logging.info('***** Running evaluation *****') tf.logging.warning(FLAGS.output_dir + ' is not empty, here use shutil.rmtree(FLAGS.output_dir)!') shutil.rmtree(FLAGS.output_dir) os.makedirs(FLAGS.output_dir) else: os.makedirs(FLAGS.output_dir) tf.logging.set_verbosity(tf.logging.INFO) processors = {'atis': Atis_Slot_Filling_and_Intent_Detection_Processor, 'snips': Snips_Slot_Filling_and_Intent_Detection_Processor} tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and (not FLAGS.do_eval) and (not FLAGS.do_predict): raise ValueError("At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError('Cannot use sequence length %d because the BERT model was only trained up to sequence length %d' % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) tf.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError('Task not found: %s' % task_name) processor = processors[task_name]() intent_label_list = processor.get_intent_labels() slot_label_list = processor.get_slot_labels() intent_id2label = {} for (i, label) in enumerate(intent_label_list): intent_id2label[i] = label slot_id2label = {} for (i, label) in enumerate(slot_label_list): slot_id2label[i] = label tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int(len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) <DeepExtract> def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" tf.logging.info('*** Features ***') for name in sorted(features.keys()): tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape)) input_ids = features['input_ids'] slot_label_ids = features['slot_ids'] input_mask = features['input_mask'] segment_ids = features['segment_ids'] intent_label_ids = features['label_ids'] is_real_example = None if 'is_real_example' in features: is_real_example = tf.cast(features['is_real_example'], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(intent_label_ids), dtype=tf.float32) is_training = mode == tf.estimator.ModeKeys.TRAIN (total_loss, intent_loss, intent_per_example_loss, intent_logits, intent_predictions, slot_loss, slot_per_example_loss, slot_logits, slot_predictions) = create_model(bert_config, is_training, input_ids, input_mask, segment_ids, slot_label_ids, intent_label_ids, 
len(slot_label_list), len(intent_label_list), FLAGS.use_tpu) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if FLAGS.init_checkpoint: (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, FLAGS.init_checkpoint) if FLAGS.use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map) model_fn = tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map) tf.logging.info('**** Trainable Variables ****') for var in tvars: init_string = '' if var.name in initialized_variable_names: init_string = ', *INIT_FROM_CKPT*' tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer(total_loss, FLAGS.learning_rate, num_train_steps, num_warmup_steps, FLAGS.use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(intent_per_example_loss, intent_label_ids, intent_logits, slot_per_example_loss, slot_label_ids, slot_logits, is_real_example): intent_predictions = tf.argmax(intent_logits, axis=-1, output_type=tf.int32) intent_accuracy = tf.metrics.accuracy(labels=intent_label_ids, predictions=intent_predictions, weights=is_real_example) intent_loss = tf.metrics.mean(values=intent_per_example_loss, weights=is_real_example) slot_predictions = tf.argmax(slot_logits, axis=-1, output_type=tf.int32) slot_pos_indices_list = list(range(len(slot_label_list)))[4:] pos_indices_list = slot_pos_indices_list[:-1] slot_precision_macro = tf_metrics.precision(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='macro') slot_recall_macro = tf_metrics.recall(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='macro') slot_f_macro = tf_metrics.f1(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='macro') slot_precision_micro = tf_metrics.precision(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='micro') slot_recall_micro = tf_metrics.recall(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='micro') slot_f_micro = tf_metrics.f1(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='micro') slot_loss = tf.metrics.mean(values=slot_per_example_loss, weights=is_real_example) model_fn = {'eval_intent_accuracy': intent_accuracy, 'eval_intent_loss': intent_loss, 'eval_slot_precision(macro)': slot_precision_macro, 'eval_slot_recall(macro)': slot_recall_macro, 'eval_slot_f(macro)': slot_f_macro, 'eval_slot_precision(micro)': slot_precision_micro, 'eval_slot_recall(micro)': slot_recall_micro, 'eval_slot_f(micro)': slot_f_micro, 'eval_slot_loss': slot_loss} eval_metrics = (metric_fn, [intent_per_example_loss, intent_label_ids, intent_logits, slot_per_example_loss, slot_label_ids, slot_logits, is_real_example]) output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions={'intent_predictions': intent_predictions, 'slot_predictions': slot_predictions}, scaffold_fn=scaffold_fn) model_fn = output_spec model_fn = model_fn </DeepExtract> estimator = 
tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, 'train.tf_record') <DeepExtract> writer = tf.python_io.TFRecordWriter(train_file) for (ex_index, example) in enumerate(train_examples): if ex_index % 10000 == 0: tf.logging.info('Writing example %d of %d' % (ex_index, len(train_examples))) feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list, FLAGS.max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features['input_ids'] = create_int_feature(feature.input_ids) features['slot_ids'] = create_int_feature(feature.slot_ids) features['input_mask'] = create_int_feature(feature.input_mask) features['segment_ids'] = create_int_feature(feature.segment_ids) features['label_ids'] = create_int_feature([feature.label_id]) features['is_real_example'] = create_int_feature([int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() </DeepExtract> tf.logging.info('***** Running training *****') tf.logging.info(' Num examples = %d', len(train_examples)) tf.logging.info(' Batch size = %d', FLAGS.train_batch_size) tf.logging.info(' Num steps = %d', num_train_steps) <DeepExtract> name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'slot_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)} def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t train_input_fn = example def input_fn(params): """The actual input function.""" batch_size = params['batch_size'] d = tf.data.TFRecordDataset(train_file) if True: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=True)) train_input_fn = d train_input_fn = input_fn </DeepExtract> estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, 'eval.tf_record') <DeepExtract> writer = tf.python_io.TFRecordWriter(eval_file) for (ex_index, example) in enumerate(eval_examples): if ex_index % 10000 == 0: tf.logging.info('Writing example %d of %d' % (ex_index, len(eval_examples))) feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list, FLAGS.max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features['input_ids'] = 
create_int_feature(feature.input_ids) features['slot_ids'] = create_int_feature(feature.slot_ids) features['input_mask'] = create_int_feature(feature.input_mask) features['segment_ids'] = create_int_feature(feature.segment_ids) features['label_ids'] = create_int_feature([feature.label_id]) features['is_real_example'] = create_int_feature([int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() </DeepExtract> tf.logging.info('***** Running evaluation *****') tf.logging.info(' Num examples = %d (%d actual, %d padding)', len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) tf.logging.info(' Batch size = %d', FLAGS.eval_batch_size) eval_steps = None if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False <DeepExtract> name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'slot_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)} def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t eval_input_fn = example def input_fn(params): """The actual input function.""" batch_size = params['batch_size'] d = tf.data.TFRecordDataset(eval_file) if False: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=eval_drop_remainder)) eval_input_fn = d eval_input_fn = input_fn </DeepExtract> result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, 'eval_results.txt') with tf.gfile.GFile(output_eval_file, 'w') as writer: tf.logging.info('***** Eval results *****') for key in sorted(result.keys()): tf.logging.info(' %s = %s', key, str(result[key])) writer.write('%s = %s\n' % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, 'predict.tf_record') <DeepExtract> writer = tf.python_io.TFRecordWriter(predict_file) for (ex_index, example) in enumerate(predict_examples): if ex_index % 10000 == 0: tf.logging.info('Writing example %d of %d' % (ex_index, len(predict_examples))) feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list, FLAGS.max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features['input_ids'] = create_int_feature(feature.input_ids) features['slot_ids'] = create_int_feature(feature.slot_ids) features['input_mask'] = create_int_feature(feature.input_mask) features['segment_ids'] = create_int_feature(feature.segment_ids) 
features['label_ids'] = create_int_feature([feature.label_id]) features['is_real_example'] = create_int_feature([int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() </DeepExtract> tf.logging.info('***** Running prediction*****') tf.logging.info(' Num examples = %d (%d actual, %d padding)', len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) tf.logging.info(' Batch size = %d', FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False <DeepExtract> name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'slot_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)} def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t predict_input_fn = example def input_fn(params): """The actual input function.""" batch_size = params['batch_size'] d = tf.data.TFRecordDataset(predict_file) if False: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=predict_drop_remainder)) predict_input_fn = d predict_input_fn = input_fn </DeepExtract> result = estimator.predict(input_fn=predict_input_fn) intent_output_predict_file = os.path.join(FLAGS.output_dir, 'intent_prediction_test_results.txt') slot_output_predict_file = os.path.join(FLAGS.output_dir, 'slot_filling_test_results.txt') with tf.gfile.GFile(intent_output_predict_file, 'w') as intent_writer: with tf.gfile.GFile(slot_output_predict_file, 'w') as slot_writer: num_written_lines = 0 tf.logging.info('***** Intent Predict and Slot Filling results *****') for (i, prediction) in enumerate(result): intent_prediction = prediction['intent_predictions'] slot_predictions = prediction['slot_predictions'] if i >= num_actual_predict_examples: break intent_output_line = str(intent_id2label[intent_prediction]) + '\n' intent_writer.write(intent_output_line) slot_output_line = ' '.join((slot_id2label[id] for id in slot_predictions if id != 0)) + '\n' slot_writer.write(slot_output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if FLAGS.calculate_model_score: path_to_label_file = os.path.join(FLAGS.data_dir, 'test') path_to_predict_label_file = FLAGS.output_dir log_out_file = path_to_predict_label_file if FLAGS.task_name.lower() == 'snips': intent_slot_reports = tf_metrics.Snips_Slot_Filling_and_Intent_Detection_Calculate(path_to_label_file, path_to_predict_label_file, log_out_file) elif FLAGS.task_name.lower() == 'atis': intent_slot_reports = tf_metrics.Atis_Slot_Filling_and_Intent_Detection_Calculate(path_to_label_file, path_to_predict_label_file, log_out_file) else: raise ValueError('Not this calculate_model_score') intent_slot_reports.show_intent_prediction_report(store_report=True) intent_slot_reports.show_slot_filling_report(store_report=True)
def main(_): if FLAGS.do_train: if os.path.exists(FLAGS.output_dir): try: os.removedirs(FLAGS.output_dir) os.makedirs(FLAGS.output_dir) except: tf.logging.info('***** Running evaluation *****') tf.logging.warning(FLAGS.output_dir + ' is not empty, here use shutil.rmtree(FLAGS.output_dir)!') shutil.rmtree(FLAGS.output_dir) os.makedirs(FLAGS.output_dir) else: os.makedirs(FLAGS.output_dir) tf.logging.set_verbosity(tf.logging.INFO) processors = {'atis': Atis_Slot_Filling_and_Intent_Detection_Processor, 'snips': Snips_Slot_Filling_and_Intent_Detection_Processor} tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and (not FLAGS.do_eval) and (not FLAGS.do_predict): raise ValueError("At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError('Cannot use sequence length %d because the BERT model was only trained up to sequence length %d' % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) tf.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError('Task not found: %s' % task_name) processor = processors[task_name]() intent_label_list = processor.get_intent_labels() slot_label_list = processor.get_slot_labels() intent_id2label = {} for (i, label) in enumerate(intent_label_list): intent_id2label[i] = label slot_id2label = {} for (i, label) in enumerate(slot_label_list): slot_id2label[i] = label tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int(len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" tf.logging.info('*** Features ***') for name in sorted(features.keys()): tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape)) input_ids = features['input_ids'] slot_label_ids = features['slot_ids'] input_mask = features['input_mask'] segment_ids = features['segment_ids'] intent_label_ids = features['label_ids'] is_real_example = None if 'is_real_example' in features: is_real_example = tf.cast(features['is_real_example'], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(intent_label_ids), dtype=tf.float32) is_training = mode == tf.estimator.ModeKeys.TRAIN (total_loss, intent_loss, intent_per_example_loss, intent_logits, intent_predictions, slot_loss, slot_per_example_loss, slot_logits, slot_predictions) = create_model(bert_config, is_training, input_ids, input_mask, segment_ids, slot_label_ids, intent_label_ids, 
len(slot_label_list), len(intent_label_list), FLAGS.use_tpu) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if FLAGS.init_checkpoint: (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, FLAGS.init_checkpoint) if FLAGS.use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map) model_fn = tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map) tf.logging.info('**** Trainable Variables ****') for var in tvars: init_string = '' if var.name in initialized_variable_names: init_string = ', *INIT_FROM_CKPT*' tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer(total_loss, FLAGS.learning_rate, num_train_steps, num_warmup_steps, FLAGS.use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(intent_per_example_loss, intent_label_ids, intent_logits, slot_per_example_loss, slot_label_ids, slot_logits, is_real_example): intent_predictions = tf.argmax(intent_logits, axis=-1, output_type=tf.int32) intent_accuracy = tf.metrics.accuracy(labels=intent_label_ids, predictions=intent_predictions, weights=is_real_example) intent_loss = tf.metrics.mean(values=intent_per_example_loss, weights=is_real_example) slot_predictions = tf.argmax(slot_logits, axis=-1, output_type=tf.int32) slot_pos_indices_list = list(range(len(slot_label_list)))[4:] pos_indices_list = slot_pos_indices_list[:-1] slot_precision_macro = tf_metrics.precision(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='macro') slot_recall_macro = tf_metrics.recall(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='macro') slot_f_macro = tf_metrics.f1(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='macro') slot_precision_micro = tf_metrics.precision(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='micro') slot_recall_micro = tf_metrics.recall(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='micro') slot_f_micro = tf_metrics.f1(slot_label_ids, slot_predictions, len(slot_label_list), slot_pos_indices_list, average='micro') slot_loss = tf.metrics.mean(values=slot_per_example_loss, weights=is_real_example) model_fn = {'eval_intent_accuracy': intent_accuracy, 'eval_intent_loss': intent_loss, 'eval_slot_precision(macro)': slot_precision_macro, 'eval_slot_recall(macro)': slot_recall_macro, 'eval_slot_f(macro)': slot_f_macro, 'eval_slot_precision(micro)': slot_precision_micro, 'eval_slot_recall(micro)': slot_recall_micro, 'eval_slot_f(micro)': slot_f_micro, 'eval_slot_loss': slot_loss} eval_metrics = (metric_fn, [intent_per_example_loss, intent_label_ids, intent_logits, slot_per_example_loss, slot_label_ids, slot_logits, is_real_example]) output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions={'intent_predictions': intent_predictions, 'slot_predictions': slot_predictions}, scaffold_fn=scaffold_fn) model_fn = output_spec model_fn = model_fn estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, 
model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, 'train.tf_record') writer = tf.python_io.TFRecordWriter(train_file) for (ex_index, example) in enumerate(train_examples): if ex_index % 10000 == 0: tf.logging.info('Writing example %d of %d' % (ex_index, len(train_examples))) feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list, FLAGS.max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features['input_ids'] = create_int_feature(feature.input_ids) features['slot_ids'] = create_int_feature(feature.slot_ids) features['input_mask'] = create_int_feature(feature.input_mask) features['segment_ids'] = create_int_feature(feature.segment_ids) features['label_ids'] = create_int_feature([feature.label_id]) features['is_real_example'] = create_int_feature([int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() tf.logging.info('***** Running training *****') tf.logging.info(' Num examples = %d', len(train_examples)) tf.logging.info(' Batch size = %d', FLAGS.train_batch_size) tf.logging.info(' Num steps = %d', num_train_steps) name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'slot_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)} def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t train_input_fn = example def input_fn(params): """The actual input function.""" batch_size = params['batch_size'] d = tf.data.TFRecordDataset(train_file) if True: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=True)) train_input_fn = d train_input_fn = input_fn estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, 'eval.tf_record') writer = tf.python_io.TFRecordWriter(eval_file) for (ex_index, example) in enumerate(eval_examples): if ex_index % 10000 == 0: tf.logging.info('Writing example %d of %d' % (ex_index, len(eval_examples))) feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list, FLAGS.max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features['input_ids'] = create_int_feature(feature.input_ids) features['slot_ids'] = create_int_feature(feature.slot_ids) features['input_mask'] = 
create_int_feature(feature.input_mask) features['segment_ids'] = create_int_feature(feature.segment_ids) features['label_ids'] = create_int_feature([feature.label_id]) features['is_real_example'] = create_int_feature([int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() tf.logging.info('***** Running evaluation *****') tf.logging.info(' Num examples = %d (%d actual, %d padding)', len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) tf.logging.info(' Batch size = %d', FLAGS.eval_batch_size) eval_steps = None if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'slot_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)} def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t eval_input_fn = example def input_fn(params): """The actual input function.""" batch_size = params['batch_size'] d = tf.data.TFRecordDataset(eval_file) if False: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=eval_drop_remainder)) eval_input_fn = d eval_input_fn = input_fn result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, 'eval_results.txt') with tf.gfile.GFile(output_eval_file, 'w') as writer: tf.logging.info('***** Eval results *****') for key in sorted(result.keys()): tf.logging.info(' %s = %s', key, str(result[key])) writer.write('%s = %s\n' % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, 'predict.tf_record') writer = tf.python_io.TFRecordWriter(predict_file) for (ex_index, example) in enumerate(predict_examples): if ex_index % 10000 == 0: tf.logging.info('Writing example %d of %d' % (ex_index, len(predict_examples))) feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list, FLAGS.max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features['input_ids'] = create_int_feature(feature.input_ids) features['slot_ids'] = create_int_feature(feature.slot_ids) features['input_mask'] = create_int_feature(feature.input_mask) features['segment_ids'] = create_int_feature(feature.segment_ids) features['label_ids'] = create_int_feature([feature.label_id]) features['is_real_example'] = create_int_feature([int(feature.is_real_example)]) tf_example = 
tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() tf.logging.info('***** Running prediction*****') tf.logging.info(' Num examples = %d (%d actual, %d padding)', len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) tf.logging.info(' Batch size = %d', FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'slot_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)} def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t predict_input_fn = example def input_fn(params): """The actual input function.""" batch_size = params['batch_size'] d = tf.data.TFRecordDataset(predict_file) if False: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=predict_drop_remainder)) predict_input_fn = d predict_input_fn = input_fn result = estimator.predict(input_fn=predict_input_fn) intent_output_predict_file = os.path.join(FLAGS.output_dir, 'intent_prediction_test_results.txt') slot_output_predict_file = os.path.join(FLAGS.output_dir, 'slot_filling_test_results.txt') with tf.gfile.GFile(intent_output_predict_file, 'w') as intent_writer: with tf.gfile.GFile(slot_output_predict_file, 'w') as slot_writer: num_written_lines = 0 tf.logging.info('***** Intent Predict and Slot Filling results *****') for (i, prediction) in enumerate(result): intent_prediction = prediction['intent_predictions'] slot_predictions = prediction['slot_predictions'] if i >= num_actual_predict_examples: break intent_output_line = str(intent_id2label[intent_prediction]) + '\n' intent_writer.write(intent_output_line) slot_output_line = ' '.join((slot_id2label[id] for id in slot_predictions if id != 0)) + '\n' slot_writer.write(slot_output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if FLAGS.calculate_model_score: path_to_label_file = os.path.join(FLAGS.data_dir, 'test') path_to_predict_label_file = FLAGS.output_dir log_out_file = path_to_predict_label_file if FLAGS.task_name.lower() == 'snips': intent_slot_reports = tf_metrics.Snips_Slot_Filling_and_Intent_Detection_Calculate(path_to_label_file, path_to_predict_label_file, log_out_file) elif FLAGS.task_name.lower() == 'atis': intent_slot_reports = tf_metrics.Atis_Slot_Filling_and_Intent_Detection_Calculate(path_to_label_file, path_to_predict_label_file, log_out_file) else: raise ValueError('Not this calculate_model_score') intent_slot_reports.show_intent_prediction_report(store_report=True) intent_slot_reports.show_slot_filling_report(store_report=True)
BERT-for-Sequence-Labeling-and-Text-Classification
positive
def visit_Return(self, node): <DeepExtract> self.new_lines = max(self.new_lines, n) </DeepExtract> <DeepExtract> if self.new_lines: if self.result: self.result.append('\n' * self.new_lines) self.result.append(self.indent_with * self.indentation) self.new_lines = 0 self.result.append('return ') </DeepExtract> <DeepExtract> f = self.get_visitor(node.value) if f is not None: return f(node.value) return self.generic_visit(node.value) </DeepExtract>
def visit_Return(self, node):
    self.new_lines = max(self.new_lines, n)
    if self.new_lines:
        if self.result:
            self.result.append('\n' * self.new_lines)
        self.result.append(self.indent_with * self.indentation)
        self.new_lines = 0
    self.result.append('return ')
    f = self.get_visitor(node.value)
    if f is not None:
        return f(node.value)
    return self.generic_visit(node.value)
atsf4g-co
positive
def forward(self, x, incremental_state=None, query=None, unfold=None): """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead query: use the specified query to predict the conv filters """ unfold = x.size(0) > 512 if unfold is None else unfold unfold = unfold or incremental_state is not None assert query is None or not self.in_proj if query is None: query = x if unfold: <DeepExtract> (T, B, C) = x.size() (K, H) = (self.kernel_size, self.num_heads) R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) else: weight = self.weight_linear(query).view(T * B * H, -1) assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size + 1:]) x_unfold = x_unfold.view(T * B * H, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K - 1: weight = weight.narrow(1, K - T, T) (K, padding_l) = (T, T - 1) x_unfold = unfold1d(x, K, padding_l, 0) x_unfold = x_unfold.view(T * B * H, R, K) if self.weight_softmax and (not self.renorm_padding): weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -x_unfold.size(2):] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) output = output.view(T, B, C) output = output </DeepExtract> else: <DeepExtract> (T, B, C) = x.size() (K, H) = (self.kernel_size, self.num_heads) R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) else: weight = self.weight_linear(query).view(T * B * H, -1) if not self.renorm_padding: if self.weight_softmax: weight = F.softmax(weight, dim=1) weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False) weight = weight.narrow(1, 0, K).contiguous() weight = weight.view(T, B * H, K).transpose(0, 1) x = x.view(T, B * H, R).transpose(0, 1) if self.weight_softmax and self.renorm_padding: weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float('-inf')) weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, self.padding_l, T) weight_expanded = F.softmax(weight_expanded, dim=2) weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training, inplace=False) else: P = self.padding_l if K > T and P == K - 1: weight = weight.narrow(2, K - T, T) (K, P) = (T, T - 1) weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) output = torch.bmm(weight_expanded, 
x) output = output.transpose(0, 1).contiguous().view(T, B, C) output = output </DeepExtract> if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output
def forward(self, x, incremental_state=None, query=None, unfold=None): """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead query: use the specified query to predict the conv filters """ unfold = x.size(0) > 512 if unfold is None else unfold unfold = unfold or incremental_state is not None assert query is None or not self.in_proj if query is None: query = x if unfold: (T, B, C) = x.size() (K, H) = (self.kernel_size, self.num_heads) R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) else: weight = self.weight_linear(query).view(T * B * H, -1) assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size + 1:]) x_unfold = x_unfold.view(T * B * H, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K - 1: weight = weight.narrow(1, K - T, T) (K, padding_l) = (T, T - 1) x_unfold = unfold1d(x, K, padding_l, 0) x_unfold = x_unfold.view(T * B * H, R, K) if self.weight_softmax and (not self.renorm_padding): weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -x_unfold.size(2):] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) output = output.view(T, B, C) output = output else: (T, B, C) = x.size() (K, H) = (self.kernel_size, self.num_heads) R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) else: weight = self.weight_linear(query).view(T * B * H, -1) if not self.renorm_padding: if self.weight_softmax: weight = F.softmax(weight, dim=1) weight = F.dropout(weight, self.weight_dropout, training=self.training, inplace=False) weight = weight.narrow(1, 0, K).contiguous() weight = weight.view(T, B * H, K).transpose(0, 1) x = x.view(T, B * H, R).transpose(0, 1) if self.weight_softmax and self.renorm_padding: weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float('-inf')) weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, self.padding_l, T) weight_expanded = F.softmax(weight_expanded, dim=2) weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training, inplace=False) else: P = self.padding_l if K > T and P == K - 1: weight = weight.narrow(2, K - T, T) (K, P) = (T, T - 1) weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) output = torch.bmm(weight_expanded, x) output = output.transpose(0, 
1).contiguous().view(T, B, C) output = output if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output
DisCo
positive
def step_reward(self, observation: tf.Tensor, action: tf.Tensor, next_observation: tf.Tensor) -> tf.Tensor:
    """
    Return the step reward for the transition from `observation` to `next_observation` via `action`.
    """
    is_batched_nested_tensors([observation, action, next_observation], [self._observation_spec, self._action_spec, self._observation_spec])
    <DeepExtract>
    pass
    </DeepExtract>
    is_batched_nested_tensors(rewards, self._reward_spec)
    return rewards
def step_reward(self, observation: tf.Tensor, action: tf.Tensor, next_observation: tf.Tensor) -> tf.Tensor:
    """
    Return the step reward for the transition from `observation` to `next_observation` via `action`.
    """
    is_batched_nested_tensors([observation, action, next_observation], [self._observation_spec, self._action_spec, self._observation_spec])
    pass
    is_batched_nested_tensors(rewards, self._reward_spec)
    return rewards
bellman
positive
def test_ExceptionsAsConnectionAttributes(self):
    <DeepExtract>
    try:
        con = self.driver.connect(*self.connect_args, **self.connect_kw_args)
    except AttributeError:
        self.fail('No connect method found in self.driver module')
    </DeepExtract>
    drv = self.driver
    self.assertTrue(con.Warning is drv.Warning)
    self.assertTrue(con.Error is drv.Error)
    self.assertTrue(con.InterfaceError is drv.InterfaceError)
    self.assertTrue(con.DatabaseError is drv.DatabaseError)
    self.assertTrue(con.OperationalError is drv.OperationalError)
    self.assertTrue(con.IntegrityError is drv.IntegrityError)
    self.assertTrue(con.InternalError is drv.InternalError)
    self.assertTrue(con.ProgrammingError is drv.ProgrammingError)
    self.assertTrue(con.NotSupportedError is drv.NotSupportedError)
def test_ExceptionsAsConnectionAttributes(self):
    try:
        con = self.driver.connect(*self.connect_args, **self.connect_kw_args)
    except AttributeError:
        self.fail('No connect method found in self.driver module')
    drv = self.driver
    self.assertTrue(con.Warning is drv.Warning)
    self.assertTrue(con.Error is drv.Error)
    self.assertTrue(con.InterfaceError is drv.InterfaceError)
    self.assertTrue(con.DatabaseError is drv.DatabaseError)
    self.assertTrue(con.OperationalError is drv.OperationalError)
    self.assertTrue(con.IntegrityError is drv.IntegrityError)
    self.assertTrue(con.InternalError is drv.InternalError)
    self.assertTrue(con.ProgrammingError is drv.ProgrammingError)
    self.assertTrue(con.NotSupportedError is drv.NotSupportedError)
aws-servicebroker
positive
def __init__(self, num_quantiles):
    self.num_quantiles = num_quantiles
    super().__init__(Box(low=-jnp.inf, high=jnp.inf, shape=[num_quantiles]))
    def check_shape(x, name):
        if not isinstance(x, jnp.ndarray):
            raise TypeError(f'expected an jax.numpy.ndarray, got: {type(x)}')
        return x
    def mean(dist_params):
        <DeepExtract>
        if not isinstance(dist_params['values'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['values'])}")
        values = dist_params['values']
        </DeepExtract>
        return jnp.mean(values, axis=-1)
    def sample(dist_params, rng):
        <DeepExtract>
        if not isinstance(dist_params['values'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['values'])}")
        values = dist_params['values']
        </DeepExtract>
        return jax.random.choice(rng, values, values.shape, replace=True)
    def log_proba(dist_params, X):
        <DeepExtract>
        if not isinstance(X, jnp.ndarray):
            raise TypeError(f'expected an jax.numpy.ndarray, got: {type(X)}')
        X = X
        </DeepExtract>
        <DeepExtract>
        if not isinstance(dist_params['values'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['values'])}")
        values = dist_params['values']
        </DeepExtract>
        occurrences = jnp.mean(X[None, ...] == values[..., None], axis=-1)
        return jnp.log(occurrences)
    def affine_transform(dist_params, scale, shift, value_transform=None):
        chex.assert_rank([dist_params['values'], scale, shift], [2, {0, 1}, {0, 1}])
        <DeepExtract>
        if not isinstance(dist_params['values'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['values'])}")
        values = dist_params['values']
        </DeepExtract>
        <DeepExtract>
        if not isinstance(dist_params['quantile_fractions'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['quantile_fractions'])}")
        quantile_fractions = dist_params['quantile_fractions']
        </DeepExtract>
        batch_size = values.shape[0]
        if isscalar(scale):
            scale = jnp.full(shape=(batch_size, 1), fill_value=jnp.squeeze(scale))
        if isscalar(shift):
            shift = jnp.full(shape=(batch_size, 1), fill_value=jnp.squeeze(shift))
        scale = jnp.reshape(scale, (batch_size, 1))
        shift = jnp.reshape(shift, (batch_size, 1))
        chex.assert_shape(values, (batch_size, self.num_quantiles))
        chex.assert_shape([scale, shift], (batch_size, 1))
        if value_transform is None:
            f = f_inv = lambda x: x
        else:
            (f, f_inv) = value_transform
        return {'values': f(shift + scale * f_inv(values)), 'quantile_fractions': quantile_fractions}
    self._sample_func = jit(sample)
    self._mean_func = jit(mean)
    self._log_proba_func = jit(log_proba)
    self._affine_transform_func = jit(affine_transform, static_argnums=(3,))
def __init__(self, num_quantiles):
    self.num_quantiles = num_quantiles
    super().__init__(Box(low=-jnp.inf, high=jnp.inf, shape=[num_quantiles]))
    def check_shape(x, name):
        if not isinstance(x, jnp.ndarray):
            raise TypeError(f'expected an jax.numpy.ndarray, got: {type(x)}')
        return x
    def mean(dist_params):
        if not isinstance(dist_params['values'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['values'])}")
        values = dist_params['values']
        return jnp.mean(values, axis=-1)
    def sample(dist_params, rng):
        if not isinstance(dist_params['values'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['values'])}")
        values = dist_params['values']
        return jax.random.choice(rng, values, values.shape, replace=True)
    def log_proba(dist_params, X):
        if not isinstance(X, jnp.ndarray):
            raise TypeError(f'expected an jax.numpy.ndarray, got: {type(X)}')
        X = X
        if not isinstance(dist_params['values'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['values'])}")
        values = dist_params['values']
        occurrences = jnp.mean(X[None, ...] == values[..., None], axis=-1)
        return jnp.log(occurrences)
    def affine_transform(dist_params, scale, shift, value_transform=None):
        chex.assert_rank([dist_params['values'], scale, shift], [2, {0, 1}, {0, 1}])
        if not isinstance(dist_params['values'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['values'])}")
        values = dist_params['values']
        if not isinstance(dist_params['quantile_fractions'], jnp.ndarray):
            raise TypeError(f"expected an jax.numpy.ndarray, got: {type(dist_params['quantile_fractions'])}")
        quantile_fractions = dist_params['quantile_fractions']
        batch_size = values.shape[0]
        if isscalar(scale):
            scale = jnp.full(shape=(batch_size, 1), fill_value=jnp.squeeze(scale))
        if isscalar(shift):
            shift = jnp.full(shape=(batch_size, 1), fill_value=jnp.squeeze(shift))
        scale = jnp.reshape(scale, (batch_size, 1))
        shift = jnp.reshape(shift, (batch_size, 1))
        chex.assert_shape(values, (batch_size, self.num_quantiles))
        chex.assert_shape([scale, shift], (batch_size, 1))
        if value_transform is None:
            f = f_inv = lambda x: x
        else:
            (f, f_inv) = value_transform
        return {'values': f(shift + scale * f_inv(values)), 'quantile_fractions': quantile_fractions}
    self._sample_func = jit(sample)
    self._mean_func = jit(mean)
    self._log_proba_func = jit(log_proba)
    self._affine_transform_func = jit(affine_transform, static_argnums=(3,))
coax
positive
def leakage_cal(obs, im=None, sites=[], leakage_tol=0.1, pol_fit=['RL', 'LR'], dtype='vis', const_fpol=False, inverse=False, minimizer_method='L-BFGS-B', ttype='direct', fft_pad_factor=2, show_solution=True, obs_apply=False): """Polarimetric calibration (detects and removes polarimetric leakage, based on consistency with a given image) Args: obs (Obsdata): The observation to be calibrated im (Image): the reference image used for calibration (not needed if using const_fpol = True) sites (list): list of sites to include in the polarimetric calibration. empty list calibrates all sites leakage_tol (float): leakage values exceeding this value will be disfavored by the prior pol_fit (list): list of visibilities to use; e.g., ['RL','LR'] or ['RR','LL','RL','LR'] dtype (str): Type of data to fit ('vis' for complex visibilities; 'amp' for just the amplitudes) const_fpol (bool): If true, solve for a single fractional polarization across all baselines in addition to leakage. For this option, the passed image is not used. minimizer_method (str): Method for scipy.optimize.minimize (e.g., 'CG', 'BFGS') ttype (str): if "fast" or "nfft" use FFT to produce visibilities. Else "direct" for DTFT fft_pad_factor (float): zero pad the image to fft_pad_factor * image size in FFT show_solution (bool): if True, display the solution as it is calculated Returns: (Obsdata): the calibrated observation, with computed leakage values added to the obs.tarr """ tstart = time.time() mask = [] if not const_fpol: im_circ = im.switch_polrep('circ') else: im_circ = None if dtype not in ['vis', 'amp']: raise Exception('dtype must be vis or amp') obs_test = obs.copy() obs_test = obs_test.switch_polrep('circ') if obs_test.frcal is False: print('Field rotation angles have not been corrected. 
Correcting now...') obs_test.data = simobs.apply_jones_inverse(obs_test, frcal=False, dcal=True, verbose=False) obs_test.frcal = True allsites = list(set(np.hstack((obs.data['t1'], obs.data['t2'])))) if len(sites) == 0: print('No stations specified for leakage calibration: defaulting to calibrating all !') sites = allsites for j in range(len(obs_test.tarr)): if obs_test.tarr[j]['site'] in sites: continue obs_test.tarr[j]['dr'] = obs_test.tarr[j]['dl'] = 0j sites = [s for s in sites if s in allsites] site_index = [list(obs.tarr['site']).index(s) for s in sites] if not const_fpol: (dataRR, sigmaRR, ARR) = iu.chisqdata(obs, im_circ, mask=mask, dtype=dtype, pol='RR', ttype=ttype, fft_pad_factor=fft_pad_factor) (dataLL, sigmaLL, ALL) = iu.chisqdata(obs, im_circ, mask=mask, dtype=dtype, pol='LL', ttype=ttype, fft_pad_factor=fft_pad_factor) (dataRL, sigmaRL, ARL) = iu.chisqdata(obs, im_circ, mask=mask, dtype=dtype, pol='RL', ttype=ttype, fft_pad_factor=fft_pad_factor) (dataLR, sigmaLR, ALR) = iu.chisqdata(obs, im_circ, mask=mask, dtype=dtype, pol='LR', ttype=ttype, fft_pad_factor=fft_pad_factor) if inverse: el1 = obs.unpack(['el1'], ang_unit='rad')['el1'] el2 = obs.unpack(['el2'], ang_unit='rad')['el2'] par1 = obs.unpack(['par_ang1'], ang_unit='rad')['par_ang1'] par2 = obs.unpack(['par_ang2'], ang_unit='rad')['par_ang2'] fr_elev1 = np.array([obs.tarr[obs.tkey[o['t1']]]['fr_elev'] for o in obs.data]) fr_elev2 = np.array([obs.tarr[obs.tkey[o['t2']]]['fr_elev'] for o in obs.data]) fr_par1 = np.array([obs.tarr[obs.tkey[o['t1']]]['fr_par'] for o in obs.data]) fr_par2 = np.array([obs.tarr[obs.tkey[o['t2']]]['fr_par'] for o in obs.data]) fr_off1 = np.array([obs.tarr[obs.tkey[o['t1']]]['fr_off'] for o in obs.data]) fr_off2 = np.array([obs.tarr[obs.tkey[o['t2']]]['fr_off'] for o in obs.data]) fr1 = fr_elev1 * el1 + fr_par1 * par1 + fr_off1 * np.pi / 180.0 fr2 = fr_elev2 * el2 + fr_par2 * par2 + fr_off2 * np.pi / 180.0 def chisq_total(data, im, D, inverse=False): if const_fpol: if inverse: fpol_model = D[-2] cpol_model = np.real(D[-1]) D1R = [D[2 * sites.index(o['t1'])] for o in data] D1L = [D[2 * sites.index(o['t1']) + 1] for o in data] D2R = [D[2 * sites.index(o['t2'])] for o in data] D2L = [D[2 * sites.index(o['t2']) + 1] for o in data] lrll = data['lrvis'] / data['llvis'] lrrr = data['lrvis'] / data['rrvis'] rlll = data['rlvis'] / data['llvis'] rlrr = data['rlvis'] / data['rrvis'] lrll_sigma = data['lrsigma'] / np.abs(data['llvis']) lrrr_sigma = data['lrsigma'] / np.abs(data['rrvis']) rlll_sigma = data['rlsigma'] / np.abs(data['llvis']) rlrr_sigma = data['rlsigma'] / np.abs(data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] return np.mean(chisq) else: fpol_model = D[-2] cpol_model = np.real(D[-1]) fpol_data_1 = 2.0 * data['rlvis'] / 
(data['rrvis'] + data['llvis']) fpol_data_2 = 2.0 * np.conj(data['lrvis'] / (data['rrvis'] + data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(data['rrvis'] + data['llvis']) * data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(data['rrvis'] + data['llvis']) * data['lrsigma'] return 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im.rrvec, ARR, obs_test.unpack_dat(data, ['rr' + dtype])['rr' + dtype], data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im.llvec, ALL, obs_test.unpack_dat(data, ['ll' + dtype])['ll' + dtype], data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im.rlvec, ARL, obs_test.unpack_dat(data, ['rl' + dtype])['rl' + dtype], data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im.lrvec, ALR, obs_test.unpack_dat(data, ['lr' + dtype])['lr' + dtype], data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) return (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) print('Finding leakage for sites:', sites) def errfunc(Dpar): D = Dpar.astype(np.float64).view(dtype=np.complex128) if not inverse: for isite in range(len(sites)): obs_test.tarr['dr'][site_index[isite]] = D[2 * isite] obs_test.tarr['dl'][site_index[isite]] = D[2 * isite + 1] data = simobs.apply_jones_inverse(obs_test, dcal=False, verbose=False) else: data = obs.data <DeepExtract> if const_fpol: if inverse: fpol_model = D[-2] cpol_model = np.real(D[-1]) D1R = [D[2 * sites.index(o['t1'])] for o in data] D1L = [D[2 * sites.index(o['t1']) + 1] for o in data] D2R = [D[2 * sites.index(o['t2'])] for o in data] D2L = [D[2 * sites.index(o['t2']) + 1] for o in data] lrll = data['lrvis'] / data['llvis'] lrrr = data['lrvis'] / data['rrvis'] rlll = data['rlvis'] / data['llvis'] rlrr = data['rlvis'] / data['rrvis'] lrll_sigma = data['lrsigma'] / np.abs(data['llvis']) lrrr_sigma = data['lrsigma'] / np.abs(data['rrvis']) rlll_sigma = data['rlsigma'] / np.abs(data['llvis']) rlrr_sigma = data['rlsigma'] / np.abs(data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] chisq = np.mean(chisq) else: fpol_model = D[-2] cpol_model = np.real(D[-1]) fpol_data_1 = 2.0 * data['rlvis'] / (data['rrvis'] + data['llvis']) fpol_data_2 = 2.0 * np.conj(data['lrvis'] / (data['rrvis'] + data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(data['rrvis'] + data['llvis']) * data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(data['rrvis'] + data['llvis']) * data['lrsigma'] chisq = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = 
chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(data, ['rr' + dtype])['rr' + dtype], data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(data, ['ll' + dtype])['ll' + dtype], data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(data, ['rl' + dtype])['rl' + dtype], data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(data, ['lr' + dtype])['lr' + dtype], data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) </DeepExtract> chisq_D = np.sum(np.abs(D / leakage_tol) ** 2) return chisq + chisq_D optdict = {'maxiter': MAXIT} Dpar_guess = np.zeros((len(sites) + const_fpol * 2) * 2, dtype=np.complex128).view(dtype=np.float64) print('Minimizing...') res = opt.minimize(errfunc, Dpar_guess, method=minimizer_method, options=optdict) D_fit = res.x.astype(np.float64).view(dtype=np.complex128) for isite in range(len(sites)): obs_test.tarr['dr'][site_index[isite]] = D_fit[2 * isite] obs_test.tarr['dl'][site_index[isite]] = D_fit[2 * isite + 1] obs_test.data = simobs.apply_jones_inverse(obs_test, dcal=False, verbose=False) obs_test.dcal = True for j in range(len(obs_test.tarr)): if obs_test.tarr[j]['site'] in sites: continue obs_test.tarr[j]['dr'] = obs.tarr[j]['dr'] obs_test.tarr[j]['dl'] = obs.tarr[j]['dl'] if show_solution: if inverse is False: <DeepExtract> if const_fpol: if inverse: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) D1R = [D_fit[2 * sites.index(o['t1'])] for o in obs.switch_polrep('circ').data] D1L = [D_fit[2 * sites.index(o['t1']) + 1] for o in obs.switch_polrep('circ').data] D2R = [D_fit[2 * sites.index(o['t2'])] for o in obs.switch_polrep('circ').data] D2L = [D_fit[2 * sites.index(o['t2']) + 1] for o in obs.switch_polrep('circ').data] lrll = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['llvis'] lrrr = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['rrvis'] rlll = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['llvis'] rlrr = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['rrvis'] lrll_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) lrrr_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) rlll_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) rlrr_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / 
rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] chisq_orig = np.mean(chisq) else: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) fpol_data_1 = 2.0 * obs.switch_polrep('circ').data['rlvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) fpol_data_2 = 2.0 * np.conj(obs.switch_polrep('circ').data['lrvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['lrsigma'] chisq_orig = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rr' + dtype])['rr' + dtype], obs.switch_polrep('circ').data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['ll' + dtype])['ll' + dtype], obs.switch_polrep('circ').data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rl' + dtype])['rl' + dtype], obs.switch_polrep('circ').data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['lr' + dtype])['lr' + dtype], obs.switch_polrep('circ').data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq_orig = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) </DeepExtract> <DeepExtract> if const_fpol: if inverse: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) D1R = [D_fit[2 * sites.index(o['t1'])] for o in obs_test.data] D1L = [D_fit[2 * sites.index(o['t1']) + 1] for o in obs_test.data] D2R = [D_fit[2 * sites.index(o['t2'])] for o in obs_test.data] D2L = [D_fit[2 * sites.index(o['t2']) + 1] for o in obs_test.data] lrll = obs_test.data['lrvis'] / obs_test.data['llvis'] lrrr = obs_test.data['lrvis'] / obs_test.data['rrvis'] rlll = obs_test.data['rlvis'] / obs_test.data['llvis'] rlrr = obs_test.data['rlvis'] / obs_test.data['rrvis'] lrll_sigma = obs_test.data['lrsigma'] / np.abs(obs_test.data['llvis']) lrrr_sigma = obs_test.data['lrsigma'] / np.abs(obs_test.data['rrvis']) rlll_sigma = obs_test.data['rlsigma'] / np.abs(obs_test.data['llvis']) rlrr_sigma = obs_test.data['rlsigma'] / np.abs(obs_test.data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / 
rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] chisq_new = np.mean(chisq) else: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) fpol_data_1 = 2.0 * obs_test.data['rlvis'] / (obs_test.data['rrvis'] + obs_test.data['llvis']) fpol_data_2 = 2.0 * np.conj(obs_test.data['lrvis'] / (obs_test.data['rrvis'] + obs_test.data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(obs_test.data['rrvis'] + obs_test.data['llvis']) * obs_test.data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(obs_test.data['rrvis'] + obs_test.data['llvis']) * obs_test.data['lrsigma'] chisq_new = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(obs_test.data, ['rr' + dtype])['rr' + dtype], obs_test.data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(obs_test.data, ['ll' + dtype])['ll' + dtype], obs_test.data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(obs_test.data, ['rl' + dtype])['rl' + dtype], obs_test.data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(obs_test.data, ['lr' + dtype])['lr' + dtype], obs_test.data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq_new = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) </DeepExtract> else: <DeepExtract> if const_fpol: if inverse: fpol_model = D_fit * 0[-2] cpol_model = np.real(D_fit * 0[-1]) D1R = [D_fit * 0[2 * sites.index(o['t1'])] for o in obs.switch_polrep('circ').data] D1L = [D_fit * 0[2 * sites.index(o['t1']) + 1] for o in obs.switch_polrep('circ').data] D2R = [D_fit * 0[2 * sites.index(o['t2'])] for o in obs.switch_polrep('circ').data] D2L = [D_fit * 0[2 * sites.index(o['t2']) + 1] for o in obs.switch_polrep('circ').data] lrll = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['llvis'] lrrr = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['rrvis'] rlll = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['llvis'] rlrr = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['rrvis'] lrll_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) lrrr_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) rlll_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) rlrr_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, 
np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] chisq_orig = np.mean(chisq) else: fpol_model = D_fit * 0[-2] cpol_model = np.real(D_fit * 0[-1]) fpol_data_1 = 2.0 * obs.switch_polrep('circ').data['rlvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) fpol_data_2 = 2.0 * np.conj(obs.switch_polrep('circ').data['lrvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['lrsigma'] chisq_orig = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rr' + dtype])['rr' + dtype], obs.switch_polrep('circ').data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['ll' + dtype])['ll' + dtype], obs.switch_polrep('circ').data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rl' + dtype])['rl' + dtype], obs.switch_polrep('circ').data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['lr' + dtype])['lr' + dtype], obs.switch_polrep('circ').data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq_orig = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) </DeepExtract> <DeepExtract> if const_fpol: if inverse: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) D1R = [D_fit[2 * sites.index(o['t1'])] for o in obs.switch_polrep('circ').data] D1L = [D_fit[2 * sites.index(o['t1']) + 1] for o in obs.switch_polrep('circ').data] D2R = [D_fit[2 * sites.index(o['t2'])] for o in obs.switch_polrep('circ').data] D2L = [D_fit[2 * sites.index(o['t2']) + 1] for o in obs.switch_polrep('circ').data] lrll = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['llvis'] lrrr = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['rrvis'] rlll = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['llvis'] rlrr = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['rrvis'] lrll_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) lrrr_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) rlll_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) rlrr_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + 
cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] chisq_new = np.mean(chisq) else: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) fpol_data_1 = 2.0 * obs.switch_polrep('circ').data['rlvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) fpol_data_2 = 2.0 * np.conj(obs.switch_polrep('circ').data['lrvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['lrsigma'] chisq_new = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rr' + dtype])['rr' + dtype], obs.switch_polrep('circ').data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['ll' + dtype])['ll' + dtype], obs.switch_polrep('circ').data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rl' + dtype])['rl' + dtype], obs.switch_polrep('circ').data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['lr' + dtype])['lr' + dtype], obs.switch_polrep('circ').data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq_new = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) </DeepExtract> print('Original chi-squared: {:.4f}'.format(chisq_orig)) print('New chi-squared: {:.4f}\n'.format(chisq_new)) for isite in range(len(sites)): print(sites[isite]) print(' D_R: {:.4f}'.format(D_fit[2 * isite])) print(' D_L: {:.4f}\n'.format(D_fit[2 * isite + 1])) if const_fpol: print('Source Fractional Polarization Magnitude: {:.4f}'.format(np.abs(D_fit[-2]))) print('Source Fractional Polarization EVPA [deg]: {:.4f}\n'.format(90.0 / np.pi * np.angle(D_fit[-2]))) if inverse: print('Source Fractional Circular Polarization: {:.4f}'.format(np.real(D_fit[-1]))) tstop = time.time() print('\nleakage_cal time: %f s' % (tstop - tstart)) if obs_apply is not False: obs_test = obs_apply.copy() obs_test.tarr['dr'] *= 0.0 obs_test.tarr['dl'] *= 0.0 for isite in range(len(sites)): if sites[isite] in list(obs_test.tarr['site']): i_site = list(obs_test.tarr['site']).index(sites[isite]) obs_test.tarr['dr'][i_site] = D_fit[2 * isite] obs_test.tarr['dl'][i_site] = D_fit[2 * isite + 1] obs_test.data = simobs.apply_jones_inverse(obs_test, dcal=False, verbose=False) obs_test.dcal = True for j in range(len(obs_test.tarr)): if obs_test.tarr[j]['site'] in sites: continue obs_test.tarr[j]['dr'] = 
obs_apply.tarr[j]['dr'] obs_test.tarr[j]['dl'] = obs_apply.tarr[j]['dl'] else: obs_test = obs_test.switch_polrep(obs.polrep) if not const_fpol: return obs_test elif inverse: return [obs_test, D_fit[-2], D_fit[-1]] else: return [obs_test, D_fit[-2]]
def leakage_cal(obs, im=None, sites=[], leakage_tol=0.1, pol_fit=['RL', 'LR'], dtype='vis', const_fpol=False, inverse=False, minimizer_method='L-BFGS-B', ttype='direct', fft_pad_factor=2, show_solution=True, obs_apply=False): """Polarimetric calibration (detects and removes polarimetric leakage, based on consistency with a given image) Args: obs (Obsdata): The observation to be calibrated im (Image): the reference image used for calibration (not needed if using const_fpol = True) sites (list): list of sites to include in the polarimetric calibration. empty list calibrates all sites leakage_tol (float): leakage values exceeding this value will be disfavored by the prior pol_fit (list): list of visibilities to use; e.g., ['RL','LR'] or ['RR','LL','RL','LR'] dtype (str): Type of data to fit ('vis' for complex visibilities; 'amp' for just the amplitudes) const_fpol (bool): If true, solve for a single fractional polarization across all baselines in addition to leakage. For this option, the passed image is not used. minimizer_method (str): Method for scipy.optimize.minimize (e.g., 'CG', 'BFGS') ttype (str): if "fast" or "nfft" use FFT to produce visibilities. Else "direct" for DTFT fft_pad_factor (float): zero pad the image to fft_pad_factor * image size in FFT show_solution (bool): if True, display the solution as it is calculated Returns: (Obsdata): the calibrated observation, with computed leakage values added to the obs.tarr """ tstart = time.time() mask = [] if not const_fpol: im_circ = im.switch_polrep('circ') else: im_circ = None if dtype not in ['vis', 'amp']: raise Exception('dtype must be vis or amp') obs_test = obs.copy() obs_test = obs_test.switch_polrep('circ') if obs_test.frcal is False: print('Field rotation angles have not been corrected. 
Correcting now...') obs_test.data = simobs.apply_jones_inverse(obs_test, frcal=False, dcal=True, verbose=False) obs_test.frcal = True allsites = list(set(np.hstack((obs.data['t1'], obs.data['t2'])))) if len(sites) == 0: print('No stations specified for leakage calibration: defaulting to calibrating all !') sites = allsites for j in range(len(obs_test.tarr)): if obs_test.tarr[j]['site'] in sites: continue obs_test.tarr[j]['dr'] = obs_test.tarr[j]['dl'] = 0j sites = [s for s in sites if s in allsites] site_index = [list(obs.tarr['site']).index(s) for s in sites] if not const_fpol: (dataRR, sigmaRR, ARR) = iu.chisqdata(obs, im_circ, mask=mask, dtype=dtype, pol='RR', ttype=ttype, fft_pad_factor=fft_pad_factor) (dataLL, sigmaLL, ALL) = iu.chisqdata(obs, im_circ, mask=mask, dtype=dtype, pol='LL', ttype=ttype, fft_pad_factor=fft_pad_factor) (dataRL, sigmaRL, ARL) = iu.chisqdata(obs, im_circ, mask=mask, dtype=dtype, pol='RL', ttype=ttype, fft_pad_factor=fft_pad_factor) (dataLR, sigmaLR, ALR) = iu.chisqdata(obs, im_circ, mask=mask, dtype=dtype, pol='LR', ttype=ttype, fft_pad_factor=fft_pad_factor) if inverse: el1 = obs.unpack(['el1'], ang_unit='rad')['el1'] el2 = obs.unpack(['el2'], ang_unit='rad')['el2'] par1 = obs.unpack(['par_ang1'], ang_unit='rad')['par_ang1'] par2 = obs.unpack(['par_ang2'], ang_unit='rad')['par_ang2'] fr_elev1 = np.array([obs.tarr[obs.tkey[o['t1']]]['fr_elev'] for o in obs.data]) fr_elev2 = np.array([obs.tarr[obs.tkey[o['t2']]]['fr_elev'] for o in obs.data]) fr_par1 = np.array([obs.tarr[obs.tkey[o['t1']]]['fr_par'] for o in obs.data]) fr_par2 = np.array([obs.tarr[obs.tkey[o['t2']]]['fr_par'] for o in obs.data]) fr_off1 = np.array([obs.tarr[obs.tkey[o['t1']]]['fr_off'] for o in obs.data]) fr_off2 = np.array([obs.tarr[obs.tkey[o['t2']]]['fr_off'] for o in obs.data]) fr1 = fr_elev1 * el1 + fr_par1 * par1 + fr_off1 * np.pi / 180.0 fr2 = fr_elev2 * el2 + fr_par2 * par2 + fr_off2 * np.pi / 180.0 def chisq_total(data, im, D, inverse=False): if const_fpol: if inverse: fpol_model = D[-2] cpol_model = np.real(D[-1]) D1R = [D[2 * sites.index(o['t1'])] for o in data] D1L = [D[2 * sites.index(o['t1']) + 1] for o in data] D2R = [D[2 * sites.index(o['t2'])] for o in data] D2L = [D[2 * sites.index(o['t2']) + 1] for o in data] lrll = data['lrvis'] / data['llvis'] lrrr = data['lrvis'] / data['rrvis'] rlll = data['rlvis'] / data['llvis'] rlrr = data['rlvis'] / data['rrvis'] lrll_sigma = data['lrsigma'] / np.abs(data['llvis']) lrrr_sigma = data['lrsigma'] / np.abs(data['rrvis']) rlll_sigma = data['rlsigma'] / np.abs(data['llvis']) rlrr_sigma = data['rlsigma'] / np.abs(data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] return np.mean(chisq) else: fpol_model = D[-2] cpol_model = np.real(D[-1]) fpol_data_1 = 2.0 * data['rlvis'] / 
(data['rrvis'] + data['llvis']) fpol_data_2 = 2.0 * np.conj(data['lrvis'] / (data['rrvis'] + data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(data['rrvis'] + data['llvis']) * data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(data['rrvis'] + data['llvis']) * data['lrsigma'] return 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im.rrvec, ARR, obs_test.unpack_dat(data, ['rr' + dtype])['rr' + dtype], data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im.llvec, ALL, obs_test.unpack_dat(data, ['ll' + dtype])['ll' + dtype], data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im.rlvec, ARL, obs_test.unpack_dat(data, ['rl' + dtype])['rl' + dtype], data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im.lrvec, ALR, obs_test.unpack_dat(data, ['lr' + dtype])['lr' + dtype], data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) return (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) print('Finding leakage for sites:', sites) def errfunc(Dpar): D = Dpar.astype(np.float64).view(dtype=np.complex128) if not inverse: for isite in range(len(sites)): obs_test.tarr['dr'][site_index[isite]] = D[2 * isite] obs_test.tarr['dl'][site_index[isite]] = D[2 * isite + 1] data = simobs.apply_jones_inverse(obs_test, dcal=False, verbose=False) else: data = obs.data if const_fpol: if inverse: fpol_model = D[-2] cpol_model = np.real(D[-1]) D1R = [D[2 * sites.index(o['t1'])] for o in data] D1L = [D[2 * sites.index(o['t1']) + 1] for o in data] D2R = [D[2 * sites.index(o['t2'])] for o in data] D2L = [D[2 * sites.index(o['t2']) + 1] for o in data] lrll = data['lrvis'] / data['llvis'] lrrr = data['lrvis'] / data['rrvis'] rlll = data['rlvis'] / data['llvis'] rlrr = data['rlvis'] / data['rrvis'] lrll_sigma = data['lrsigma'] / np.abs(data['llvis']) lrrr_sigma = data['lrsigma'] / np.abs(data['rrvis']) rlll_sigma = data['rlsigma'] / np.abs(data['llvis']) rlrr_sigma = data['rlsigma'] / np.abs(data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] chisq = np.mean(chisq) else: fpol_model = D[-2] cpol_model = np.real(D[-1]) fpol_data_1 = 2.0 * data['rlvis'] / (data['rrvis'] + data['llvis']) fpol_data_2 = 2.0 * np.conj(data['lrvis'] / (data['rrvis'] + data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(data['rrvis'] + data['llvis']) * data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(data['rrvis'] + data['llvis']) * data['lrsigma'] chisq = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = 
chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(data, ['rr' + dtype])['rr' + dtype], data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(data, ['ll' + dtype])['ll' + dtype], data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(data, ['rl' + dtype])['rl' + dtype], data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(data, ['lr' + dtype])['lr' + dtype], data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) chisq_D = np.sum(np.abs(D / leakage_tol) ** 2) return chisq + chisq_D optdict = {'maxiter': MAXIT} Dpar_guess = np.zeros((len(sites) + const_fpol * 2) * 2, dtype=np.complex128).view(dtype=np.float64) print('Minimizing...') res = opt.minimize(errfunc, Dpar_guess, method=minimizer_method, options=optdict) D_fit = res.x.astype(np.float64).view(dtype=np.complex128) for isite in range(len(sites)): obs_test.tarr['dr'][site_index[isite]] = D_fit[2 * isite] obs_test.tarr['dl'][site_index[isite]] = D_fit[2 * isite + 1] obs_test.data = simobs.apply_jones_inverse(obs_test, dcal=False, verbose=False) obs_test.dcal = True for j in range(len(obs_test.tarr)): if obs_test.tarr[j]['site'] in sites: continue obs_test.tarr[j]['dr'] = obs.tarr[j]['dr'] obs_test.tarr[j]['dl'] = obs.tarr[j]['dl'] if show_solution: if inverse is False: if const_fpol: if inverse: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) D1R = [D_fit[2 * sites.index(o['t1'])] for o in obs.switch_polrep('circ').data] D1L = [D_fit[2 * sites.index(o['t1']) + 1] for o in obs.switch_polrep('circ').data] D2R = [D_fit[2 * sites.index(o['t2'])] for o in obs.switch_polrep('circ').data] D2L = [D_fit[2 * sites.index(o['t2']) + 1] for o in obs.switch_polrep('circ').data] lrll = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['llvis'] lrrr = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['rrvis'] rlll = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['llvis'] rlrr = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['rrvis'] lrll_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) lrrr_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) rlll_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) rlrr_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - 
rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] chisq_orig = np.mean(chisq) else: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) fpol_data_1 = 2.0 * obs.switch_polrep('circ').data['rlvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) fpol_data_2 = 2.0 * np.conj(obs.switch_polrep('circ').data['lrvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['lrsigma'] chisq_orig = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rr' + dtype])['rr' + dtype], obs.switch_polrep('circ').data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['ll' + dtype])['ll' + dtype], obs.switch_polrep('circ').data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rl' + dtype])['rl' + dtype], obs.switch_polrep('circ').data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['lr' + dtype])['lr' + dtype], obs.switch_polrep('circ').data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq_orig = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) if const_fpol: if inverse: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) D1R = [D_fit[2 * sites.index(o['t1'])] for o in obs_test.data] D1L = [D_fit[2 * sites.index(o['t1']) + 1] for o in obs_test.data] D2R = [D_fit[2 * sites.index(o['t2'])] for o in obs_test.data] D2L = [D_fit[2 * sites.index(o['t2']) + 1] for o in obs_test.data] lrll = obs_test.data['lrvis'] / obs_test.data['llvis'] lrrr = obs_test.data['lrvis'] / obs_test.data['rrvis'] rlll = obs_test.data['rlvis'] / obs_test.data['llvis'] rlrr = obs_test.data['rlvis'] / obs_test.data['rrvis'] lrll_sigma = obs_test.data['lrsigma'] / np.abs(obs_test.data['llvis']) lrrr_sigma = obs_test.data['lrsigma'] / np.abs(obs_test.data['rrvis']) rlll_sigma = obs_test.data['rlsigma'] / np.abs(obs_test.data['llvis']) rlrr_sigma = obs_test.data['rlsigma'] / np.abs(obs_test.data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 
2]) chisq = chisq[~np.isnan(chisq)] chisq_new = np.mean(chisq) else: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) fpol_data_1 = 2.0 * obs_test.data['rlvis'] / (obs_test.data['rrvis'] + obs_test.data['llvis']) fpol_data_2 = 2.0 * np.conj(obs_test.data['lrvis'] / (obs_test.data['rrvis'] + obs_test.data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(obs_test.data['rrvis'] + obs_test.data['llvis']) * obs_test.data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(obs_test.data['rrvis'] + obs_test.data['llvis']) * obs_test.data['lrsigma'] chisq_new = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(obs_test.data, ['rr' + dtype])['rr' + dtype], obs_test.data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(obs_test.data, ['ll' + dtype])['ll' + dtype], obs_test.data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(obs_test.data, ['rl' + dtype])['rl' + dtype], obs_test.data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(obs_test.data, ['lr' + dtype])['lr' + dtype], obs_test.data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq_new = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) else: if const_fpol: if inverse: fpol_model = D_fit * 0[-2] cpol_model = np.real(D_fit * 0[-1]) D1R = [D_fit * 0[2 * sites.index(o['t1'])] for o in obs.switch_polrep('circ').data] D1L = [D_fit * 0[2 * sites.index(o['t1']) + 1] for o in obs.switch_polrep('circ').data] D2R = [D_fit * 0[2 * sites.index(o['t2'])] for o in obs.switch_polrep('circ').data] D2L = [D_fit * 0[2 * sites.index(o['t2']) + 1] for o in obs.switch_polrep('circ').data] lrll = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['llvis'] lrrr = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['rrvis'] rlll = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['llvis'] rlrr = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['rrvis'] lrll_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) lrrr_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) rlll_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) rlrr_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * (1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 
2]) chisq = chisq[~np.isnan(chisq)] chisq_orig = np.mean(chisq) else: fpol_model = D_fit * 0[-2] cpol_model = np.real(D_fit * 0[-1]) fpol_data_1 = 2.0 * obs.switch_polrep('circ').data['rlvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) fpol_data_2 = 2.0 * np.conj(obs.switch_polrep('circ').data['lrvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['lrsigma'] chisq_orig = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rr' + dtype])['rr' + dtype], obs.switch_polrep('circ').data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['ll' + dtype])['ll' + dtype], obs.switch_polrep('circ').data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rl' + dtype])['rl' + dtype], obs.switch_polrep('circ').data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['lr' + dtype])['lr' + dtype], obs.switch_polrep('circ').data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq_orig = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) if const_fpol: if inverse: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) D1R = [D_fit[2 * sites.index(o['t1'])] for o in obs.switch_polrep('circ').data] D1L = [D_fit[2 * sites.index(o['t1']) + 1] for o in obs.switch_polrep('circ').data] D2R = [D_fit[2 * sites.index(o['t2'])] for o in obs.switch_polrep('circ').data] D2L = [D_fit[2 * sites.index(o['t2']) + 1] for o in obs.switch_polrep('circ').data] lrll = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['llvis'] lrrr = obs.switch_polrep('circ').data['lrvis'] / obs.switch_polrep('circ').data['rrvis'] rlll = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['llvis'] rlrr = obs.switch_polrep('circ').data['rlvis'] / obs.switch_polrep('circ').data['rrvis'] lrll_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) lrrr_sigma = obs.switch_polrep('circ').data['lrsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) rlll_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['llvis']) rlrr_sigma = obs.switch_polrep('circ').data['rlsigma'] / np.abs(obs.switch_polrep('circ').data['rrvis']) lrll_model = np.conjugate(fpol_model) * (1.0 + cpol_model) + D1L * np.exp(-2j * fr1) * (1.0 + 2.0 * cpol_model) + np.conjugate(D2R) * np.exp(-2j * fr2) lrrr_model = np.conjugate(fpol_model) * (1.0 - cpol_model) + D1L * np.exp(-2j * fr1) + np.conjugate(D2R) * np.exp(-2j * fr2) * (1.0 - 2.0 * cpol_model) rlll_model = fpol_model * (1.0 + cpol_model) + D1R * np.exp(2j * fr1) + np.conjugate(D2L) * np.exp(2j * fr2) * (1.0 + 2.0 * cpol_model) rlrr_model = fpol_model * 
(1.0 - cpol_model) + D1R * np.exp(2j * fr1) * (1.0 - 2.0 * cpol_model) + np.conjugate(D2L) * np.exp(2j * fr2) chisq = np.concatenate([np.abs((lrll - lrll_model) / lrll_sigma) ** 2, np.abs((lrrr - lrrr_model) / lrrr_sigma) ** 2, np.abs((rlll - rlll_model) / rlll_sigma) ** 2, np.abs((rlrr - rlrr_model) / rlrr_sigma) ** 2]) chisq = chisq[~np.isnan(chisq)] chisq_new = np.mean(chisq) else: fpol_model = D_fit[-2] cpol_model = np.real(D_fit[-1]) fpol_data_1 = 2.0 * obs.switch_polrep('circ').data['rlvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) fpol_data_2 = 2.0 * np.conj(obs.switch_polrep('circ').data['lrvis'] / (obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis'])) fpol_sigma_1 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['rlsigma'] fpol_sigma_2 = 2.0 / np.abs(obs.switch_polrep('circ').data['rrvis'] + obs.switch_polrep('circ').data['llvis']) * obs.switch_polrep('circ').data['lrsigma'] chisq_new = 0.5 * np.mean(np.abs((fpol_model - fpol_data_1) / fpol_sigma_1) ** 2 + np.abs((fpol_model - fpol_data_2) / fpol_sigma_2) ** 2) else: chisq_RR = chisq_LL = chisq_RL = chisq_LR = 0.0 if 'RR' in pol_fit: chisq_RR = iu.chisq(im_circ.rrvec, ARR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rr' + dtype])['rr' + dtype], obs.switch_polrep('circ').data['rrsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LL' in pol_fit: chisq_LL = iu.chisq(im_circ.llvec, ALL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['ll' + dtype])['ll' + dtype], obs.switch_polrep('circ').data['llsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'RL' in pol_fit: chisq_RL = iu.chisq(im_circ.rlvec, ARL, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['rl' + dtype])['rl' + dtype], obs.switch_polrep('circ').data['rlsigma'], dtype=dtype, ttype=ttype, mask=mask) if 'LR' in pol_fit: chisq_LR = iu.chisq(im_circ.lrvec, ALR, obs_test.unpack_dat(obs.switch_polrep('circ').data, ['lr' + dtype])['lr' + dtype], obs.switch_polrep('circ').data['lrsigma'], dtype=dtype, ttype=ttype, mask=mask) chisq_new = (chisq_RR + chisq_LL + chisq_RL + chisq_LR) / len(pol_fit) print('Original chi-squared: {:.4f}'.format(chisq_orig)) print('New chi-squared: {:.4f}\n'.format(chisq_new)) for isite in range(len(sites)): print(sites[isite]) print(' D_R: {:.4f}'.format(D_fit[2 * isite])) print(' D_L: {:.4f}\n'.format(D_fit[2 * isite + 1])) if const_fpol: print('Source Fractional Polarization Magnitude: {:.4f}'.format(np.abs(D_fit[-2]))) print('Source Fractional Polarization EVPA [deg]: {:.4f}\n'.format(90.0 / np.pi * np.angle(D_fit[-2]))) if inverse: print('Source Fractional Circular Polarization: {:.4f}'.format(np.real(D_fit[-1]))) tstop = time.time() print('\nleakage_cal time: %f s' % (tstop - tstart)) if obs_apply is not False: obs_test = obs_apply.copy() obs_test.tarr['dr'] *= 0.0 obs_test.tarr['dl'] *= 0.0 for isite in range(len(sites)): if sites[isite] in list(obs_test.tarr['site']): i_site = list(obs_test.tarr['site']).index(sites[isite]) obs_test.tarr['dr'][i_site] = D_fit[2 * isite] obs_test.tarr['dl'][i_site] = D_fit[2 * isite + 1] obs_test.data = simobs.apply_jones_inverse(obs_test, dcal=False, verbose=False) obs_test.dcal = True for j in range(len(obs_test.tarr)): if obs_test.tarr[j]['site'] in sites: continue obs_test.tarr[j]['dr'] = obs_apply.tarr[j]['dr'] obs_test.tarr[j]['dl'] = obs_apply.tarr[j]['dl'] else: obs_test = obs_test.switch_polrep(obs.polrep) if not const_fpol: return 
obs_test elif inverse: return [obs_test, D_fit[-2], D_fit[-1]] else: return [obs_test, D_fit[-2]]
eht-imaging
positive
def test_keyword_arg_lambda_method(self): """Test with lambda methods instead of usual methods.""" (typo, good) = ('abcdf', 'abcdef') sugg = quote(good) code = 'class MyClass:\n\tfunc = lambda self, ' + good + ': None\nMyClass().func({0}=1)' <DeepExtract> (bad_code, good_code) = [code.format(arg) for arg in args] </DeepExtract> <DeepExtract> sugg = sorted(listify(sugg, [], str)) (error_type, error_msg) = UNEXPECTEDKWARG details = 'Running following code :\n---\n{0}\n---'.format(bad_code) if PythonEnvRange(version_range, interpreters).contains_current_env(): exc = get_exception(bad_code) self.assertFalse(exc is None, 'No exc thrown.' + details) (type_caught, value, traceback) = exc suggestions = sorted(get_suggestions_for_exception(value, traceback)) self.log_exception(bad_code, exc, suggestions) self.assertTrue(isinstance(value, type_caught)) self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details) msg = next((a for a in value.args if isinstance(a, str)), '') if error_msg: self.assertRegexp(msg, error_msg, details) self.assertEqual(suggestions, sugg, details) </DeepExtract> <DeepExtract> details = 'Running following code :\n---\n{0}\n---'.format(good_code) if PythonEnvRange(version_range, interpreters).contains_current_env(): exc = get_exception(good_code) self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details) </DeepExtract>
def test_keyword_arg_lambda_method(self): """Test with lambda methods instead of usual methods.""" (typo, good) = ('abcdf', 'abcdef') sugg = quote(good) code = 'class MyClass:\n\tfunc = lambda self, ' + good + ': None\nMyClass().func({0}=1)' (bad_code, good_code) = [code.format(arg) for arg in args] sugg = sorted(listify(sugg, [], str)) (error_type, error_msg) = UNEXPECTEDKWARG details = 'Running following code :\n---\n{0}\n---'.format(bad_code) if PythonEnvRange(version_range, interpreters).contains_current_env(): exc = get_exception(bad_code) self.assertFalse(exc is None, 'No exc thrown.' + details) (type_caught, value, traceback) = exc suggestions = sorted(get_suggestions_for_exception(value, traceback)) self.log_exception(bad_code, exc, suggestions) self.assertTrue(isinstance(value, type_caught)) self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details) msg = next((a for a in value.args if isinstance(a, str)), '') if error_msg: self.assertRegexp(msg, error_msg, details) self.assertEqual(suggestions, sugg, details) details = 'Running following code :\n---\n{0}\n---'.format(good_code) if PythonEnvRange(version_range, interpreters).contains_current_env(): exc = get_exception(good_code) self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details) </DeepExtract>
DidYouMean-Python
positive
def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if self._bootstrap_type == 'soft': bootstrap_target_tensor = self._alpha * target_tensor + (1.0 - self._alpha) * torch.sigmoid(prediction_tensor) else: bootstrap_target_tensor = self._alpha * target_tensor + (1.0 - self._alpha) * (torch.sigmoid(prediction_tensor) > 0.5).float() <DeepExtract> loss = torch.clamp(prediction_tensor, min=0) - prediction_tensor * bootstrap_target_tensor.type_as(prediction_tensor) loss += torch.log1p(torch.exp(-torch.abs(prediction_tensor))) loss_mask = loss < 10000 loss_mask = loss_mask.type(torch.FloatTensor).cuda() loss = loss * loss_mask per_entry_cross_ent = loss </DeepExtract> return per_entry_cross_ent * weights.unsqueeze(2)
def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if self._bootstrap_type == 'soft': bootstrap_target_tensor = self._alpha * target_tensor + (1.0 - self._alpha) * torch.sigmoid(prediction_tensor) else: bootstrap_target_tensor = self._alpha * target_tensor + (1.0 - self._alpha) * (torch.sigmoid(prediction_tensor) > 0.5).float() loss = torch.clamp(prediction_tensor, min=0) - prediction_tensor * bootstrap_target_tensor.type_as(prediction_tensor) loss += torch.log1p(torch.exp(-torch.abs(prediction_tensor))) loss_mask = loss < 10000 loss_mask = loss_mask.type(torch.FloatTensor).cuda() loss = loss * loss_mask per_entry_cross_ent = loss return per_entry_cross_ent * weights.unsqueeze(2)
CLOCs
positive
def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. dtype (output dtype, optional): Defaults to np.float32 Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) <DeepExtract> type_map = {torch.float16: np.dtype(np.float16), torch.float32: np.dtype(np.float32), torch.float16: np.dtype(np.float64), torch.int32: np.dtype(np.int32), torch.int64: np.dtype(np.int64), torch.uint8: np.dtype(np.uint8)} dtype = type_map[dims.dtype] </DeepExtract> if isinstance(origin, float): origin = [origin] * ndim corners_norm = np.stack(np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1).astype(dtype) if ndim == 2: corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dtype) corners_norm = torch.from_numpy(corners_norm).type_as(dims) corners = dims.view(-1, 1, ndim) * corners_norm.view(1, 2 ** ndim, ndim) return corners
def corners_nd(dims, origin=0.5): """generate relative box corners based on length per dim and origin point. Args: dims (float array, shape=[N, ndim]): array of length per dim origin (list or array or float): origin point relate to smallest point. dtype (output dtype, optional): Defaults to np.float32 Returns: float array, shape=[N, 2 ** ndim, ndim]: returned corners. point layout example: (2d) x0y0, x0y1, x1y0, x1y1; (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 where x0 < x1, y0 < y1, z0 < z1 """ ndim = int(dims.shape[1]) type_map = {torch.float16: np.dtype(np.float16), torch.float32: np.dtype(np.float32), torch.float16: np.dtype(np.float64), torch.int32: np.dtype(np.int32), torch.int64: np.dtype(np.int64), torch.uint8: np.dtype(np.uint8)} dtype = type_map[dims.dtype] if isinstance(origin, float): origin = [origin] * ndim corners_norm = np.stack(np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1).astype(dtype) if ndim == 2: corners_norm = corners_norm[[0, 1, 3, 2]] elif ndim == 3: corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] corners_norm = corners_norm - np.array(origin, dtype=dtype) corners_norm = torch.from_numpy(corners_norm).type_as(dims) corners = dims.view(-1, 1, ndim) * corners_norm.view(1, 2 ** ndim, ndim) return corners
CenterPoint
positive
def convert_reg_contents(registers): for reg in registers: if reg[0] == 'F' and (not type(registers[reg]) is str): if registers[reg] != 0: <DeepExtract> registers[reg] = hex(struct.unpack('<I', struct.pack('<f', registers[reg]))[0]) </DeepExtract> pass else: hex_list = hex(int(registers[reg])).split('x') hex_list[1] = hex_list[1].upper() if '-' in hex_list[0]: registers[reg] = '-' + hex_list[1] else: registers[reg] = hex_list[1]
def convert_reg_contents(registers): for reg in registers: if reg[0] == 'F' and (not type(registers[reg]) is str): if registers[reg] != 0: registers[reg] = hex(struct.unpack('<I', struct.pack('<f', registers[reg]))[0]) pass else: hex_list = hex(int(registers[reg])).split('x') hex_list[1] = hex_list[1].upper() if '-' in hex_list[0]: registers[reg] = '-' + hex_list[1] else: registers[reg] = hex_list[1]
Emu86
positive
def preprocess(self, docs, labels, attribute): t = Tokenizer(num_words=20000, filters='!"#$%&()*+,-./:;<=>?@[\\]^`{|}~\t\n') t.fit_on_texts(docs) encoded_docs = t.texts_to_sequences(docs) print('BEFORE Pruning:') <DeepExtract> result = [len(x) for x in encoded_docs] print('Min=%d, Mean=%d, Max=%d' % (numpy.min(result), numpy.mean(result), numpy.max(result))) data_max_seq_length = numpy.max(result) + 1 if type(attribute) == list: for (idx, attribute) in enumerate(attribute): Y = labels[:, idx] self.get_single_statistics(Y, attribute) elif type(attribute) == str: self.get_single_statistics(labels, attribute) return data_max_seq_length </DeepExtract> idx2word = {v: k for (k, v) in t.word_index.items()} stopwrd = set(stopwords.words('english')) def abbreviation_handler(text): ln = text.lower() ln = ln.replace("'t", ' not') ln = ln.replace("'s", ' is') ln = ln.replace("'ll", ' will') ln = ln.replace("'ve", ' have') ln = ln.replace("'re", ' are') ln = ln.replace("'m", ' am') ln = ln.replace("'", ' ') return ln def stopwords_handler(text): words = text.split() new_words = [w for w in words if w not in stopwrd] return ' '.join(new_words) def sequence_to_text(listOfSequences): tokenized_list = [] for text in listOfSequences: newText = '' for num in text: newText += idx2word[num] + ' ' <DeepExtract> ln = newText.lower() ln = ln.replace("'t", ' not') ln = ln.replace("'s", ' is') ln = ln.replace("'ll", ' will') ln = ln.replace("'ve", ' have') ln = ln.replace("'re", ' are') ln = ln.replace("'m", ' am') ln = ln.replace("'", ' ') newText = ln </DeepExtract> <DeepExtract> words = newText.split() new_words = [w for w in words if w not in stopwrd] newText = ' '.join(new_words) </DeepExtract> tokenized_list.append(newText) return tokenized_list <DeepExtract> tokenized_list = [] for text in encoded_docs: newText = '' for num in text: newText += idx2word[num] + ' ' newText = abbreviation_handler(newText) newText = stopwords_handler(newText) tokenized_list.append(newText) newLists = tokenized_list </DeepExtract> return newLists
def preprocess(self, docs, labels, attribute): t = Tokenizer(num_words=20000, filters='!"#$%&()*+,-./:;<=>?@[\\]^`{|}~\t\n') t.fit_on_texts(docs) encoded_docs = t.texts_to_sequences(docs) print('BEFORE Pruning:') result = [len(x) for x in encoded_docs] print('Min=%d, Mean=%d, Max=%d' % (numpy.min(result), numpy.mean(result), numpy.max(result))) data_max_seq_length = numpy.max(result) + 1 if type(attribute) == list: for (idx, attribute) in enumerate(attribute): Y = labels[:, idx] self.get_single_statistics(Y, attribute) elif type(attribute) == str: self.get_single_statistics(labels, attribute) return data_max_seq_length idx2word = {v: k for (k, v) in t.word_index.items()} stopwrd = set(stopwords.words('english')) def abbreviation_handler(text): ln = text.lower() ln = ln.replace("'t", ' not') ln = ln.replace("'s", ' is') ln = ln.replace("'ll", ' will') ln = ln.replace("'ve", ' have') ln = ln.replace("'re", ' are') ln = ln.replace("'m", ' am') ln = ln.replace("'", ' ') return ln def stopwords_handler(text): words = text.split() new_words = [w for w in words if w not in stopwrd] return ' '.join(new_words) def sequence_to_text(listOfSequences): tokenized_list = [] for text in listOfSequences: newText = '' for num in text: newText += idx2word[num] + ' ' ln = newText.lower() ln = ln.replace("'t", ' not') ln = ln.replace("'s", ' is') ln = ln.replace("'ll", ' will') ln = ln.replace("'ve", ' have') ln = ln.replace("'re", ' are') ln = ln.replace("'m", ' am') ln = ln.replace("'", ' ') newText = ln words = newText.split() new_words = [w for w in words if w not in stopwrd] newText = ' '.join(new_words) tokenized_list.append(newText) return tokenized_list tokenized_list = [] for text in encoded_docs: newText = '' for num in text: newText += idx2word[num] + ' ' newText = abbreviation_handler(newText) newText = stopwords_handler(newText) tokenized_list.append(newText) newLists = tokenized_list return newLists
automatic-personality-prediction
positive
def test_copy_entry_point(self): <DeepExtract> self.get_original_lines = mock.patch.object(models, 'get_original_lines', autospec=True, return_value=['PARAMS_FILE_NAME = not this', 'do not change']).start() self.open = mock.mock_open() mock.patch('builtins.open', self.open).start() </DeepExtract> file_id = 'file_id' params_file = 'params_file' models.copy_entry_point(file_id, params_file) self.open.assert_called_once_with(models._ENTRY_POINT_FORMAT.format(file_id), 'w') entry_file = self.open() entry_file.write.assert_any_call(f"PARAMS_FILE_NAME = '{params_file}'\n") entry_file.write.assert_any_call('do not change')
def test_copy_entry_point(self): self.get_original_lines = mock.patch.object(models, 'get_original_lines', autospec=True, return_value=['PARAMS_FILE_NAME = not this', 'do not change']).start() self.open = mock.mock_open() mock.patch('builtins.open', self.open).start() file_id = 'file_id' params_file = 'params_file' models.copy_entry_point(file_id, params_file) self.open.assert_called_once_with(models._ENTRY_POINT_FORMAT.format(file_id), 'w') entry_file = self.open() entry_file.write.assert_any_call(f"PARAMS_FILE_NAME = '{params_file}'\n") entry_file.write.assert_any_call('do not change')
cloud
positive
@pytest.mark.parametrize('value, expected', [('xxxayyy', 'xxxay'), ('cxa', 'cwzzz')]) def test_min_max_length_get_less_than(value, expected): <DeepExtract> sg = StringGenerator() sg.options['min_length'] = 3 sg.options['max_length'] = 5 sg.options['min_value'] = 'a' sg.options['max_value'] = 'z' sg.options['restrictions'] = 'bd' sg.prepare() sg = sg </DeepExtract> assert sg.get_less_than(value) == expected
@pytest.mark.parametrize('value, expected', [('xxxayyy', 'xxxay'), ('cxa', 'cwzzz')]) def test_min_max_length_get_less_than(value, expected): sg = StringGenerator() sg.options['min_length'] = 3 sg.options['max_length'] = 5 sg.options['min_value'] = 'a' sg.options['max_value'] = 'z' sg.options['restrictions'] = 'bd' sg.prepare() sg = sg assert sg.get_less_than(value) == expected
acsploit
positive
def div_q(a: ElementModPOrQorInt, b: ElementModPOrQorInt) -> ElementModQ: """Compute a/b mod q.""" <DeepExtract> if isinstance(b, BaseElement): b = b.value b = mpz(b) </DeepExtract> inverse = invert(b, _get_mpz(get_small_prime())) return mult_q(a, inverse)
def div_q(a: ElementModPOrQorInt, b: ElementModPOrQorInt) -> ElementModQ: """Compute a/b mod q.""" if isinstance(b, BaseElement): b = b.value b = mpz(b) inverse = invert(b, _get_mpz(get_small_prime())) return mult_q(a, inverse)
electionguard-python
positive
@unittest.skip('temporarily disabled') def test_bill_indexes(): parser = argparse.ArgumentParser(description='generic billy util') subparsers = parser.add_subparsers(dest='subcommand') class StubObj(object): collections = ['bills', 'votes'] db.create_collection('bills') db.create_collection('votes') MongoIndex(subparsers).handle(StubObj()) <DeepExtract> try: index = db.bills.find({'state': 'ex'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex', 'chamber': 'lower'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex', 'chamber': 'lower', 'bill_id': 'HB 27'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex', '_id': 'XYZ'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'versions.doc_id': 'XYZ'}).sort('created_at').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'versions.doc_id': 'XYZ'}).sort('updated_at').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'versions.doc_id': 'XYZ'}).sort('action_dates.last').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex', 'subjects': 'test'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'subjects': assert 'subjects' in index, '%s not used %s' % ('subjects', index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex', 'subjects': 'test'}).sort('action_dates.last').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'subjects': assert 'subjects' in index, '%s not used %s' % ('subjects', index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex'}).sort('action_dates.first').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'first': assert 'first' in index, '%s not used %s' % ('first', index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex'}).sort('action_dates.last').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = 
None assert index is not None if 'last': assert 'last' in index, '%s not used %s' % ('last', index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex'}).sort('action_dates.passed_upper').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'passed_upper': assert 'passed_upper' in index, '%s not used %s' % ('passed_upper', index) </DeepExtract> <DeepExtract> try: index = db.bills.find({'state': 'ex'}).sort('action_dates.passed_lower').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'passed_lower': assert 'passed_lower' in index, '%s not used %s' % ('passed_lower', index) </DeepExtract> <DeepExtract> try: index = db.votes.find({'bill_id': 'XYZ'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.votes.find({'bill_id': 'XYZ', 'date': 123}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.votes.find({'_voters': 'XYZ'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract> <DeepExtract> try: index = db.votes.find({'_voters': 'XYZ', 'date': 123}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract>
@unittest.skip('temporarily disabled') def test_bill_indexes(): parser = argparse.ArgumentParser(description='generic billy util') subparsers = parser.add_subparsers(dest='subcommand') class StubObj(object): collections = ['bills', 'votes'] db.create_collection('bills') db.create_collection('votes') MongoIndex(subparsers).handle(StubObj()) try: index = db.bills.find({'state': 'ex'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.bills.find({'state': 'ex', 'chamber': 'lower'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.bills.find({'state': 'ex', 'chamber': 'lower', 'bill_id': 'HB 27'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.bills.find({'state': 'ex', '_id': 'XYZ'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.bills.find({'versions.doc_id': 'XYZ'}).sort('created_at').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.bills.find({'versions.doc_id': 'XYZ'}).sort('updated_at').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.bills.find({'versions.doc_id': 'XYZ'}).sort('action_dates.last').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.bills.find({'state': 'ex', 'subjects': 'test'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'subjects': assert 'subjects' in index, '%s not used %s' % ('subjects', index) try: index = db.bills.find({'state': 'ex', 'subjects': 'test'}).sort('action_dates.last').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'subjects': assert 'subjects' in index, '%s not used %s' % ('subjects', index) try: index = db.bills.find({'state': 'ex'}).sort('action_dates.first').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'first': assert 'first' in index, '%s not used %s' % ('first', index) try: index = db.bills.find({'state': 'ex'}).sort('action_dates.last').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'last': assert 'last' in index, '%s not used %s' % ('last', index) try: index = db.bills.find({'state': 'ex'}).sort('action_dates.passed_upper').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 
'passed_upper': assert 'passed_upper' in index, '%s not used %s' % ('passed_upper', index) try: index = db.bills.find({'state': 'ex'}).sort('action_dates.passed_lower').explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if 'passed_lower': assert 'passed_lower' in index, '%s not used %s' % ('passed_lower', index) try: index = db.votes.find({'bill_id': 'XYZ'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.votes.find({'bill_id': 'XYZ', 'date': 123}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.votes.find({'_voters': 'XYZ'}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) try: index = db.votes.find({'_voters': 'XYZ', 'date': 123}).explain()['queryPlanner']['winningPlan']['inputStage']['indexName'] except KeyError: index = None assert index is not None if name_piece: assert name_piece in index, '%s not used %s' % (name_piece, index) </DeepExtract>
billy
positive
def get_frames(self) -> tuple: """ Collect frames for camera and outputs it in 'color' dictionary ***depth and infrared are not used here*** :return: tuple of three dictionaries: color, depth, infrared """ color_frames = {} depth_maps = {} infra_frames = {} if self._footage_socket: ret = True else: ret = False self.last_frame_time = time.time() if ret: image = self._footage_socket.recv_string() <DeepExtract> img = base64.b64decode(image) npimg = np.fromstring(img, dtype=np.uint8) image = cv2.imdecode(npimg, 1) </DeepExtract> image = cv2.resize(image, RESOLUTION) color_frames[self._camera_name] = image running_time = time.time() - self.last_frame_time if running_time <= 1 / FRAMERATE: sleepy_time = int(np.ceil(1000 / FRAMERATE - running_time / 1000)) cv2.waitKey(sleepy_time) else: raise MissingFrameError('No frame was received from the webcam stream. Make sure that you started streaming on the host machine.') return (color_frames, depth_maps, infra_frames)
def get_frames(self) -> tuple: """ Collect frames for camera and outputs it in 'color' dictionary ***depth and infrared are not used here*** :return: tuple of three dictionaries: color, depth, infrared """ color_frames = {} depth_maps = {} infra_frames = {} if self._footage_socket: ret = True else: ret = False self.last_frame_time = time.time() if ret: image = self._footage_socket.recv_string() img = base64.b64decode(image) npimg = np.fromstring(img, dtype=np.uint8) image = cv2.imdecode(npimg, 1) image = cv2.resize(image, RESOLUTION) color_frames[self._camera_name] = image running_time = time.time() - self.last_frame_time if running_time <= 1 / FRAMERATE: sleepy_time = int(np.ceil(1000 / FRAMERATE - running_time / 1000)) cv2.waitKey(sleepy_time) else: raise MissingFrameError('No frame was received from the webcam stream. Make sure that you started streaming on the host machine.') return (color_frames, depth_maps, infra_frames)
DeepLabStream
positive
def eval_required(node, required_spec, var_status): """ evaluate the condition strings in required_spec to determine if the all the condition strings evaluate to True given the state of variables in var_status. required_spec is the '_required' specification in the format specification for the group. var_status is a dictionary mapping each variable referenced in the required_spec to either True (if the variable is present) or False if it's not. Returns None if all the conditions evaluate to True (e.g. no error) or returns a tuple with (condition_string, error_message) if any conditions evaluate to False (indicating an error). node is the node containing the required_spec which is used to display the path if there in an error. """ subs = {'AND': 'and', 'XOR': '^', 'OR': 'or', 'NOT': 'not'} sv_status = {key: 'True' if var_status[key] else 'False' for key in var_status} subs.update(sv_status) for rid in required_spec.keys(): cm = required_spec[rid] condition_string = cm[0] error_message = cm[1] text = condition_string + ' ' for key in subs: pat = '\\b%s(?=[\\) ])' % key text = re.sub(pat, subs[key], text) try: result = eval(text) except SyntaxError: print('%s Invalid expression for _required clause:' % node.full_path) print(condition_string) print("evaluated as: '%s'" % text) <DeepExtract> if msg: print('** Error: %s' % msg) print('Stack trace follows') print('-------------------') traceback.print_stack() sys.exit(1) </DeepExtract> if not result: return (condition_string, error_message) return None
def eval_required(node, required_spec, var_status): """ evaluate the condition strings in required_spec to determine if the all the condition strings evaluate to True given the state of variables in var_status. required_spec is the '_required' specification in the format specification for the group. var_status is a dictionary mapping each variable referenced in the required_spec to either True (if the variable is present) or False if it's not. Returns None if all the conditions evaluate to True (e.g. no error) or returns a tuple with (condition_string, error_message) if any conditions evaluate to False (indicating an error). node is the node containing the required_spec which is used to display the path if there in an error. """ subs = {'AND': 'and', 'XOR': '^', 'OR': 'or', 'NOT': 'not'} sv_status = {key: 'True' if var_status[key] else 'False' for key in var_status} subs.update(sv_status) for rid in required_spec.keys(): cm = required_spec[rid] condition_string = cm[0] error_message = cm[1] text = condition_string + ' ' for key in subs: pat = '\\b%s(?=[\\) ])' % key text = re.sub(pat, subs[key], text) try: result = eval(text) except SyntaxError: print('%s Invalid expression for _required clause:' % node.full_path) print(condition_string) print("evaluated as: '%s'" % text) if msg: print('** Error: %s' % msg) print('Stack trace follows') print('-------------------') traceback.print_stack() sys.exit(1) if not result: return (condition_string, error_message) return None
api-python
positive
def get_feature_model(name, *args, random_state=None, **kwargs): """Get an instance of a feature extraction model from a string. Arguments --------- name: str Name of the feature extraction model. *args: Arguments for the feature extraction model. **kwargs: Keyword arguments for thefeature extraction model. Returns ------- BaseFeatureExtraction: Initialized instance of feature extraction algorithm. """ <DeepExtract> model_class = _model_class_from_entry_point(name, 'asreview.models.feature_extraction') </DeepExtract> try: return model_class(*args, random_state=random_state, **kwargs) except TypeError: return model_class(*args, **kwargs)
def get_feature_model(name, *args, random_state=None, **kwargs): """Get an instance of a feature extraction model from a string. Arguments --------- name: str Name of the feature extraction model. *args: Arguments for the feature extraction model. **kwargs: Keyword arguments for thefeature extraction model. Returns ------- BaseFeatureExtraction: Initialized instance of feature extraction algorithm. """ model_class = _model_class_from_entry_point(name, 'asreview.models.feature_extraction') try: return model_class(*args, random_state=random_state, **kwargs) except TypeError: return model_class(*args, **kwargs)
asreview
positive
def _make_repr(attrs, ns, cls): <DeepExtract> unique_filename = f"<attrs generated {'repr'} {cls.__module__}.{getattr(cls, '__qualname__', cls.__name__)}>" </DeepExtract> attr_names_with_reprs = tuple(((a.name, repr if a.repr is True else a.repr, a.init) for a in attrs if a.repr is not False)) globs = {name + '_repr': r for (name, r, _) in attr_names_with_reprs if r != repr} globs['_compat'] = _compat globs['AttributeError'] = AttributeError globs['NOTHING'] = NOTHING attribute_fragments = [] for (name, r, i) in attr_names_with_reprs: accessor = 'self.' + name if i else 'getattr(self, "' + name + '", NOTHING)' fragment = '%s={%s!r}' % (name, accessor) if r == repr else '%s={%s_repr(%s)}' % (name, name, accessor) attribute_fragments.append(fragment) repr_fragment = ', '.join(attribute_fragments) if ns is None: cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' else: cls_name_fragment = ns + '.{self.__class__.__name__}' lines = ['def __repr__(self):', ' try:', ' already_repring = _compat.repr_context.already_repring', ' except AttributeError:', ' already_repring = {id(self),}', ' _compat.repr_context.already_repring = already_repring', ' else:', ' if id(self) in already_repring:', " return '...'", ' else:', ' already_repring.add(id(self))', ' try:', f" return f'{cls_name_fragment}({repr_fragment})'", ' finally:', ' already_repring.remove(id(self))'] return _make_method('__repr__', '\n'.join(lines), unique_filename, globs=globs)
def _make_repr(attrs, ns, cls): unique_filename = f"<attrs generated {'repr'} {cls.__module__}.{getattr(cls, '__qualname__', cls.__name__)}>" attr_names_with_reprs = tuple(((a.name, repr if a.repr is True else a.repr, a.init) for a in attrs if a.repr is not False)) globs = {name + '_repr': r for (name, r, _) in attr_names_with_reprs if r != repr} globs['_compat'] = _compat globs['AttributeError'] = AttributeError globs['NOTHING'] = NOTHING attribute_fragments = [] for (name, r, i) in attr_names_with_reprs: accessor = 'self.' + name if i else 'getattr(self, "' + name + '", NOTHING)' fragment = '%s={%s!r}' % (name, accessor) if r == repr else '%s={%s_repr(%s)}' % (name, name, accessor) attribute_fragments.append(fragment) repr_fragment = ', '.join(attribute_fragments) if ns is None: cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' else: cls_name_fragment = ns + '.{self.__class__.__name__}' lines = ['def __repr__(self):', ' try:', ' already_repring = _compat.repr_context.already_repring', ' except AttributeError:', ' already_repring = {id(self),}', ' _compat.repr_context.already_repring = already_repring', ' else:', ' if id(self) in already_repring:', " return '...'", ' else:', ' already_repring.add(id(self))', ' try:', f" return f'{cls_name_fragment}({repr_fragment})'", ' finally:', ' already_repring.remove(id(self))'] return _make_method('__repr__', '\n'.join(lines), unique_filename, globs=globs)
attrs
positive
def deprecated_alias(**aliases: str) -> Callable: """ Function decorator to warn about deprecated kwargs (and replace them). """ def deco(f): @functools.wraps(f) def wrapper(*args, **kwargs): <DeepExtract> for (alias, new) in aliases.items(): if alias in kwargs: if new in kwargs: raise ValueError(f"{f.__name__} received both the deprecated kwarg `{alias}` and it's replacement `{new}`.") warnings.warn(f'`{alias}` is deprecated; use `{new}`.', UserWarning, stacklevel=3) kwargs[new] = kwargs.pop(alias) </DeepExtract> return f(*args, **kwargs) return wrapper return deco
def deprecated_alias(**aliases: str) -> Callable: """ Function decorator to warn about deprecated kwargs (and replace them). """ def deco(f): @functools.wraps(f) def wrapper(*args, **kwargs): for (alias, new) in aliases.items(): if alias in kwargs: if new in kwargs: raise ValueError(f"{f.__name__} received both the deprecated kwarg `{alias}` and it's replacement `{new}`.") warnings.warn(f'`{alias}` is deprecated; use `{new}`.', UserWarning, stacklevel=3) kwargs[new] = kwargs.pop(alias) return f(*args, **kwargs) return wrapper return deco
alibi-detect
positive
def test_multi(self): for dtype in [np.int, np.float]: data = ma.arange(12, dtype=dtype) data[::2] = ma.masked <DeepExtract> array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data.reshape(3, 4))) agg = mean(array, axis=0) self.assertEqual(agg.dtype, target) </DeepExtract> data = ma.arange(12, dtype=dtype) data[1::2] = ma.masked <DeepExtract> array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data.reshape(3, 4))) agg = mean(array, axis=0) self.assertEqual(agg.dtype, target) </DeepExtract> data = ma.arange(12, dtype=dtype).reshape(3, 4) data[::2] = ma.masked <DeepExtract> array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data)) agg = mean(array, axis=0) self.assertEqual(agg.dtype, target) </DeepExtract> data = ma.arange(12, dtype=dtype).reshape(3, 4) data[1::2] = ma.masked <DeepExtract> array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data)) agg = mean(array, axis=0) self.assertEqual(agg.dtype, target) </DeepExtract>
def test_multi(self): for dtype in [np.int, np.float]: data = ma.arange(12, dtype=dtype) data[::2] = ma.masked array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data.reshape(3, 4))) agg = mean(array, axis=0) self.assertEqual(agg.dtype, target) data = ma.arange(12, dtype=dtype) data[1::2] = ma.masked array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data.reshape(3, 4))) agg = mean(array, axis=0) self.assertEqual(agg.dtype, target) data = ma.arange(12, dtype=dtype).reshape(3, 4) data[::2] = ma.masked array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data)) agg = mean(array, axis=0) self.assertEqual(agg.dtype, target) data = ma.arange(12, dtype=dtype).reshape(3, 4) data[1::2] = ma.masked array = biggus.NumpyArrayAdapter(np.arange(2, dtype=data)) agg = mean(array, axis=0) self.assertEqual(agg.dtype, target) </DeepExtract>
biggus
positive
def show_unit_items(self, unit): """ [UNIT]... -- show properties of a unit. """ logg.info('try read unit %s', unit) <DeepExtract> conf = self.load_unit_conf(unit) if conf is not None: conf = conf conf = self.default_unit_conf(unit) </DeepExtract> for entry in self.each_unit_items(unit, conf): yield entry
def show_unit_items(self, unit): """ [UNIT]... -- show properties of a unit. """ logg.info('try read unit %s', unit) conf = self.load_unit_conf(unit) if conf is not None: conf = conf conf = self.default_unit_conf(unit) for entry in self.each_unit_items(unit, conf): yield entry
deployment
positive
def string(self): """ checks if a configuration is a string After an instance has been set a configuration key (via `conf`), this method can be used to check if the value (if any) configured with this key is a string. If not, an `ConfluenceConfigurationError` exception will be thrown. In the event that the configuration is not set (e.g. a value of `None`), this method will have no effect. Returns: the validator instance """ <DeepExtract> value = getattr(self.config, self.key) if value is not None and self._translate: value = self._translate(value) if isinstance(value, str) and (not value): value = None value = value </DeepExtract> if value is not None and (not isinstance(value, str)): raise ConfluenceConfigurationError('%s is not a string' % self.key) return self
def string(self): """ checks if a configuration is a string After an instance has been set a configuration key (via `conf`), this method can be used to check if the value (if any) configured with this key is a string. If not, an `ConfluenceConfigurationError` exception will be thrown. In the event that the configuration is not set (e.g. a value of `None`), this method will have no effect. Returns: the validator instance """ value = getattr(self.config, self.key) if value is not None and self._translate: value = self._translate(value) if isinstance(value, str) and (not value): value = None value = value if value is not None and (not isinstance(value, str)): raise ConfluenceConfigurationError('%s is not a string' % self.key) return self
confluencebuilder
positive
def shufflenet_v2_x1_0(num_classes, loss='softmax', pretrained=True, **kwargs): model = ShuffleNetV2(num_classes, loss, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs) if pretrained: <DeepExtract> if model_urls['shufflenetv2_x1.0'] is None: import warnings warnings.warn('ImageNet pretrained weights are unavailable for this model') return pretrain_dict = model_zoo.load_url(model_urls['shufflenetv2_x1.0']) model_dict = model.state_dict() pretrain_dict = {k: v for (k, v) in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()} model_dict.update(pretrain_dict) model.load_state_dict(model_dict) </DeepExtract> return model
def shufflenet_v2_x1_0(num_classes, loss='softmax', pretrained=True, **kwargs): model = ShuffleNetV2(num_classes, loss, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs) if pretrained: if model_urls['shufflenetv2_x1.0'] is None: import warnings warnings.warn('ImageNet pretrained weights are unavailable for this model') return pretrain_dict = model_zoo.load_url(model_urls['shufflenetv2_x1.0']) model_dict = model.state_dict() pretrain_dict = {k: v for (k, v) in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()} model_dict.update(pretrain_dict) model.load_state_dict(model_dict) return model
deep-person-reid
positive
def simple_graphs(self, *args): """Simple graphs meeting provided specifications. **Arguments** - ***args** : arbitrary positional arguments - Valid arguments to be passed to `sel`, or, if a single integer, the index of particular simple graph. **Returns** - _list_, if single integer argument is given - The list of edges corresponding to the specified simple graph - _1-d numpy.ndarray_, otherwise - An array of simple graphs (as lists of edges) matching the specifications. """ if not hasattr(self, '_simple_graphs'): if self._disc_col_inds is None: self._simple_graphs = np.asarray([efp.simple_graph for efp in self.efps], dtype='O') else: <DeepExtract> disc_comps = [[[efp.simple_graph for efp in self.efps][i] for i in col_inds] for col_inds in self._disc_col_inds] self._simple_graphs = np.asarray([efp.simple_graph for efp in self.efps] + [graph_union(*dc) for dc in disc_comps], dtype='O') </DeepExtract> if len(args) and isinstance(args[0], int): return self._simple_graphs[args[0]] return self._simple_graphs[self.sel(*args)]
def simple_graphs(self, *args): """Simple graphs meeting provided specifications. **Arguments** - ***args** : arbitrary positional arguments - Valid arguments to be passed to `sel`, or, if a single integer, the index of particular simple graph. **Returns** - _list_, if single integer argument is given - The list of edges corresponding to the specified simple graph - _1-d numpy.ndarray_, otherwise - An array of simple graphs (as lists of edges) matching the specifications. """ if not hasattr(self, '_simple_graphs'): if self._disc_col_inds is None: self._simple_graphs = np.asarray([efp.simple_graph for efp in self.efps], dtype='O') else: disc_comps = [[[efp.simple_graph for efp in self.efps][i] for i in col_inds] for col_inds in self._disc_col_inds] self._simple_graphs = np.asarray([efp.simple_graph for efp in self.efps] + [graph_union(*dc) for dc in disc_comps], dtype='O') if len(args) and isinstance(args[0], int): return self._simple_graphs[args[0]] return self._simple_graphs[self.sel(*args)]
EnergyFlow
positive
def forward(self, features, proposals, targets=None, benchmark=False, timers=None): """ Arguments: features (list[Tensor]): feature-maps from possibly several levels proposals (list[BoxList]): proposal boxes targets (list[BoxList], optional): the ground-truth targets. Returns: x (Tensor): the result of the feature extractor proposals (list[BoxList]): during training, the original proposals are returned. During testing, the predicted boxlists are returned with the `mask` field set losses (dict[Tensor]): During training, returns the losses for the head. During testing, returns an empty dict. """ if benchmark and timers is not None: torch.cuda.synchronize() timers[5].tic() if self.training: all_proposals = proposals <DeepExtract> assert isinstance(proposals, (list, tuple)) assert isinstance(proposals[0], BoxList) assert proposals[0].has_field('labels') positive_boxes = [] positive_inds = [] num_boxes = 0 for boxes_per_image in proposals: labels = boxes_per_image.get_field('labels') inds_mask = labels > 0 inds = inds_mask.nonzero().squeeze(1) positive_boxes.append(boxes_per_image[inds]) positive_inds.append(inds_mask) (proposals, positive_inds) = (positive_boxes, positive_inds) </DeepExtract> if self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR: x = features x = x[torch.cat(positive_inds, dim=0)] else: x = self.feature_extractor(features, proposals) mask_logits = self.predictor(x) if benchmark and timers is not None: torch.cuda.synchronize() timers[5].toc() timers[6].tic() if not self.training: result = self.post_processor(mask_logits, proposals) if benchmark and timers is not None: torch.cuda.synchronize() timers[6].toc() return (x, result, {}) loss_mask = self.loss_evaluator(proposals, mask_logits, targets) return (x, all_proposals, dict(loss_mask=loss_mask))
def forward(self, features, proposals, targets=None, benchmark=False, timers=None): """ Arguments: features (list[Tensor]): feature-maps from possibly several levels proposals (list[BoxList]): proposal boxes targets (list[BoxList], optional): the ground-truth targets. Returns: x (Tensor): the result of the feature extractor proposals (list[BoxList]): during training, the original proposals are returned. During testing, the predicted boxlists are returned with the `mask` field set losses (dict[Tensor]): During training, returns the losses for the head. During testing, returns an empty dict. """ if benchmark and timers is not None: torch.cuda.synchronize() timers[5].tic() if self.training: all_proposals = proposals assert isinstance(proposals, (list, tuple)) assert isinstance(proposals[0], BoxList) assert proposals[0].has_field('labels') positive_boxes = [] positive_inds = [] num_boxes = 0 for boxes_per_image in proposals: labels = boxes_per_image.get_field('labels') inds_mask = labels > 0 inds = inds_mask.nonzero().squeeze(1) positive_boxes.append(boxes_per_image[inds]) positive_inds.append(inds_mask) (proposals, positive_inds) = (positive_boxes, positive_inds) if self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR: x = features x = x[torch.cat(positive_inds, dim=0)] else: x = self.feature_extractor(features, proposals) mask_logits = self.predictor(x) if benchmark and timers is not None: torch.cuda.synchronize() timers[5].toc() timers[6].tic() if not self.training: result = self.post_processor(mask_logits, proposals) if benchmark and timers is not None: torch.cuda.synchronize() timers[6].toc() return (x, result, {}) loss_mask = self.loss_evaluator(proposals, mask_logits, targets) return (x, all_proposals, dict(loss_mask=loss_mask))
EmbedMask
positive
def f(root_dir, env, env_name, index, transparent_params): denv = FakeSingleSpacesVec(env, agent_id=index) pylog.info(f"Loading Stable Baselines policy for '{cls}' from '{root_dir}'") <DeepExtract> import aprl.training.scheduling mock_modules = {'modelfree': 'aprl', 'modelfree.scheduling': 'aprl.training.scheduling', 'modelfree.training.scheduling': 'aprl.training.scheduling'} for (old, new) in mock_modules.items(): sys.modules[old] = sys.modules[new] if 'env' in kwargs: denv = kwargs['env'] del kwargs['env'] model_path = os.path.join(root_dir, 'model.pkl') model = cls.load(model_path, env=denv, **kwargs) for old in mock_modules: del sys.modules[old] model = model </DeepExtract> try: <DeepExtract> try: normalize_path = os.path.join(root_dir, 'vec_normalize.pkl') vec_normalize = vec_env.VecNormalize.load(normalize_path, denv) vec_normalize.training = False pylog.info(f"Loaded normalization statistics from '{normalize_path}'") vec_normalize = vec_normalize except FileNotFoundError: pass vec_normalize = vec_env.VecNormalize(denv, training=False) vec_normalize.load_running_average(root_dir) pylog.info(f"Loaded normalization statistics from '{root_dir}'") vec_normalize = vec_normalize </DeepExtract> model = NormalizeModel(model, vec_normalize) except FileNotFoundError: pass return model
def f(root_dir, env, env_name, index, transparent_params): denv = FakeSingleSpacesVec(env, agent_id=index) pylog.info(f"Loading Stable Baselines policy for '{cls}' from '{root_dir}'") import aprl.training.scheduling mock_modules = {'modelfree': 'aprl', 'modelfree.scheduling': 'aprl.training.scheduling', 'modelfree.training.scheduling': 'aprl.training.scheduling'} for (old, new) in mock_modules.items(): sys.modules[old] = sys.modules[new] if 'env' in kwargs: denv = kwargs['env'] del kwargs['env'] model_path = os.path.join(root_dir, 'model.pkl') model = cls.load(model_path, env=denv, **kwargs) for old in mock_modules: del sys.modules[old] model = model try: try: normalize_path = os.path.join(root_dir, 'vec_normalize.pkl') vec_normalize = vec_env.VecNormalize.load(normalize_path, denv) vec_normalize.training = False pylog.info(f"Loaded normalization statistics from '{normalize_path}'") vec_normalize = vec_normalize except FileNotFoundError: pass vec_normalize = vec_env.VecNormalize(denv, training=False) vec_normalize.load_running_average(root_dir) pylog.info(f"Loaded normalization statistics from '{root_dir}'") vec_normalize = vec_normalize model = NormalizeModel(model, vec_normalize) except FileNotFoundError: pass return model
adversarial-policies
positive
def _get_doc_context_copacrr(doc: torch.Tensor, doc_mask: torch.Tensor, context_window_size: int) -> torch.Tensor: """ Parameters ---------- doc: with shape (B, R, D) doc_mask: binary tensor that differentiate real tokens from padding tokens (B, R) Returns ------- a tensor of shape (B, R, D) which indicates the context representation of each token in doc. We also reset padding tokens to zero since they have no context """ def moving_average(a: torch.Tensor, window_size: int, dimension: int): ret = torch.cumsum(a, dim=dimension) ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size] return ret[:, window_size - 1:] / window_size left = context_window_size // 2 right = context_window_size - left - 1 y = F.pad(doc, (0, 0, left, right)) <DeepExtract> ret = torch.cumsum(y, dim=1) ret[:, context_window_size:] = ret[:, context_window_size:] - ret[:, :-context_window_size] document_context = ret[:, context_window_size - 1:] / context_window_size </DeepExtract> document_context = document_context * doc_mask.unsqueeze(-1).float() return document_context
def _get_doc_context_copacrr(doc: torch.Tensor, doc_mask: torch.Tensor, context_window_size: int) -> torch.Tensor: """ Parameters ---------- doc: with shape (B, R, D) doc_mask: binary tensor that differentiate real tokens from padding tokens (B, R) Returns ------- a tensor of shape (B, R, D) which indicates the context representation of each token in doc. We also reset padding tokens to zero since they have no context """ def moving_average(a: torch.Tensor, window_size: int, dimension: int): ret = torch.cumsum(a, dim=dimension) ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size] return ret[:, window_size - 1:] / window_size left = context_window_size // 2 right = context_window_size - left - 1 y = F.pad(doc, (0, 0, left, right)) ret = torch.cumsum(y, dim=1) ret[:, context_window_size:] = ret[:, context_window_size:] - ret[:, :-context_window_size] document_context = ret[:, context_window_size - 1:] / context_window_size document_context = document_context * doc_mask.unsqueeze(-1).float() return document_context
EMNLP2020
positive
def __init__(self, ctxt_dir, remote_ctxt=None, local_ctxt=None, remote_ctxt_url=None): """ Data context resides in file:///meta_dir/context/<context_name>/ Objects are in file:///meta_dir/context/<context_name>/objects/<uuid>/{uuid_<type>.pb} DB is in file:///meta_dir/context/<context_name>/ctxt.db Assumes that the context has already been made via DataContext.create_branch() It may also be backed by a "remote" db. Args: ctxt_dir: Where the contexts are stored remote_ctxt: The remote context name local_ctxt: The local context name remote_ctxt_url: The URL of the db for the global context """ self.local_ctxt_dir = ctxt_dir self.remote_ctxt = remote_ctxt self.local_ctxt = local_ctxt self.remote_ctxt_url = remote_ctxt_url self.local_engine = None self.remote_engine = None self.valid = False self.len_uncommitted_history = DEFAULT_LEN_UNCOMMITTED_HISTORY <DeepExtract> if in_memory: _logger.debug('Building in-memory database from local state...') self.local_engine = create_engine('sqlite:///:memory:', echo=False) self.rebuild_db() else: db_file = os.path.join(self._get_local_context_dir(), DB_FILE) self.local_engine = create_engine('sqlite:///' + db_file, echo=False) if not os.path.isfile(db_file): _logger.debug('No disdat {} local db data file found.'.format(db_file)) _logger.debug('\t Rebuilding local database from local state...'.format(db_file)) self.rebuild_db() self.dbck() return </DeepExtract> <DeepExtract> if self.remote_ctxt_url is None: return if not self.remote_engine and False: try: self.remote_engine = boto3.resource('dynamodb', endpoint_url='http://localhost:8000') except Exception as e: _logger.debug('Failed to get dynamo AWS resource: {}'.format(e)) return </DeepExtract>
def __init__(self, ctxt_dir, remote_ctxt=None, local_ctxt=None, remote_ctxt_url=None): """ Data context resides in file:///meta_dir/context/<context_name>/ Objects are in file:///meta_dir/context/<context_name>/objects/<uuid>/{uuid_<type>.pb} DB is in file:///meta_dir/context/<context_name>/ctxt.db Assumes that the context has already been made via DataContext.create_branch() It may also be backed by a "remote" db. Args: ctxt_dir: Where the contexts are stored remote_ctxt: The remote context name local_ctxt: The local context name remote_ctxt_url: The URL of the db for the global context """ self.local_ctxt_dir = ctxt_dir self.remote_ctxt = remote_ctxt self.local_ctxt = local_ctxt self.remote_ctxt_url = remote_ctxt_url self.local_engine = None self.remote_engine = None self.valid = False self.len_uncommitted_history = DEFAULT_LEN_UNCOMMITTED_HISTORY if in_memory: _logger.debug('Building in-memory database from local state...') self.local_engine = create_engine('sqlite:///:memory:', echo=False) self.rebuild_db() else: db_file = os.path.join(self._get_local_context_dir(), DB_FILE) self.local_engine = create_engine('sqlite:///' + db_file, echo=False) if not os.path.isfile(db_file): _logger.debug('No disdat {} local db data file found.'.format(db_file)) _logger.debug('\t Rebuilding local database from local state...'.format(db_file)) self.rebuild_db() self.dbck() return if self.remote_ctxt_url is None: return if not self.remote_engine and False: try: self.remote_engine = boto3.resource('dynamodb', endpoint_url='http://localhost:8000') except Exception as e: _logger.debug('Failed to get dynamo AWS resource: {}'.format(e)) return </DeepExtract>
disdat
positive
def test_check_region_decorator_keeps_docstring_and_name(): <DeepExtract> result = time.time() </DeepExtract> time.sleep(0.1) <DeepExtract> result2 = time.time() </DeepExtract> assert result == result2 assert albert.__doc__ == 'A doc string' assert albert.__name__ == 'albert'
def test_check_region_decorator_keeps_docstring_and_name(): result = time.time() time.sleep(0.1) result2 = time.time() assert result == result2 assert albert.__doc__ == 'A doc string' assert albert.__name__ == 'albert'
beaker
positive
def spawn_event_worker(self, event: MigrationEvent): """Start process recycling nodes in a pool accordingly to some event parameters :param MigrationEvent event: Event data """ if event.pool not in self.pools_accepting_events: self.logger.warning(f'Pool {event.pool} not configured to accept migration trigger event, skipping') <DeepExtract> self.cluster_connector.mark_node_migration_resource(event.resource_name, MigrationStatus.SKIPPED, attempts) </DeepExtract> return <DeepExtract> try: if event.pool in self.migration_configs: worker_setup = WorkerSetup.from_config(self.migration_configs[event.pool]) except Exception as e: self.logger.exception(f'Bad migration configuration for pool {event.pool}: {e}') worker_setup = None </DeepExtract> if not worker_setup or event.cluster != self.options.cluster: self.logger.warning(f'Event not processable by this batch instance, skipping: {event}') <DeepExtract> self.cluster_connector.mark_node_migration_resource(event.resource_name, MigrationStatus.SKIPPED, attempts) </DeepExtract> return self.logger.info(f'Spawning migration worker for event: {event}') <DeepExtract> if event: (prefix, cluster, pool) = (self.EVENT_WORKER_LABEL_PREFIX, event.cluster, event.pool) elif pool: (prefix, cluster) = (self.UPTIME_WORKER_LABEL_PREFIX, self.options.cluster) else: raise ValueError("Either 'pool' or 'event' must be provided as parameter") worker_label = self.WORKER_LABEL_SEPARATOR.join((prefix, cluster, pool)) </DeepExtract> if self._spawn_worker(label=worker_label, routine=event_migration_worker, initial_restart_count=event.previous_attempts, migration_event=event, worker_setup=worker_setup): <DeepExtract> self.cluster_connector.mark_node_migration_resource(event.resource_name, MigrationStatus.INPROGRESS, attempts) </DeepExtract> self.events_in_progress[worker_label] = event
def spawn_event_worker(self, event: MigrationEvent): """Start process recycling nodes in a pool accordingly to some event parameters :param MigrationEvent event: Event data """ if event.pool not in self.pools_accepting_events: self.logger.warning(f'Pool {event.pool} not configured to accept migration trigger event, skipping') self.cluster_connector.mark_node_migration_resource(event.resource_name, MigrationStatus.SKIPPED, attempts) return try: if event.pool in self.migration_configs: worker_setup = WorkerSetup.from_config(self.migration_configs[event.pool]) except Exception as e: self.logger.exception(f'Bad migration configuration for pool {event.pool}: {e}') worker_setup = None if not worker_setup or event.cluster != self.options.cluster: self.logger.warning(f'Event not processable by this batch instance, skipping: {event}') self.cluster_connector.mark_node_migration_resource(event.resource_name, MigrationStatus.SKIPPED, attempts) return self.logger.info(f'Spawning migration worker for event: {event}') if event: (prefix, cluster, pool) = (self.EVENT_WORKER_LABEL_PREFIX, event.cluster, event.pool) elif pool: (prefix, cluster) = (self.UPTIME_WORKER_LABEL_PREFIX, self.options.cluster) else: raise ValueError("Either 'pool' or 'event' must be provided as parameter") worker_label = self.WORKER_LABEL_SEPARATOR.join((prefix, cluster, pool)) if self._spawn_worker(label=worker_label, routine=event_migration_worker, initial_restart_count=event.previous_attempts, migration_event=event, worker_setup=worker_setup): self.cluster_connector.mark_node_migration_resource(event.resource_name, MigrationStatus.INPROGRESS, attempts) self.events_in_progress[worker_label] = event
clusterman
positive
def connectcomponents(idf, components, fluid=None): """rename nodes so that the components get connected fluid is only needed if there are air and water nodes fluid is Air or Water or ''. if the fluid is Steam, use Water""" if fluid is None: fluid = '' if len(components) == 1: (thiscomp, thiscompnode) = components[0] <DeepExtract> def blankfield(fieldvalue): """test for blank field""" try: if fieldvalue.strip() == '': return True else: return False except AttributeError: return False def trimfields(fields, thisnode): if len(fields) > 1: if thiscompnode is not None: fields = [field for field in fields if field.startswith(thiscompnode)] return fields else: print('Where should this loop connect ?') print('%s - %s' % (thiscomp.key, thiscomp.Name)) print([field.split('Inlet_Node_Name')[0] for field in inletfields]) raise WhichLoopError else: return fields inletfields = getfieldnamesendswith(thiscomp, 'Inlet_Node_Name') inletfields = trimfields(inletfields, thiscompnode) for inletfield in inletfields: if blankfield(thiscomp[inletfield]) == True or False == True: thiscomp[inletfield] = '%s_%s' % (thiscomp.Name, inletfield) outletfields = getfieldnamesendswith(thiscomp, 'Outlet_Node_Name') outletfields = trimfields(outletfields, thiscompnode) for outletfield in outletfields: if blankfield(thiscomp[outletfield]) == True or False == True: thiscomp[outletfield] = '%s_%s' % (thiscomp.Name, outletfield) return thiscomp </DeepExtract> <DeepExtract> if thiscompnode is None: thiscompnode = '' if fluid is None: fluid = '' nodenames = getfieldnamesendswith(thiscomp, 'Outlet_Node_Name') nodenames = [name for name in nodenames if name.startswith(thiscompnode)] fnodenames = [nd for nd in nodenames if nd.find(fluid) != -1] fnodenames = [name for name in fnodenames if name.startswith(thiscompnode)] if len(fnodenames) == 0: nodename = nodenames[0] else: nodename = fnodenames[0] outletnodename = nodename </DeepExtract> thiscomp[outletnodename] = [thiscomp[outletnodename], thiscomp[outletnodename]] return components for i in range(len(components) - 1): (thiscomp, thiscompnode) = components[i] (nextcomp, nextcompnode) = components[i + 1] <DeepExtract> def blankfield(fieldvalue): """test for blank field""" try: if fieldvalue.strip() == '': return True else: return False except AttributeError: return False def trimfields(fields, thisnode): if len(fields) > 1: if thiscompnode is not None: fields = [field for field in fields if field.startswith(thiscompnode)] return fields else: print('Where should this loop connect ?') print('%s - %s' % (thiscomp.key, thiscomp.Name)) print([field.split('Inlet_Node_Name')[0] for field in inletfields]) raise WhichLoopError else: return fields inletfields = getfieldnamesendswith(thiscomp, 'Inlet_Node_Name') inletfields = trimfields(inletfields, thiscompnode) for inletfield in inletfields: if blankfield(thiscomp[inletfield]) == True or False == True: thiscomp[inletfield] = '%s_%s' % (thiscomp.Name, inletfield) outletfields = getfieldnamesendswith(thiscomp, 'Outlet_Node_Name') outletfields = trimfields(outletfields, thiscompnode) for outletfield in outletfields: if blankfield(thiscomp[outletfield]) == True or False == True: thiscomp[outletfield] = '%s_%s' % (thiscomp.Name, outletfield) return thiscomp </DeepExtract> <DeepExtract> def blankfield(fieldvalue): """test for blank field""" try: if fieldvalue.strip() == '': return True else: return False except AttributeError: return False def trimfields(fields, thisnode): if len(fields) > 1: if nextcompnode is not None: fields = [field for field 
in fields if field.startswith(nextcompnode)] return fields else: print('Where should this loop connect ?') print('%s - %s' % (nextcomp.key, nextcomp.Name)) print([field.split('Inlet_Node_Name')[0] for field in inletfields]) raise WhichLoopError else: return fields inletfields = getfieldnamesendswith(nextcomp, 'Inlet_Node_Name') inletfields = trimfields(inletfields, nextcompnode) for inletfield in inletfields: if blankfield(nextcomp[inletfield]) == True or False == True: nextcomp[inletfield] = '%s_%s' % (nextcomp.Name, inletfield) outletfields = getfieldnamesendswith(nextcomp, 'Outlet_Node_Name') outletfields = trimfields(outletfields, nextcompnode) for outletfield in outletfields: if blankfield(nextcomp[outletfield]) == True or False == True: nextcomp[outletfield] = '%s_%s' % (nextcomp.Name, outletfield) return nextcomp </DeepExtract> betweennodename = '%s_%s_node' % (thiscomp.Name, nextcomp.Name) <DeepExtract> if thiscompnode is None: thiscompnode = '' if fluid is None: fluid = '' nodenames = getfieldnamesendswith(thiscomp, 'Outlet_Node_Name') nodenames = [name for name in nodenames if name.startswith(thiscompnode)] fnodenames = [nd for nd in nodenames if nd.find(fluid) != -1] fnodenames = [name for name in fnodenames if name.startswith(thiscompnode)] if len(fnodenames) == 0: nodename = nodenames[0] else: nodename = fnodenames[0] outletnodename = nodename </DeepExtract> thiscomp[outletnodename] = [thiscomp[outletnodename], betweennodename] <DeepExtract> if startswith is None: startswith = '' if fluid is None: fluid = '' nodenames = getfieldnamesendswith(nextcomp, 'Inlet_Node_Name') nodenames = [name for name in nodenames if name.startswith(startswith)] fnodenames = [nd for nd in nodenames if nd.find(fluid) != -1] fnodenames = [name for name in fnodenames if name.startswith(startswith)] if len(fnodenames) == 0: nodename = nodenames[0] else: nodename = fnodenames[0] inletnodename = nodename </DeepExtract> nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename] return components
def connectcomponents(idf, components, fluid=None): """rename nodes so that the components get connected fluid is only needed if there are air and water nodes fluid is Air or Water or ''. if the fluid is Steam, use Water""" if fluid is None: fluid = '' if len(components) == 1: (thiscomp, thiscompnode) = components[0] def blankfield(fieldvalue): """test for blank field""" try: if fieldvalue.strip() == '': return True else: return False except AttributeError: return False def trimfields(fields, thisnode): if len(fields) > 1: if thiscompnode is not None: fields = [field for field in fields if field.startswith(thiscompnode)] return fields else: print('Where should this loop connect ?') print('%s - %s' % (thiscomp.key, thiscomp.Name)) print([field.split('Inlet_Node_Name')[0] for field in inletfields]) raise WhichLoopError else: return fields inletfields = getfieldnamesendswith(thiscomp, 'Inlet_Node_Name') inletfields = trimfields(inletfields, thiscompnode) for inletfield in inletfields: if blankfield(thiscomp[inletfield]) == True or False == True: thiscomp[inletfield] = '%s_%s' % (thiscomp.Name, inletfield) outletfields = getfieldnamesendswith(thiscomp, 'Outlet_Node_Name') outletfields = trimfields(outletfields, thiscompnode) for outletfield in outletfields: if blankfield(thiscomp[outletfield]) == True or False == True: thiscomp[outletfield] = '%s_%s' % (thiscomp.Name, outletfield) return thiscomp if thiscompnode is None: thiscompnode = '' if fluid is None: fluid = '' nodenames = getfieldnamesendswith(thiscomp, 'Outlet_Node_Name') nodenames = [name for name in nodenames if name.startswith(thiscompnode)] fnodenames = [nd for nd in nodenames if nd.find(fluid) != -1] fnodenames = [name for name in fnodenames if name.startswith(thiscompnode)] if len(fnodenames) == 0: nodename = nodenames[0] else: nodename = fnodenames[0] outletnodename = nodename thiscomp[outletnodename] = [thiscomp[outletnodename], thiscomp[outletnodename]] return components for i in range(len(components) - 1): (thiscomp, thiscompnode) = components[i] (nextcomp, nextcompnode) = components[i + 1] def blankfield(fieldvalue): """test for blank field""" try: if fieldvalue.strip() == '': return True else: return False except AttributeError: return False def trimfields(fields, thisnode): if len(fields) > 1: if thiscompnode is not None: fields = [field for field in fields if field.startswith(thiscompnode)] return fields else: print('Where should this loop connect ?') print('%s - %s' % (thiscomp.key, thiscomp.Name)) print([field.split('Inlet_Node_Name')[0] for field in inletfields]) raise WhichLoopError else: return fields inletfields = getfieldnamesendswith(thiscomp, 'Inlet_Node_Name') inletfields = trimfields(inletfields, thiscompnode) for inletfield in inletfields: if blankfield(thiscomp[inletfield]) == True or False == True: thiscomp[inletfield] = '%s_%s' % (thiscomp.Name, inletfield) outletfields = getfieldnamesendswith(thiscomp, 'Outlet_Node_Name') outletfields = trimfields(outletfields, thiscompnode) for outletfield in outletfields: if blankfield(thiscomp[outletfield]) == True or False == True: thiscomp[outletfield] = '%s_%s' % (thiscomp.Name, outletfield) return thiscomp def blankfield(fieldvalue): """test for blank field""" try: if fieldvalue.strip() == '': return True else: return False except AttributeError: return False def trimfields(fields, thisnode): if len(fields) > 1: if nextcompnode is not None: fields = [field for field in fields if field.startswith(nextcompnode)] return fields else: print('Where should this loop 
connect ?') print('%s - %s' % (nextcomp.key, nextcomp.Name)) print([field.split('Inlet_Node_Name')[0] for field in inletfields]) raise WhichLoopError else: return fields inletfields = getfieldnamesendswith(nextcomp, 'Inlet_Node_Name') inletfields = trimfields(inletfields, nextcompnode) for inletfield in inletfields: if blankfield(nextcomp[inletfield]) == True or False == True: nextcomp[inletfield] = '%s_%s' % (nextcomp.Name, inletfield) outletfields = getfieldnamesendswith(nextcomp, 'Outlet_Node_Name') outletfields = trimfields(outletfields, nextcompnode) for outletfield in outletfields: if blankfield(nextcomp[outletfield]) == True or False == True: nextcomp[outletfield] = '%s_%s' % (nextcomp.Name, outletfield) return nextcomp betweennodename = '%s_%s_node' % (thiscomp.Name, nextcomp.Name) if thiscompnode is None: thiscompnode = '' if fluid is None: fluid = '' nodenames = getfieldnamesendswith(thiscomp, 'Outlet_Node_Name') nodenames = [name for name in nodenames if name.startswith(thiscompnode)] fnodenames = [nd for nd in nodenames if nd.find(fluid) != -1] fnodenames = [name for name in fnodenames if name.startswith(thiscompnode)] if len(fnodenames) == 0: nodename = nodenames[0] else: nodename = fnodenames[0] outletnodename = nodename thiscomp[outletnodename] = [thiscomp[outletnodename], betweennodename] if startswith is None: startswith = '' if fluid is None: fluid = '' nodenames = getfieldnamesendswith(nextcomp, 'Inlet_Node_Name') nodenames = [name for name in nodenames if name.startswith(startswith)] fnodenames = [nd for nd in nodenames if nd.find(fluid) != -1] fnodenames = [name for name in fnodenames if name.startswith(startswith)] if len(fnodenames) == 0: nodename = nodenames[0] else: nodename = fnodenames[0] inletnodename = nodename nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename] return components
eppy
positive
def test_noop(self): <DeepExtract> self.client_connect() self.client.login(user='nobody', passwd='nobody') </DeepExtract> self.assertEqual(self.client.sendcmd('noop'), "200 I successfully done nothin'.")
def test_noop(self): self.client_connect() self.client.login(user='nobody', passwd='nobody') self.assertEqual(self.client.sendcmd('noop'), "200 I successfully done nothin'.")
conpot
positive
def annos_to_ground(annos, dir, ids): <DeepExtract> plane = np.stack([read_plane(os.path.join(dir, '%06d.txt' % i)) for i in ids], axis=0) </DeepExtract> for i in range(len(annos)): <DeepExtract> annos[i]['location'][:, 1] -= (-plane[i][3] - plane[i][0] * annos[i]['location'][:, 0] - plane[i][2] * annos[i]['location'][:, 2]) / plane[i][1] annos[i] = annos[i] </DeepExtract> return annos
def annos_to_ground(annos, dir, ids): plane = np.stack([read_plane(os.path.join(dir, '%06d.txt' % i)) for i in ids], axis=0) for i in range(len(annos)): annos[i]['location'][:, 1] -= (-plane[i][3] - plane[i][0] * annos[i]['location'][:, 0] - plane[i][2] * annos[i]['location'][:, 2]) / plane[i][1] annos[i] = annos[i] return annos
3D_adapt_auto_driving
positive
def compute_round(r, master=0, base=1): <DeepExtract> if master == 0: mas = self._winners elif master == 1: mas = self._losers else: mas = self._final if master < 2: rnd = mas[r] else: rnd = mas if r > 0 or master > 0: for m in rnd: m.compute_partial() (mas, rnd) = (mas, rnd) </DeepExtract> num = len(rnd) gens = [m.instances() for m in rnd] for instances in itertools.product(*gens): prob = base for inst in instances: prob *= inst[0] inst[2].broadcast_instance(inst) <DeepExtract> if master == 0: for inst in instances: self._tally[inst[1][0]].bumpers[inst[1][1]] += prob elif master == 1: for inst in instances: self._tally[inst[1][0]][r] += prob self._tally[inst[1][0]].eliminators[inst[1][1]] += prob elif master == 2: (i1, i2) = instances wb_guy = rnd[0].get_player(0) lb_guy = rnd[0].get_player(1) if i1[1][1] == wb_guy or i2[1][1] == wb_guy: winner = wb_guy loser = lb_guy self._tally[loser].eliminators[winner] += prob if i1[1][1] != wb_guy: self._tally[winner].bumpers[loser] += prob else: winner = lb_guy loser = wb_guy self._tally[loser].bumpers[winner] += prob self._tally[loser].eliminators[winner] += prob self._tally[winner][-1] += prob self._tally[loser][-2] += prob </DeepExtract> if r < len(mas) - 1 and master < 2: <DeepExtract> (mas, rnd) = self.fetch_round(r + 1, master) num = len(rnd) gens = [m.instances() for m in rnd] for instances in itertools.product(*gens): prob = prob for inst in instances: prob *= inst[0] inst[2].broadcast_instance(inst) self.compute_instances(instances, master, rnd, r + 1, prob) if r + 1 < len(mas) - 1 and master < 2: self.compute_round(r + 1 + 1, master, prob) elif r + 1 == len(mas) - 1 and master < 2: self.compute_round(0, master + 1, prob) </DeepExtract> elif r == len(mas) - 1 and master < 2: <DeepExtract> (mas, rnd) = self.fetch_round(0, master + 1) num = len(rnd) gens = [m.instances() for m in rnd] for instances in itertools.product(*gens): prob = prob for inst in instances: prob *= inst[0] inst[2].broadcast_instance(inst) self.compute_instances(instances, master + 1, rnd, 0, prob) if 0 < len(mas) - 1 and master + 1 < 2: self.compute_round(0 + 1, master + 1, prob) elif 0 == len(mas) - 1 and master + 1 < 2: self.compute_round(0, master + 1 + 1, prob) </DeepExtract>
def compute_round(r, master=0, base=1): if master == 0: mas = self._winners elif master == 1: mas = self._losers else: mas = self._final if master < 2: rnd = mas[r] else: rnd = mas if r > 0 or master > 0: for m in rnd: m.compute_partial() (mas, rnd) = (mas, rnd) num = len(rnd) gens = [m.instances() for m in rnd] for instances in itertools.product(*gens): prob = base for inst in instances: prob *= inst[0] inst[2].broadcast_instance(inst) if master == 0: for inst in instances: self._tally[inst[1][0]].bumpers[inst[1][1]] += prob elif master == 1: for inst in instances: self._tally[inst[1][0]][r] += prob self._tally[inst[1][0]].eliminators[inst[1][1]] += prob elif master == 2: (i1, i2) = instances wb_guy = rnd[0].get_player(0) lb_guy = rnd[0].get_player(1) if i1[1][1] == wb_guy or i2[1][1] == wb_guy: winner = wb_guy loser = lb_guy self._tally[loser].eliminators[winner] += prob if i1[1][1] != wb_guy: self._tally[winner].bumpers[loser] += prob else: winner = lb_guy loser = wb_guy self._tally[loser].bumpers[winner] += prob self._tally[loser].eliminators[winner] += prob self._tally[winner][-1] += prob self._tally[loser][-2] += prob if r < len(mas) - 1 and master < 2: (mas, rnd) = self.fetch_round(r + 1, master) num = len(rnd) gens = [m.instances() for m in rnd] for instances in itertools.product(*gens): prob = prob for inst in instances: prob *= inst[0] inst[2].broadcast_instance(inst) self.compute_instances(instances, master, rnd, r + 1, prob) if r + 1 < len(mas) - 1 and master < 2: self.compute_round(r + 1 + 1, master, prob) elif r + 1 == len(mas) - 1 and master < 2: self.compute_round(0, master + 1, prob) elif r == len(mas) - 1 and master < 2: (mas, rnd) = self.fetch_round(0, master + 1) num = len(rnd) gens = [m.instances() for m in rnd] for instances in itertools.product(*gens): prob = prob for inst in instances: prob *= inst[0] inst[2].broadcast_instance(inst) self.compute_instances(instances, master + 1, rnd, 0, prob) if 0 < len(mas) - 1 and master + 1 < 2: self.compute_round(0 + 1, master + 1, prob) elif 0 == len(mas) - 1 and master + 1 < 2: self.compute_round(0, master + 1 + 1, prob) </DeepExtract>
aligulac
positive
def _poll(self, timeout=None): <DeepExtract> if self.closed: raise ClosedEnvironmentError('Trying to operate on `{0}`, after a call to `close()`.'.format(type(self).__name__)) </DeepExtract> if timeout is None: return True end_time = time.time() + timeout delta = None for pipe in self.parent_pipes: delta = max(end_time - time.time(), 0) if pipe is None: return False if pipe.closed or not pipe.poll(delta): return False return True
def _poll(self, timeout=None): if self.closed: raise ClosedEnvironmentError('Trying to operate on `{0}`, after a call to `close()`.'.format(type(self).__name__)) if timeout is None: return True end_time = time.time() + timeout delta = None for pipe in self.parent_pipes: delta = max(end_time - time.time(), 0) if pipe is None: return False if pipe.closed or not pipe.poll(delta): return False return True
DQN-DDPG_Stock_Trading
positive
def as_freq(data_series, freq, atomic_freq='1 Min', series_type='cumulative', include_coverage=False): """Resample data to a different frequency. This method can be used to upsample or downsample meter data. The assumption it makes to do so is that meter data is constant and averaged over the given periods. For instance, to convert billing-period data to daily data, this method first upsamples to the atomic frequency (1 minute freqency, by default), "spreading" usage evenly across all minutes in each period. Then it downsamples to hourly frequency and returns that result. With instantaneous series, the data is copied to all contiguous time intervals and the mean over `freq` is returned. **Caveats**: - This method gives a fair amount of flexibility in resampling as long as you are OK with the assumption that usage is constant over the period (this assumption is generally broken in observed data at large enough frequencies, so this caveat should not be taken lightly). Parameters ---------- data_series : :any:`pandas.Series` Data to resample. Should have a :any:`pandas.DatetimeIndex`. freq : :any:`str` The frequency to resample to. This should be given in a form recognized by the :any:`pandas.Series.resample` method. atomic_freq : :any:`str`, optional The "atomic" frequency of the intermediate data form. This can be adjusted to a higher atomic frequency to increase speed or memory performance. series_type : :any:`str`, {'cumulative', ‘instantaneous’}, default 'cumulative' Type of data sampling. 'cumulative' data can be spread over smaller time intervals and is aggregated using addition (e.g. meter data). 'instantaneous' data is copied (not spread) over smaller time intervals and is aggregated by averaging (e.g. weather data). include_coverage: :any:`bool`, default `False` Option of whether to return a series with just the resampled values or a dataframe with a column that includes percent coverage of source data used for each sample. Returns ------- resampled_data : :any:`pandas.Series` or :any:`pandas.DataFrame` Data resampled to the given frequency (optionally as a dataframe with a coverage column if `include_coverage` is used. 
""" if not isinstance(data_series, pd.Series): raise ValueError('expected series, got object with class {}'.format(data_series.__class__)) if data_series.empty: return data_series <DeepExtract> series = data_series[~data_series.index.duplicated(keep='first')] </DeepExtract> target_freq = pd.Timedelta(atomic_freq) timedeltas = (series.index[1:] - series.index[:-1]).append(pd.TimedeltaIndex([pd.NaT])) if series_type == 'cumulative': spread_factor = target_freq.total_seconds() / timedeltas.total_seconds() series_spread = series * spread_factor atomic_series = series_spread.asfreq(atomic_freq, method='ffill') resampled = atomic_series.resample(freq).sum() resampled_with_nans = atomic_series.resample(freq).first() n_coverage = atomic_series.resample(freq).count() resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index) elif series_type == 'instantaneous': atomic_series = series.asfreq(atomic_freq, method='ffill') resampled = atomic_series.resample(freq).mean() if resampled.index[-1] < series.index[-1]: last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:] resampled = pd.concat([resampled, pd.Series(np.nan, index=last_index)]).resample(freq).mean() if include_coverage: n_total = resampled.resample(atomic_freq).count().resample(freq).count() resampled = resampled.to_frame('value') resampled['coverage'] = n_coverage / n_total return resampled else: return resampled
def as_freq(data_series, freq, atomic_freq='1 Min', series_type='cumulative', include_coverage=False): """Resample data to a different frequency. This method can be used to upsample or downsample meter data. The assumption it makes to do so is that meter data is constant and averaged over the given periods. For instance, to convert billing-period data to daily data, this method first upsamples to the atomic frequency (1 minute freqency, by default), "spreading" usage evenly across all minutes in each period. Then it downsamples to hourly frequency and returns that result. With instantaneous series, the data is copied to all contiguous time intervals and the mean over `freq` is returned. **Caveats**: - This method gives a fair amount of flexibility in resampling as long as you are OK with the assumption that usage is constant over the period (this assumption is generally broken in observed data at large enough frequencies, so this caveat should not be taken lightly). Parameters ---------- data_series : :any:`pandas.Series` Data to resample. Should have a :any:`pandas.DatetimeIndex`. freq : :any:`str` The frequency to resample to. This should be given in a form recognized by the :any:`pandas.Series.resample` method. atomic_freq : :any:`str`, optional The "atomic" frequency of the intermediate data form. This can be adjusted to a higher atomic frequency to increase speed or memory performance. series_type : :any:`str`, {'cumulative', ‘instantaneous’}, default 'cumulative' Type of data sampling. 'cumulative' data can be spread over smaller time intervals and is aggregated using addition (e.g. meter data). 'instantaneous' data is copied (not spread) over smaller time intervals and is aggregated by averaging (e.g. weather data). include_coverage: :any:`bool`, default `False` Option of whether to return a series with just the resampled values or a dataframe with a column that includes percent coverage of source data used for each sample. Returns ------- resampled_data : :any:`pandas.Series` or :any:`pandas.DataFrame` Data resampled to the given frequency (optionally as a dataframe with a coverage column if `include_coverage` is used. 
""" if not isinstance(data_series, pd.Series): raise ValueError('expected series, got object with class {}'.format(data_series.__class__)) if data_series.empty: return data_series series = data_series[~data_series.index.duplicated(keep='first')] target_freq = pd.Timedelta(atomic_freq) timedeltas = (series.index[1:] - series.index[:-1]).append(pd.TimedeltaIndex([pd.NaT])) if series_type == 'cumulative': spread_factor = target_freq.total_seconds() / timedeltas.total_seconds() series_spread = series * spread_factor atomic_series = series_spread.asfreq(atomic_freq, method='ffill') resampled = atomic_series.resample(freq).sum() resampled_with_nans = atomic_series.resample(freq).first() n_coverage = atomic_series.resample(freq).count() resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index) elif series_type == 'instantaneous': atomic_series = series.asfreq(atomic_freq, method='ffill') resampled = atomic_series.resample(freq).mean() if resampled.index[-1] < series.index[-1]: last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:] resampled = pd.concat([resampled, pd.Series(np.nan, index=last_index)]).resample(freq).mean() if include_coverage: n_total = resampled.resample(atomic_freq).count().resample(freq).count() resampled = resampled.to_frame('value') resampled['coverage'] = n_coverage / n_total return resampled else: return resampled
eemeter
positive
def fit(self, X, y, n_repeat=10, is_incremental=False): """Train the model from X and y. Parameters ---------- X: 2D array, optional (default=None) Feature matrix of the whole dataset. y: 2D array, optional (default=None) Label matrix of the whole dataset. n_repeat: int, optional (default=10) The number of optimization iterations. is_incremental: bool, optional (default=False) Whether to train the model in an incremental way. """ if not self._init_flag or not self._ini_parameters: <DeepExtract> if X is None: X = self._init_X if y is None: y = self._init_y X = np.asarray(X) y = np.asarray(y) if len(np.nonzero(y == 2.0)[0]) == 0: y = np.hstack((y, 2 * np.ones((y.shape[0], 1)))) tar_sh = np.shape(y) d = np.shape(X)[1] n_class = tar_sh[1] max_query = math.floor(tar_sh[0] * (tar_sh[1] - 1) / 2) D = 200 num_sub = 5 norm_up = np.inf lmbda = 0 step_size0 = 0.05 avg_begin = 10 avg_size = 5 costs = 1.0 / np.arange(start=1, stop=n_class * 5 + 1) for k in np.arange(start=1, stop=n_class * 5): costs[k] = costs[k - 1] + costs[k] V = np.random.normal(0, 1 / np.sqrt(d), (D, d)) B = np.random.normal(0, 1 / np.sqrt(d), (D, n_class * num_sub)) for k in range(d): tmp1 = V[:, k] if np.all(tmp1 > norm_up): V[:, k] = tmp1 * norm_up / np.linalg.norm(tmp1) for k in range(n_class * num_sub): tmp1 = B[:, k] if np.all(tmp1 > norm_up): B[:, k] = tmp1 * norm_up / np.linalg.norm(tmp1) AB = 0 AV = 0 Anum = 0 trounds = 0 for rr in range(n_repeat): (B, V, AB, AV, Anum, trounds) = self.train_model(X, y, B, V, costs, norm_up, step_size0, num_sub, AB, AV, Anum, trounds, lmbda, avg_begin, avg_size) self._ini_parameters = (B, V, AB, AV, Anum, trounds, costs, norm_up, step_size0, num_sub, lmbda, avg_begin, avg_size, n_repeat, max_query) </DeepExtract> (self._B, self._V, self._AB, self._AV, self._Anum, self._trounds, self._costs, self._norm_up, self._step_size0, self._num_sub, self._lmbda, self._avg_begin, self._avg_size, self._n_repeat, self._max_query) = self._ini_parameters self._init_flag = True if not is_incremental: (self._B, self._V, self._AB, self._AV, self._Anum, self._trounds, self._costs, self._norm_up, self._step_size0, self._num_sub, self._lmbda, self._avg_begin, self._avg_size, self._n_repeat, self._max_query) = self._ini_parameters for i in range(n_repeat): <DeepExtract> y = np.asarray(y) if len(np.nonzero(y == 2.0)[0]) == 0: y = np.hstack((y, 2 * np.ones((y.shape[0], 1)))) X = np.asarray(X) self._B = np.asarray(self._B) self._V = np.asarray(self._V) (n, n_class) = np.shape(y) (row_ind, col_ind) = np.nonzero(y >= 1) train_pairs = np.hstack((row_ind.reshape((-1, 1)), col_ind.reshape((-1, 1)))) n = np.shape(train_pairs)[0] random_idx = randperm(n - 1) for i in range(n): idx_ins = int(train_pairs[random_idx[i], 0]) xins = X[int(idx_ins), :].T idx_class = int(train_pairs[random_idx[i], 1]) if idx_class == n_class - 1: idx_irr = np.nonzero(y[idx_ins, :] == -1)[0] else: idx_irr = np.hstack((np.nonzero(y[idx_ins, :] == -1)[0], n_class - 1)) n_irr = len(idx_irr) By = self._B[:, idx_class * self._num_sub:(idx_class + 1) * self._num_sub] Vins = self._V.dot(xins) fy = np.max(By.T.dot(Vins), axis=0) idx_max_class = np.argmax(By.T.dot(Vins), axis=0) By = By[:, idx_max_class] fyn = np.NINF for j in range(n_irr): idx_pick = idx_irr[randperm(n_irr - 1, 1)[0]] Byn = self._B[:, idx_pick * self._num_sub:(idx_pick + 1) * self._num_sub] tmp1 = Byn.T.dot(Vins) fyn = np.max(tmp1, axis=0) idx_max_pick = np.argmax(tmp1, axis=0) if fyn > fy - 1: break if fyn > fy - 1: step_size = self._step_size0 / (1 + self._lmbda * self._trounds * 
self._step_size0) self._trounds = self._trounds + 1 Byn = self._B[:, idx_pick * self._num_sub + idx_max_pick] loss = self._costs[math.floor(n_irr / (j + 1)) - 1] tmp1 = By + step_size * loss * Vins tmp3 = np.linalg.norm(tmp1) if tmp3 > self._norm_up: tmp1 = tmp1 * self._norm_up / tmp3 tmp2 = Byn - step_size * loss * Vins tmp3 = np.linalg.norm(tmp2) if tmp3 > self._norm_up: tmp2 = tmp2 * self._norm_up / tmp3 self._V -= step_size * loss * self._B[:, [idx_pick * self._num_sub + idx_max_pick, idx_class * self._num_sub + idx_max_class]].dot(np.vstack((xins, -xins))) norms = np.linalg.norm(self._V, axis=0) idx_down = np.nonzero(norms > self._norm_up)[0] self._B[:, idx_class * self._num_sub + idx_max_class] = tmp1 self._B[:, idx_pick * self._num_sub + idx_max_pick] = tmp2 if idx_down.size > 0: norms = norms[norms > self._norm_up] for k in range(len(idx_down)): self._V[:, idx_down[k]] = self._V[:, idx_down[k]] * self._norm_up / norms[k] if i == 0 or (self._trounds > self._avg_begin and i % self._avg_size == 0): self._AB = self._AB + self._B self._AV = self._AV + self._V self._Anum = self._Anum + 1 (self._B, self._V, self._AB, self._AV, self._Anum, self._trounds) = (self._B, self._V, self._AB, self._AV, self._Anum, self._trounds) </DeepExtract>
def fit(self, X, y, n_repeat=10, is_incremental=False): """Train the model from X and y. Parameters ---------- X: 2D array, optional (default=None) Feature matrix of the whole dataset. y: 2D array, optional (default=None) Label matrix of the whole dataset. n_repeat: int, optional (default=10) The number of optimization iterations. is_incremental: bool, optional (default=False) Whether to train the model in an incremental way. """ if not self._init_flag or not self._ini_parameters: if X is None: X = self._init_X if y is None: y = self._init_y X = np.asarray(X) y = np.asarray(y) if len(np.nonzero(y == 2.0)[0]) == 0: y = np.hstack((y, 2 * np.ones((y.shape[0], 1)))) tar_sh = np.shape(y) d = np.shape(X)[1] n_class = tar_sh[1] max_query = math.floor(tar_sh[0] * (tar_sh[1] - 1) / 2) D = 200 num_sub = 5 norm_up = np.inf lmbda = 0 step_size0 = 0.05 avg_begin = 10 avg_size = 5 costs = 1.0 / np.arange(start=1, stop=n_class * 5 + 1) for k in np.arange(start=1, stop=n_class * 5): costs[k] = costs[k - 1] + costs[k] V = np.random.normal(0, 1 / np.sqrt(d), (D, d)) B = np.random.normal(0, 1 / np.sqrt(d), (D, n_class * num_sub)) for k in range(d): tmp1 = V[:, k] if np.all(tmp1 > norm_up): V[:, k] = tmp1 * norm_up / np.linalg.norm(tmp1) for k in range(n_class * num_sub): tmp1 = B[:, k] if np.all(tmp1 > norm_up): B[:, k] = tmp1 * norm_up / np.linalg.norm(tmp1) AB = 0 AV = 0 Anum = 0 trounds = 0 for rr in range(n_repeat): (B, V, AB, AV, Anum, trounds) = self.train_model(X, y, B, V, costs, norm_up, step_size0, num_sub, AB, AV, Anum, trounds, lmbda, avg_begin, avg_size) self._ini_parameters = (B, V, AB, AV, Anum, trounds, costs, norm_up, step_size0, num_sub, lmbda, avg_begin, avg_size, n_repeat, max_query) (self._B, self._V, self._AB, self._AV, self._Anum, self._trounds, self._costs, self._norm_up, self._step_size0, self._num_sub, self._lmbda, self._avg_begin, self._avg_size, self._n_repeat, self._max_query) = self._ini_parameters self._init_flag = True if not is_incremental: (self._B, self._V, self._AB, self._AV, self._Anum, self._trounds, self._costs, self._norm_up, self._step_size0, self._num_sub, self._lmbda, self._avg_begin, self._avg_size, self._n_repeat, self._max_query) = self._ini_parameters for i in range(n_repeat): y = np.asarray(y) if len(np.nonzero(y == 2.0)[0]) == 0: y = np.hstack((y, 2 * np.ones((y.shape[0], 1)))) X = np.asarray(X) self._B = np.asarray(self._B) self._V = np.asarray(self._V) (n, n_class) = np.shape(y) (row_ind, col_ind) = np.nonzero(y >= 1) train_pairs = np.hstack((row_ind.reshape((-1, 1)), col_ind.reshape((-1, 1)))) n = np.shape(train_pairs)[0] random_idx = randperm(n - 1) for i in range(n): idx_ins = int(train_pairs[random_idx[i], 0]) xins = X[int(idx_ins), :].T idx_class = int(train_pairs[random_idx[i], 1]) if idx_class == n_class - 1: idx_irr = np.nonzero(y[idx_ins, :] == -1)[0] else: idx_irr = np.hstack((np.nonzero(y[idx_ins, :] == -1)[0], n_class - 1)) n_irr = len(idx_irr) By = self._B[:, idx_class * self._num_sub:(idx_class + 1) * self._num_sub] Vins = self._V.dot(xins) fy = np.max(By.T.dot(Vins), axis=0) idx_max_class = np.argmax(By.T.dot(Vins), axis=0) By = By[:, idx_max_class] fyn = np.NINF for j in range(n_irr): idx_pick = idx_irr[randperm(n_irr - 1, 1)[0]] Byn = self._B[:, idx_pick * self._num_sub:(idx_pick + 1) * self._num_sub] tmp1 = Byn.T.dot(Vins) fyn = np.max(tmp1, axis=0) idx_max_pick = np.argmax(tmp1, axis=0) if fyn > fy - 1: break if fyn > fy - 1: step_size = self._step_size0 / (1 + self._lmbda * self._trounds * self._step_size0) self._trounds = self._trounds + 
1 Byn = self._B[:, idx_pick * self._num_sub + idx_max_pick] loss = self._costs[math.floor(n_irr / (j + 1)) - 1] tmp1 = By + step_size * loss * Vins tmp3 = np.linalg.norm(tmp1) if tmp3 > self._norm_up: tmp1 = tmp1 * self._norm_up / tmp3 tmp2 = Byn - step_size * loss * Vins tmp3 = np.linalg.norm(tmp2) if tmp3 > self._norm_up: tmp2 = tmp2 * self._norm_up / tmp3 self._V -= step_size * loss * self._B[:, [idx_pick * self._num_sub + idx_max_pick, idx_class * self._num_sub + idx_max_class]].dot(np.vstack((xins, -xins))) norms = np.linalg.norm(self._V, axis=0) idx_down = np.nonzero(norms > self._norm_up)[0] self._B[:, idx_class * self._num_sub + idx_max_class] = tmp1 self._B[:, idx_pick * self._num_sub + idx_max_pick] = tmp2 if idx_down.size > 0: norms = norms[norms > self._norm_up] for k in range(len(idx_down)): self._V[:, idx_down[k]] = self._V[:, idx_down[k]] * self._norm_up / norms[k] if i == 0 or (self._trounds > self._avg_begin and i % self._avg_size == 0): self._AB = self._AB + self._B self._AV = self._AV + self._V self._Anum = self._Anum + 1 (self._B, self._V, self._AB, self._AV, self._Anum, self._trounds) = (self._B, self._V, self._AB, self._AV, self._Anum, self._trounds) </DeepExtract>
ALiPy
positive
def update_(self): <DeepExtract> for link in self.links: socket_from_type = link.from_socket.node.nodedef.getOutput(link.from_socket.name).getType() socket_to_type = link.to_socket.node.nodedef.getInput(link.to_socket.name).getType() if socket_to_type != socket_from_type: link.is_valid = False continue link.is_valid = True </DeepExtract> for material in bpy.data.materials: if material.hdusd.mx_node_tree and material.hdusd.mx_node_tree.name == self.name: material.hdusd.update() for window in bpy.context.window_manager.windows: for area in window.screen.areas: if area.type == AREA_TO_UPDATE: for region in area.regions: if region.type == REGION_TO_UPDATE: region.tag_redraw()
def update_(self): for link in self.links: socket_from_type = link.from_socket.node.nodedef.getOutput(link.from_socket.name).getType() socket_to_type = link.to_socket.node.nodedef.getInput(link.to_socket.name).getType() if socket_to_type != socket_from_type: link.is_valid = False continue link.is_valid = True for material in bpy.data.materials: if material.hdusd.mx_node_tree and material.hdusd.mx_node_tree.name == self.name: material.hdusd.update() for window in bpy.context.window_manager.windows: for area in window.screen.areas: if area.type == AREA_TO_UPDATE: for region in area.regions: if region.type == REGION_TO_UPDATE: region.tag_redraw()
BlenderUSDHydraAddon
positive
def process_event(self, event, **kwargs): """ Process a received event. """ res = None try: message = event['data'] message['_event_tag'] = event['tag'] <DeepExtract> (func, settings) = self._get_hook_for(message, 'worker', parse_settings=True) result = func(message, **settings) res = result </DeepExtract> except Exception as ex: log.exception('Exception while processing event: {:}'.format(event)) res = {'error': str(ex)} finally: if res != None: <DeepExtract> match = self._tag_regex.match(message['_event_tag']) groups = match.groupdict() tag = '{:s}/res/{:s}'.format(self._namespace, groups['id']) if DEBUG: log.debug("Sending reply mesage with tag '{:s}': {:}".format(tag, res)) with self._bus_lock: self._outgoing_bus.fire_event(res, tag) </DeepExtract> else: log.warn('No reply to send back for event: {:}'.format(event))
def process_event(self, event, **kwargs): """ Process a received event. """ res = None try: message = event['data'] message['_event_tag'] = event['tag'] (func, settings) = self._get_hook_for(message, 'worker', parse_settings=True) result = func(message, **settings) res = result except Exception as ex: log.exception('Exception while processing event: {:}'.format(event)) res = {'error': str(ex)} finally: if res != None: match = self._tag_regex.match(message['_event_tag']) groups = match.groupdict() tag = '{:s}/res/{:s}'.format(self._namespace, groups['id']) if DEBUG: log.debug("Sending reply mesage with tag '{:s}': {:}".format(tag, res)) with self._bus_lock: self._outgoing_bus.fire_event(res, tag) else: log.warn('No reply to send back for event: {:}'.format(event))
autopi-core
positive
def test_message_call_method_setter(client): data = {'method_count': 0} <DeepExtract> if data is None: data = {} response = post_and_get_response(client, url=f'/message/tests.views.fake_components.{component_name}', data=data, action_queue=[{'payload': {'name': 'method_count=2'}, 'type': 'callMethod'}]) body = response </DeepExtract> assert body['data'].get('method_count') == 2
def test_message_call_method_setter(client): data = {'method_count': 0} if data is None: data = {} response = post_and_get_response(client, url=f'/message/tests.views.fake_components.{component_name}', data=data, action_queue=[{'payload': {'name': 'method_count=2'}, 'type': 'callMethod'}]) body = response assert body['data'].get('method_count') == 2
django-unicorn
positive
def writeSuites(output): """Write all TestDescriptions and SuiteDescriptions""" for suite in suites: <DeepExtract> global lastIncluded if options.outputFileName: dirname = os.path.split(options.outputFileName)[0] tfile = relpath(suite['file'], dirname) if os.path.exists(tfile): if tfile == lastIncluded: return output.writelines(['#include "', tfile, '"\n\n']) lastIncluded = tfile return tfile = os.path.abspath(suite['file']) if os.path.exists(tfile): if tfile == lastIncluded: return output.writelines(['#include "', tfile, '"\n\n']) lastIncluded = tfile return </DeepExtract> if isGenerated(suite): <DeepExtract> output.write('class %s : public CxxTest::TestSuite {\n' % suite['fullname']) output.write('public:\n') for line in suite['lines']: output.write(line) output.write('};\n\n') </DeepExtract> if not options.noStaticInit: if isDynamic(suite): <DeepExtract> if options.noStaticInit: output.write('static %s* %s;\n\n' % (suite['fullname'], suite['object'])) else: output.write('static %s* %s = 0;\n\n' % (suite['fullname'], suite['object'])) </DeepExtract> else: <DeepExtract> output.writelines(['static ', suite['fullname'], ' ', suite['object'], ';\n\n']) </DeepExtract> <DeepExtract> if options.noStaticInit: output.write('static CxxTest::List %s;\n' % suite['tlist']) else: output.write('static CxxTest::List %s = { 0, 0 };\n' % suite['tlist']) </DeepExtract> <DeepExtract> if isDynamic(suite): writeDynamicDescription(output, suite) else: writeStaticDescription(output, suite) </DeepExtract> <DeepExtract> for test in suite['tests']: writeTestDescription(output, suite, test) </DeepExtract>
def writeSuites(output): """Write all TestDescriptions and SuiteDescriptions""" for suite in suites: global lastIncluded if options.outputFileName: dirname = os.path.split(options.outputFileName)[0] tfile = relpath(suite['file'], dirname) if os.path.exists(tfile): if tfile == lastIncluded: return output.writelines(['#include "', tfile, '"\n\n']) lastIncluded = tfile return tfile = os.path.abspath(suite['file']) if os.path.exists(tfile): if tfile == lastIncluded: return output.writelines(['#include "', tfile, '"\n\n']) lastIncluded = tfile return if isGenerated(suite): output.write('class %s : public CxxTest::TestSuite {\n' % suite['fullname']) output.write('public:\n') for line in suite['lines']: output.write(line) output.write('};\n\n') if not options.noStaticInit: if isDynamic(suite): if options.noStaticInit: output.write('static %s* %s;\n\n' % (suite['fullname'], suite['object'])) else: output.write('static %s* %s = 0;\n\n' % (suite['fullname'], suite['object'])) else: output.writelines(['static ', suite['fullname'], ' ', suite['object'], ';\n\n']) if options.noStaticInit: output.write('static CxxTest::List %s;\n' % suite['tlist']) else: output.write('static CxxTest::List %s = { 0, 0 };\n' % suite['tlist']) if isDynamic(suite): writeDynamicDescription(output, suite) else: writeStaticDescription(output, suite) for test in suite['tests']: writeTestDescription(output, suite, test) </DeepExtract>
cxxtest
positive
def _impose_degradation(self, pe_pristine=pd.DataFrame(), ne_1_pristine=pd.DataFrame(), ne_2_pristine_pos=pd.DataFrame(), ne_2_pristine_neg=pd.DataFrame(), lli=0.0, lam_pe=0.0, lam_ne=0.0, x_ne_2=0.0): pe_translation = 0 pe_shrinkage = 0 ne_translation = 0 ne_shrinkage = 0 <DeepExtract> if ne_2_pristine_pos.empty: df_blended = ne_1_pristine ne_pristine = df_blended if ne_2_pristine_neg.empty: electrode_2 = ne_2_pristine_pos x_ne_2 = np.abs(x_ne_2) elif x_ne_2 > 0: electrode_2 = ne_2_pristine_pos else: electrode_2 = ne_2_pristine_neg x_ne_2 = np.abs(x_ne_2) electrode_1_interper = interp1d(ne_1_pristine['Voltage_aligned'], ne_1_pristine['SOC_aligned'], bounds_error=False, fill_value='extrapolate') electrode_2_interper = interp1d(electrode_2['Voltage_aligned'], electrode_2['SOC_aligned'], bounds_error=False, fill_value='extrapolate') voltage_vec = np.linspace(np.min((np.min(ne_1_pristine['Voltage_aligned']), np.min(electrode_2['Voltage_aligned']))), np.max((np.max(ne_1_pristine['Voltage_aligned']), np.max(electrode_2['Voltage_aligned']))), 1001) electrode_1_voltage_aligned = pd.DataFrame(electrode_1_interper(voltage_vec), columns=['SOC']) electrode_2_voltage_aligned = pd.DataFrame(electrode_2_interper(voltage_vec), columns=['SOC']) electrode_1_voltage_aligned['Voltage'] = voltage_vec electrode_2_voltage_aligned['Voltage'] = voltage_vec df_blend_voltage_aligned = pd.DataFrame((1 - x_ne_2) * electrode_1_voltage_aligned['SOC'] + x_ne_2 * electrode_2_voltage_aligned['SOC'], columns=['SOC']) df_blend_voltage_aligned['Voltage'] = electrode_1_voltage_aligned.merge(electrode_2_voltage_aligned, on='Voltage')['Voltage'] df_blended_interper = interp1d(df_blend_voltage_aligned['SOC'], df_blend_voltage_aligned['Voltage'], bounds_error=False) soc_vec = np.linspace(0, 100, 1001) df_blended = pd.DataFrame(df_blended_interper(soc_vec), columns=['Voltage_aligned']) df_blended['SOC_aligned'] = soc_vec df_blended_soc_mod_interper = interp1d(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()], df_blended['Voltage_aligned'].loc[~df_blended['Voltage_aligned'].isna()], bounds_error=False) soc_vec = np.linspace(np.min(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]), np.max(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]), 1001) df_blended_soc_mod = pd.DataFrame(df_blended_soc_mod_interper(soc_vec), columns=['Voltage_aligned']) df_blended_soc_mod['SOC_aligned'] = soc_vec / np.max(soc_vec) * 100 ne_pristine = df_blended_soc_mod </DeepExtract> ne_translation += lli upper_voltage_limit = self.UPPER_VOLTAGE pe_soc_setpoint = pe_pristine['SOC_aligned'].loc[np.argmin(np.abs(pe_pristine['Voltage_aligned'] - ne_pristine['Voltage_aligned'] - upper_voltage_limit))] pe_translation += lam_pe * pe_soc_setpoint / 100 pe_shrinkage += lam_pe lower_voltage_limit = self.LOWER_VOLTAGE ne_soc_setpoint = ne_pristine['SOC_aligned'].loc[np.argmin(pe_pristine['Voltage_aligned'] - ne_pristine['Voltage_aligned'] - lower_voltage_limit)] ne_translation += lam_ne * ne_soc_setpoint / 100 ne_shrinkage += lam_ne pe_pristine_shifted_by_deg = pe_pristine.copy() pe_pristine_shifted_by_deg['SOC_aligned'] = pe_pristine_shifted_by_deg['SOC_aligned'] * (1 - pe_shrinkage / 100) + pe_translation ne_pristine_shifted_by_deg = ne_pristine.copy() ne_pristine_shifted_by_deg['SOC_aligned'] = ne_pristine_shifted_by_deg['SOC_aligned'] * (1 - ne_shrinkage / 100) + ne_translation lower_soc = np.min((np.min(pe_pristine_shifted_by_deg['SOC_aligned']), np.min(ne_pristine_shifted_by_deg['SOC_aligned']))) 
upper_soc = np.max((np.max(pe_pristine_shifted_by_deg['SOC_aligned']), np.max(ne_pristine_shifted_by_deg['SOC_aligned']))) soc_vec = np.linspace(lower_soc, upper_soc, 1001) pe_pristine_interper = interp1d(pe_pristine_shifted_by_deg['SOC_aligned'], pe_pristine_shifted_by_deg['Voltage_aligned'], bounds_error=False) pe_degraded = pe_pristine_shifted_by_deg.copy() pe_degraded['SOC_aligned'] = soc_vec pe_degraded['Voltage_aligned'] = pe_pristine_interper(soc_vec) ne_pristine_interper = interp1d(ne_pristine_shifted_by_deg['SOC_aligned'], ne_pristine_shifted_by_deg['Voltage_aligned'], bounds_error=False) ne_degraded = ne_pristine_shifted_by_deg.copy() ne_degraded['SOC_aligned'] = soc_vec ne_degraded['Voltage_aligned'] = ne_pristine_interper(soc_vec) return (pe_degraded, ne_degraded)
def _impose_degradation(self, pe_pristine=pd.DataFrame(), ne_1_pristine=pd.DataFrame(), ne_2_pristine_pos=pd.DataFrame(), ne_2_pristine_neg=pd.DataFrame(), lli=0.0, lam_pe=0.0, lam_ne=0.0, x_ne_2=0.0): pe_translation = 0 pe_shrinkage = 0 ne_translation = 0 ne_shrinkage = 0 if ne_2_pristine_pos.empty: df_blended = ne_1_pristine ne_pristine = df_blended if ne_2_pristine_neg.empty: electrode_2 = ne_2_pristine_pos x_ne_2 = np.abs(x_ne_2) elif x_ne_2 > 0: electrode_2 = ne_2_pristine_pos else: electrode_2 = ne_2_pristine_neg x_ne_2 = np.abs(x_ne_2) electrode_1_interper = interp1d(ne_1_pristine['Voltage_aligned'], ne_1_pristine['SOC_aligned'], bounds_error=False, fill_value='extrapolate') electrode_2_interper = interp1d(electrode_2['Voltage_aligned'], electrode_2['SOC_aligned'], bounds_error=False, fill_value='extrapolate') voltage_vec = np.linspace(np.min((np.min(ne_1_pristine['Voltage_aligned']), np.min(electrode_2['Voltage_aligned']))), np.max((np.max(ne_1_pristine['Voltage_aligned']), np.max(electrode_2['Voltage_aligned']))), 1001) electrode_1_voltage_aligned = pd.DataFrame(electrode_1_interper(voltage_vec), columns=['SOC']) electrode_2_voltage_aligned = pd.DataFrame(electrode_2_interper(voltage_vec), columns=['SOC']) electrode_1_voltage_aligned['Voltage'] = voltage_vec electrode_2_voltage_aligned['Voltage'] = voltage_vec df_blend_voltage_aligned = pd.DataFrame((1 - x_ne_2) * electrode_1_voltage_aligned['SOC'] + x_ne_2 * electrode_2_voltage_aligned['SOC'], columns=['SOC']) df_blend_voltage_aligned['Voltage'] = electrode_1_voltage_aligned.merge(electrode_2_voltage_aligned, on='Voltage')['Voltage'] df_blended_interper = interp1d(df_blend_voltage_aligned['SOC'], df_blend_voltage_aligned['Voltage'], bounds_error=False) soc_vec = np.linspace(0, 100, 1001) df_blended = pd.DataFrame(df_blended_interper(soc_vec), columns=['Voltage_aligned']) df_blended['SOC_aligned'] = soc_vec df_blended_soc_mod_interper = interp1d(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()], df_blended['Voltage_aligned'].loc[~df_blended['Voltage_aligned'].isna()], bounds_error=False) soc_vec = np.linspace(np.min(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]), np.max(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]), 1001) df_blended_soc_mod = pd.DataFrame(df_blended_soc_mod_interper(soc_vec), columns=['Voltage_aligned']) df_blended_soc_mod['SOC_aligned'] = soc_vec / np.max(soc_vec) * 100 ne_pristine = df_blended_soc_mod ne_translation += lli upper_voltage_limit = self.UPPER_VOLTAGE pe_soc_setpoint = pe_pristine['SOC_aligned'].loc[np.argmin(np.abs(pe_pristine['Voltage_aligned'] - ne_pristine['Voltage_aligned'] - upper_voltage_limit))] pe_translation += lam_pe * pe_soc_setpoint / 100 pe_shrinkage += lam_pe lower_voltage_limit = self.LOWER_VOLTAGE ne_soc_setpoint = ne_pristine['SOC_aligned'].loc[np.argmin(pe_pristine['Voltage_aligned'] - ne_pristine['Voltage_aligned'] - lower_voltage_limit)] ne_translation += lam_ne * ne_soc_setpoint / 100 ne_shrinkage += lam_ne pe_pristine_shifted_by_deg = pe_pristine.copy() pe_pristine_shifted_by_deg['SOC_aligned'] = pe_pristine_shifted_by_deg['SOC_aligned'] * (1 - pe_shrinkage / 100) + pe_translation ne_pristine_shifted_by_deg = ne_pristine.copy() ne_pristine_shifted_by_deg['SOC_aligned'] = ne_pristine_shifted_by_deg['SOC_aligned'] * (1 - ne_shrinkage / 100) + ne_translation lower_soc = np.min((np.min(pe_pristine_shifted_by_deg['SOC_aligned']), np.min(ne_pristine_shifted_by_deg['SOC_aligned']))) upper_soc = 
np.max((np.max(pe_pristine_shifted_by_deg['SOC_aligned']), np.max(ne_pristine_shifted_by_deg['SOC_aligned']))) soc_vec = np.linspace(lower_soc, upper_soc, 1001) pe_pristine_interper = interp1d(pe_pristine_shifted_by_deg['SOC_aligned'], pe_pristine_shifted_by_deg['Voltage_aligned'], bounds_error=False) pe_degraded = pe_pristine_shifted_by_deg.copy() pe_degraded['SOC_aligned'] = soc_vec pe_degraded['Voltage_aligned'] = pe_pristine_interper(soc_vec) ne_pristine_interper = interp1d(ne_pristine_shifted_by_deg['SOC_aligned'], ne_pristine_shifted_by_deg['Voltage_aligned'], bounds_error=False) ne_degraded = ne_pristine_shifted_by_deg.copy() ne_degraded['SOC_aligned'] = soc_vec ne_degraded['Voltage_aligned'] = ne_pristine_interper(soc_vec) return (pe_degraded, ne_degraded)
beep
positive
def test_delete_assignment_with_no_submissions(self): try: Assignment.objects.get(assignment_id=1).delete() except Assignment.DoesNotExist: pass kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'} <DeepExtract> client = Client() client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD) client = client </DeepExtract> response = client.post('/teacher/course/1/delete_assignment', {'assignment_id': 1}, **kwargs) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(response.status_code, 200) self.assertEqual(array['status'], 'failed') self.assertEqual(array['message'], 'record not found')
def test_delete_assignment_with_no_submissions(self): try: Assignment.objects.get(assignment_id=1).delete() except Assignment.DoesNotExist: pass kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'} client = Client() client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD) client = client response = client.post('/teacher/course/1/delete_assignment', {'assignment_id': 1}, **kwargs) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(response.status_code, 200) self.assertEqual(array['status'], 'failed') self.assertEqual(array['message'], 'record not found')
academicstoday-django
positive
def setup_working_directory(self, cluster): if self.clusters_directory: workdir = os.path.join(self.clusters_directory, cluster.name) else: workdir = tempfile.mkdtemp(prefix='dask', suffix=cluster.name) <DeepExtract> certsdir = os.path.join(workdir, '.certs') </DeepExtract> <DeepExtract> logsdir = os.path.join(workdir, 'logs') </DeepExtract> paths = [workdir, certsdir, logsdir] for path in paths: os.makedirs(path, 448, exist_ok=True) <DeepExtract> certsdir = self.get_certs_directory(workdir) cert_path = os.path.join(certsdir, 'dask.crt') key_path = os.path.join(certsdir, 'dask.pem') (cert_path, key_path) = (cert_path, key_path) </DeepExtract> flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL for (path, data) in [(cert_path, cluster.tls_cert), (key_path, cluster.tls_key)]: with os.fdopen(os.open(path, flags, 384), 'wb') as fil: fil.write(data) paths.extend(path) <DeepExtract> pwnam = getpwnam(cluster.username) for p in paths: os.chown(p, pwnam.pw_uid, pwnam.pw_gid) </DeepExtract> self.log.debug('Working directory %s for cluster %s created', workdir, cluster.name) return workdir
def setup_working_directory(self, cluster): if self.clusters_directory: workdir = os.path.join(self.clusters_directory, cluster.name) else: workdir = tempfile.mkdtemp(prefix='dask', suffix=cluster.name) certsdir = os.path.join(workdir, '.certs') logsdir = os.path.join(workdir, 'logs') paths = [workdir, certsdir, logsdir] for path in paths: os.makedirs(path, 448, exist_ok=True) certsdir = self.get_certs_directory(workdir) cert_path = os.path.join(certsdir, 'dask.crt') key_path = os.path.join(certsdir, 'dask.pem') (cert_path, key_path) = (cert_path, key_path) flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL for (path, data) in [(cert_path, cluster.tls_cert), (key_path, cluster.tls_key)]: with os.fdopen(os.open(path, flags, 384), 'wb') as fil: fil.write(data) paths.extend(path) pwnam = getpwnam(cluster.username) for p in paths: os.chown(p, pwnam.pw_uid, pwnam.pw_gid) self.log.debug('Working directory %s for cluster %s created', workdir, cluster.name) return workdir
dask-gateway
positive
def get(self, autoscaler_id): <DeepExtract> if not self.has_permissions('clusters.view_cluster', self.cluster): self.raise_no_permissions('clusters.view_cluster') </DeepExtract> obj = models.CMAutoScaler.objects.get(id=autoscaler_id) return self.to_api_object(obj)
def get(self, autoscaler_id): if not self.has_permissions('clusters.view_cluster', self.cluster): self.raise_no_permissions('clusters.view_cluster') obj = models.CMAutoScaler.objects.get(id=autoscaler_id) return self.to_api_object(obj)
cloudman
positive
def maybe_empty_lines(self, current_line: Line) -> LinesBlock: """Return the number of extra empty lines before and after the `current_line`. This is for separating `def`, `async def` and `class` with extra empty lines (two on module-level). """ <DeepExtract> max_allowed = 1 if current_line.depth == 0: max_allowed = 1 if self.mode.is_pyi else 2 if current_line.leaves: first_leaf = current_line.leaves[0] before = first_leaf.prefix.count('\n') before = min(before, max_allowed) first_leaf.prefix = '' else: before = 0 depth = current_line.depth while self.previous_defs and self.previous_defs[-1].depth >= depth: if self.mode.is_pyi: assert self.previous_line is not None if depth and (not current_line.is_def) and self.previous_line.is_def: before = min(1, before) elif Preview.blank_line_after_nested_stub_class in self.mode and self.previous_defs[-1].is_class and (not self.previous_defs[-1].is_stub_class): before = 1 elif depth: before = 0 else: before = 1 elif depth: before = 1 elif not depth and self.previous_defs[-1].depth and (current_line.leaves[-1].type == token.COLON) and (current_line.leaves[0].value not in ('with', 'try', 'for', 'while', 'if', 'match')): before = 1 else: before = 2 self.previous_defs.pop() if current_line.is_decorator or current_line.is_def or current_line.is_class: (before, after) = self._maybe_empty_lines_for_class_or_def(current_line, before) if self.previous_line and self.previous_line.is_import and (not current_line.is_import) and (not current_line.is_fmt_pass_converted(first_leaf_matches=is_import)) and (depth == self.previous_line.depth): (before, after) = (before or 1, 0) if self.previous_line and self.previous_line.is_class and current_line.is_triple_quoted_string: (before, after) = (before, 1) if self.previous_line and self.previous_line.opens_block: (before, after) = (0, 0) (before, after) = (before, 0) </DeepExtract> previous_after = self.previous_block.after if self.previous_block else 0 before = 0 if self.previous_line is None else before - previous_after block = LinesBlock(mode=self.mode, previous_block=self.previous_block, original_line=current_line, before=before, after=after) if current_line.is_comment: if self.previous_line is None or (not self.previous_line.is_decorator and (not self.previous_line.is_comment or before) and (self.semantic_leading_comment is None or before)): self.semantic_leading_comment = block elif not current_line.is_decorator or before: self.semantic_leading_comment = None self.previous_line = current_line self.previous_block = block return block
def maybe_empty_lines(self, current_line: Line) -> LinesBlock: """Return the number of extra empty lines before and after the `current_line`. This is for separating `def`, `async def` and `class` with extra empty lines (two on module-level). """ max_allowed = 1 if current_line.depth == 0: max_allowed = 1 if self.mode.is_pyi else 2 if current_line.leaves: first_leaf = current_line.leaves[0] before = first_leaf.prefix.count('\n') before = min(before, max_allowed) first_leaf.prefix = '' else: before = 0 depth = current_line.depth while self.previous_defs and self.previous_defs[-1].depth >= depth: if self.mode.is_pyi: assert self.previous_line is not None if depth and (not current_line.is_def) and self.previous_line.is_def: before = min(1, before) elif Preview.blank_line_after_nested_stub_class in self.mode and self.previous_defs[-1].is_class and (not self.previous_defs[-1].is_stub_class): before = 1 elif depth: before = 0 else: before = 1 elif depth: before = 1 elif not depth and self.previous_defs[-1].depth and (current_line.leaves[-1].type == token.COLON) and (current_line.leaves[0].value not in ('with', 'try', 'for', 'while', 'if', 'match')): before = 1 else: before = 2 self.previous_defs.pop() if current_line.is_decorator or current_line.is_def or current_line.is_class: (before, after) = self._maybe_empty_lines_for_class_or_def(current_line, before) if self.previous_line and self.previous_line.is_import and (not current_line.is_import) and (not current_line.is_fmt_pass_converted(first_leaf_matches=is_import)) and (depth == self.previous_line.depth): (before, after) = (before or 1, 0) if self.previous_line and self.previous_line.is_class and current_line.is_triple_quoted_string: (before, after) = (before, 1) if self.previous_line and self.previous_line.opens_block: (before, after) = (0, 0) (before, after) = (before, 0) previous_after = self.previous_block.after if self.previous_block else 0 before = 0 if self.previous_line is None else before - previous_after block = LinesBlock(mode=self.mode, previous_block=self.previous_block, original_line=current_line, before=before, after=after) if current_line.is_comment: if self.previous_line is None or (not self.previous_line.is_decorator and (not self.previous_line.is_comment or before) and (self.semantic_leading_comment is None or before)): self.semantic_leading_comment = block elif not current_line.is_decorator or before: self.semantic_leading_comment = None self.previous_line = current_line self.previous_block = block return block
black
positive
def __init__(self, env, dagger_agent, driving_style=DrivingStyle.STEER_ONLY): """ Normalize the brake and handbrake space to be between -1 and 1 so that all actions have the same dimension - Then create a single box space. The is_game_driving space can be ignored for now within the ppo agent. """ super(BootstrapRLGymEnv, self).__init__(env) self.denormalizers = None <DeepExtract> ac_space = self.action_space if isinstance(ac_space, gym.spaces.Tuple): self.denormalizers = [] box_spaces = [s for s in ac_space.spaces if isinstance(s, gym.spaces.Box)] total_dims = 0 for (i, space) in enumerate(box_spaces): if len(space.shape) > 1 or space.shape[0] > 1: raise NotImplementedError('Multi-dimensional box spaces not yet supported - need to flatten / separate') else: total_dims += 1 self.denormalizers.append(self.get_denormalizer(space.high[0], space.low[0])) self.action_space = gym.spaces.Box(-1, 1, shape=(total_dims,)) </DeepExtract> self.dagger_agent = dagger_agent self.driving_style = driving_style self.previous_obz = None self.experience_buffer = ExperienceBuffer() self.simple_test = c.SIMPLE_PPO if self.simple_test: shape = (5,) else: speed_length = 1 acceleration_length = 3 previous_output_length = 3 shape = (dagger_agent.net.num_last_hidden + dagger_agent.net.num_targets,) self.observation_space = spaces.Box(low=np.finfo(np.float32).min, high=np.finfo(np.float32).max, shape=shape, dtype=np.float32)
def __init__(self, env, dagger_agent, driving_style=DrivingStyle.STEER_ONLY): """ Normalize the brake and handbrake space to be between -1 and 1 so that all actions have the same dimension - Then create a single box space. The is_game_driving space can be ignored for now within the ppo agent. """ super(BootstrapRLGymEnv, self).__init__(env) self.denormalizers = None ac_space = self.action_space if isinstance(ac_space, gym.spaces.Tuple): self.denormalizers = [] box_spaces = [s for s in ac_space.spaces if isinstance(s, gym.spaces.Box)] total_dims = 0 for (i, space) in enumerate(box_spaces): if len(space.shape) > 1 or space.shape[0] > 1: raise NotImplementedError('Multi-dimensional box spaces not yet supported - need to flatten / separate') else: total_dims += 1 self.denormalizers.append(self.get_denormalizer(space.high[0], space.low[0])) self.action_space = gym.spaces.Box(-1, 1, shape=(total_dims,)) self.dagger_agent = dagger_agent self.driving_style = driving_style self.previous_obz = None self.experience_buffer = ExperienceBuffer() self.simple_test = c.SIMPLE_PPO if self.simple_test: shape = (5,) else: speed_length = 1 acceleration_length = 3 previous_output_length = 3 shape = (dagger_agent.net.num_last_hidden + dagger_agent.net.num_targets,) self.observation_space = spaces.Box(low=np.finfo(np.float32).min, high=np.finfo(np.float32).max, shape=shape, dtype=np.float32)
deepdrive
positive
def _writer(self, id_): """ A threaded background writer """ client = None while True: self.writer_thread_status[id_] = STATUS_QUEUE entry = self._write_queue.get() self.writer_thread_status[id_] = STATUS_NOTHING if entry is None or self.last_exception: logger.debug('Writer {} finishing.'.format(id_)) break if client is None: <DeepExtract> session = boto3.session.Session() if self._disable_encoding_type: session.events.unregister('before-parameter-build.s3.ListObjects', set_list_objects_encoding_type_url) client = session.client('s3', **self._resource_config) client = client </DeepExtract> (uid, enc_envkey, enc_version, enc_nonce, data, callback) = entry self.writer_thread_status[id_] = STATUS_THROTTLING time.sleep(self.write_throttling.consume(len(data))) self.writer_thread_status[id_] = STATUS_NOTHING try: self.writer_thread_status[id_] = STATUS_WRITING client.put_object(Body=data, Key=uid, Bucket=self._bucket_name) self.writer_thread_status[id_] = STATUS_NOTHING except Exception as e: self.last_exception = e else: if callback: callback(uid, enc_envkey, enc_version, enc_nonce) self._write_queue.task_done()
def _writer(self, id_): """ A threaded background writer """ client = None while True: self.writer_thread_status[id_] = STATUS_QUEUE entry = self._write_queue.get() self.writer_thread_status[id_] = STATUS_NOTHING if entry is None or self.last_exception: logger.debug('Writer {} finishing.'.format(id_)) break if client is None: session = boto3.session.Session() if self._disable_encoding_type: session.events.unregister('before-parameter-build.s3.ListObjects', set_list_objects_encoding_type_url) client = session.client('s3', **self._resource_config) client = client (uid, enc_envkey, enc_version, enc_nonce, data, callback) = entry self.writer_thread_status[id_] = STATUS_THROTTLING time.sleep(self.write_throttling.consume(len(data))) self.writer_thread_status[id_] = STATUS_NOTHING try: self.writer_thread_status[id_] = STATUS_WRITING client.put_object(Body=data, Key=uid, Bucket=self._bucket_name) self.writer_thread_status[id_] = STATUS_NOTHING except Exception as e: self.last_exception = e else: if callback: callback(uid, enc_envkey, enc_version, enc_nonce) self._write_queue.task_done()
backy2
positive
def GetUij(coords_i, coords_j, r_ij=None): """Calculate 3d unit vector from cartesian points i to j. Gives zero vector if i and j are the same point. Args: coords_i (float*): 3 cartesian coordinates [Angstrom] of point i. coords_j (float*): 3 cartesian coordinates [Angstrom] of point j. r_ij (float): Distance from i to j [Angstrom], if provided. Returns: u_ij (float*): 3 unit vector components from point i to j. """ if not r_ij: <DeepExtract> r_ij = math.sqrt((coords_j[0] - coords_i[0]) ** 2 + (coords_j[1] - coords_i[1]) ** 2 + (coords_j[2] - coords_i[2]) ** 2) </DeepExtract> if not r_ij: return numpy.zeros(3) return (coords_j - coords_i) / r_ij
def GetUij(coords_i, coords_j, r_ij=None): """Calculate 3d unit vector from cartesian points i to j. Gives zero vector if i and j are the same point. Args: coords_i (float*): 3 cartesian coordinates [Angstrom] of point i. coords_j (float*): 3 cartesian coordinates [Angstrom] of point j. r_ij (float): Distance from i to j [Angstrom], if provided. Returns: u_ij (float*): 3 unit vector components from point i to j. """ if not r_ij: r_ij = math.sqrt((coords_j[0] - coords_i[0]) ** 2 + (coords_j[1] - coords_i[1]) ** 2 + (coords_j[2] - coords_i[2]) ** 2) if not r_ij: return numpy.zeros(3) return (coords_j - coords_i) / r_ij
computational_chemistry
positive
def __init__(self, input_depth, output_depth, expansion, stride, bn_type='bn', kernel=3, width_divisor=1, shuffle_type=None, pw_group=1, se=False, cdw=False, dw_skip_bn=False, dw_skip_relu=False): super(IRFBlock, self).__init__() assert kernel in [1, 3, 5, 7], kernel self.use_res_connect = stride == 1 and input_depth == output_depth self.output_depth = output_depth mid_depth = int(input_depth * expansion) <DeepExtract> ret = int(mid_depth) if width_divisor > 0 and mid_depth % width_divisor != 0: ret = int((_py2_round(mid_depth / width_divisor) or width_divisor) * width_divisor) mid_depth = ret </DeepExtract> self.pw = ConvBNRelu(input_depth, mid_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu='relu', bn_type=bn_type, group=pw_group) <DeepExtract> assert stride in [1, 2, 4] or stride in [-1, -2, -4] or (isinstance(stride, tuple) and all((x in [-1, -2, -4] for x in stride))) scales = stride ret = None if isinstance(stride, tuple) or stride < 0: scales = [-x for x in stride] if isinstance(stride, tuple) else -stride stride = 1 ret = Upsample(scale_factor=scales, mode='nearest', align_corners=None) (self.upscale, stride) = (ret, stride) </DeepExtract> if kernel == 1: self.dw = nn.Sequential() elif cdw: dw1 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu', bn_type=bn_type) dw2 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=1, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None) self.dw = nn.Sequential(OrderedDict([('dw1', dw1), ('dw2', dw2)])) else: self.dw = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None) self.pwl = ConvBNRelu(mid_depth, output_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu=None, bn_type=bn_type, group=pw_group) self.shuffle_type = shuffle_type if shuffle_type is not None: self.shuffle = ChannelShuffle(pw_group) self.se4 = SEModule(output_depth) if se else nn.Sequential() self.output_depth = output_depth
def __init__(self, input_depth, output_depth, expansion, stride, bn_type='bn', kernel=3, width_divisor=1, shuffle_type=None, pw_group=1, se=False, cdw=False, dw_skip_bn=False, dw_skip_relu=False): super(IRFBlock, self).__init__() assert kernel in [1, 3, 5, 7], kernel self.use_res_connect = stride == 1 and input_depth == output_depth self.output_depth = output_depth mid_depth = int(input_depth * expansion) ret = int(mid_depth) if width_divisor > 0 and mid_depth % width_divisor != 0: ret = int((_py2_round(mid_depth / width_divisor) or width_divisor) * width_divisor) mid_depth = ret self.pw = ConvBNRelu(input_depth, mid_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu='relu', bn_type=bn_type, group=pw_group) assert stride in [1, 2, 4] or stride in [-1, -2, -4] or (isinstance(stride, tuple) and all((x in [-1, -2, -4] for x in stride))) scales = stride ret = None if isinstance(stride, tuple) or stride < 0: scales = [-x for x in stride] if isinstance(stride, tuple) else -stride stride = 1 ret = Upsample(scale_factor=scales, mode='nearest', align_corners=None) (self.upscale, stride) = (ret, stride) if kernel == 1: self.dw = nn.Sequential() elif cdw: dw1 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu', bn_type=bn_type) dw2 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=1, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None) self.dw = nn.Sequential(OrderedDict([('dw1', dw1), ('dw2', dw2)])) else: self.dw = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None) self.pwl = ConvBNRelu(mid_depth, output_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu=None, bn_type=bn_type, group=pw_group) self.shuffle_type = shuffle_type if shuffle_type is not None: self.shuffle = ChannelShuffle(pw_group) self.se4 = SEModule(output_depth) if se else nn.Sequential() self.output_depth = output_depth
DF-Traffic-Sign-Identification
positive
def main(): <DeepExtract> param_util.handle_version_flag() parser = provider_base.create_parser(sys.argv[0]) parser.add_argument('--version', '-v', default=False, help='Print the dsub version and exit.') parser.add_argument('--jobs', '-j', required=True, nargs='*', help='List of job-ids to delete. Use "*" to delete all running jobs.') parser.add_argument('--tasks', '-t', nargs='*', help='List of tasks in an array job to delete.') parser.add_argument('--users', '-u', nargs='*', default=[], help='Deletes only those jobs which were submitted by the list of users.\n Use "*" to delete jobs of any user.') parser.add_argument('--age', help="Deletes only those jobs newer than the specified age. Ages can be\n listed using a number followed by a unit. Supported units are\n s (seconds), m (minutes), h (hours), d (days), w (weeks).\n For example: '7d' (7 days). Bare numbers are treated as UTC.") parser.add_argument('--label', nargs='*', action=param_util.ListParamAction, default=[], help='User labels to match. Tasks returned must match all labels.', metavar='KEY=VALUE') google_common = parser.add_argument_group(title='google-common', description='Options common to the "google", "google-cls-v2", and\n "google-v2" providers') google_common.add_argument('--project', help='Cloud project ID in which to find and delete the job(s)') google_cls_v2 = parser.add_argument_group(title='"google-cls-v2" provider options', description='See also the "google-common" options listed') google_cls_v2.add_argument('--location', default=job_model.DEFAULT_LOCATION, help='Specifies the Google Cloud region to which the dsub job was\n submitted. (default: {})'.format(job_model.DEFAULT_LOCATION)) args = provider_base.parse_args(parser, {'google-batch': ['project'], 'google-cls-v2': ['project'], 'google-v2': ['project'], 'test-fails': [], 'local': []}, sys.argv[1:]) </DeepExtract> create_time = param_util.age_to_create_time(args.age) provider = provider_base.get_provider(args, resources, credentials_fn=get_credentials) user_ids = set(args.users) if args.users else {dsub_util.get_os_user()} labels = param_util.parse_pair_args(args.label, job_model.LabelParam) with dsub_util.replace_print(): provider_base.emit_provider_message(provider) <DeepExtract> print('Delete running jobs:') print(' user:') print(' %s\n' % user_ids) print(' job-id:') print(' %s\n' % args.jobs) if args.tasks: print(' task-id:') print(' %s\n' % args.tasks) if args.label: print(' labels:') print(' %s\n' % repr(args.label)) </DeepExtract> <DeepExtract> (deleted_tasks, error_messages) = provider.delete_jobs(user_ids, set(args.jobs) if args.jobs else None, set(args.tasks) if args.tasks else None, labels, create_time, create_time_max) for msg in error_messages: print(msg) deleted_tasks = deleted_tasks </DeepExtract> deleted_jobs = dsub_util.tasks_to_job_ids(deleted_tasks) job_count = len(deleted_jobs) deleted_tasks = [t for t in deleted_tasks if t.get_field('task-id')] tasks_msg = '' if deleted_tasks: task_count = len(deleted_tasks) tasks_msg = ' (%d task%s)' % (task_count, '' if task_count == 1 else 's') print('%d job%s deleted%s' % (job_count, '' if job_count == 1 else 's', tasks_msg))
def main(): param_util.handle_version_flag() parser = provider_base.create_parser(sys.argv[0]) parser.add_argument('--version', '-v', default=False, help='Print the dsub version and exit.') parser.add_argument('--jobs', '-j', required=True, nargs='*', help='List of job-ids to delete. Use "*" to delete all running jobs.') parser.add_argument('--tasks', '-t', nargs='*', help='List of tasks in an array job to delete.') parser.add_argument('--users', '-u', nargs='*', default=[], help='Deletes only those jobs which were submitted by the list of users.\n Use "*" to delete jobs of any user.') parser.add_argument('--age', help="Deletes only those jobs newer than the specified age. Ages can be\n listed using a number followed by a unit. Supported units are\n s (seconds), m (minutes), h (hours), d (days), w (weeks).\n For example: '7d' (7 days). Bare numbers are treated as UTC.") parser.add_argument('--label', nargs='*', action=param_util.ListParamAction, default=[], help='User labels to match. Tasks returned must match all labels.', metavar='KEY=VALUE') google_common = parser.add_argument_group(title='google-common', description='Options common to the "google", "google-cls-v2", and\n "google-v2" providers') google_common.add_argument('--project', help='Cloud project ID in which to find and delete the job(s)') google_cls_v2 = parser.add_argument_group(title='"google-cls-v2" provider options', description='See also the "google-common" options listed') google_cls_v2.add_argument('--location', default=job_model.DEFAULT_LOCATION, help='Specifies the Google Cloud region to which the dsub job was\n submitted. (default: {})'.format(job_model.DEFAULT_LOCATION)) args = provider_base.parse_args(parser, {'google-batch': ['project'], 'google-cls-v2': ['project'], 'google-v2': ['project'], 'test-fails': [], 'local': []}, sys.argv[1:]) create_time = param_util.age_to_create_time(args.age) provider = provider_base.get_provider(args, resources, credentials_fn=get_credentials) user_ids = set(args.users) if args.users else {dsub_util.get_os_user()} labels = param_util.parse_pair_args(args.label, job_model.LabelParam) with dsub_util.replace_print(): provider_base.emit_provider_message(provider) print('Delete running jobs:') print(' user:') print(' %s\n' % user_ids) print(' job-id:') print(' %s\n' % args.jobs) if args.tasks: print(' task-id:') print(' %s\n' % args.tasks) if args.label: print(' labels:') print(' %s\n' % repr(args.label)) (deleted_tasks, error_messages) = provider.delete_jobs(user_ids, set(args.jobs) if args.jobs else None, set(args.tasks) if args.tasks else None, labels, create_time, create_time_max) for msg in error_messages: print(msg) deleted_tasks = deleted_tasks deleted_jobs = dsub_util.tasks_to_job_ids(deleted_tasks) job_count = len(deleted_jobs) deleted_tasks = [t for t in deleted_tasks if t.get_field('task-id')] tasks_msg = '' if deleted_tasks: task_count = len(deleted_tasks) tasks_msg = ' (%d task%s)' % (task_count, '' if task_count == 1 else 's') print('%d job%s deleted%s' % (job_count, '' if job_count == 1 else 's', tasks_msg))
dsub
positive
def unregister(self, obj, obj_id=None): <DeepExtract> obj_id = obj_id or getattr(obj, 'id', str(id(obj))) if isinstance(obj, view.Root): store = self.roots elif isinstance(obj, view.Element): store = self.elements elif isinstance(obj, type) and issubclass(obj, view.CustomElement): store = self.element_types elif isinstance(obj, variables.Variable): store = self.variables elif callable(obj): store = self.functions else: raise RuntimeError('No registry is defined for objects of type {}'.format(type(obj).__name__)) (obj_id, store) = (obj_id, store) </DeepExtract> del store[obj_id]
def unregister(self, obj, obj_id=None): obj_id = obj_id or getattr(obj, 'id', str(id(obj))) if isinstance(obj, view.Root): store = self.roots elif isinstance(obj, view.Element): store = self.elements elif isinstance(obj, type) and issubclass(obj, view.CustomElement): store = self.element_types elif isinstance(obj, variables.Variable): store = self.variables elif callable(obj): store = self.functions else: raise RuntimeError('No registry is defined for objects of type {}'.format(type(obj).__name__)) (obj_id, store) = (obj_id, store) del store[obj_id]
awe
positive
def get_broker_id(self): if self.broker_id: return self.broker_id meta_path = '{}/meta.properties'.format(self.kafka_properties.get_property('log.dirs')) while not os.path.isfile(meta_path): return None with open(meta_path) as f: <DeepExtract> for line in f.readlines(): match = re.search('^broker\\.id=(\\d+)$', line.strip()) if match: self.broker_id = match.group(1) </DeepExtract> return self.broker_id
def get_broker_id(self): if self.broker_id: return self.broker_id meta_path = '{}/meta.properties'.format(self.kafka_properties.get_property('log.dirs')) while not os.path.isfile(meta_path): return None with open(meta_path) as f: for line in f.readlines(): match = re.search('^broker\\.id=(\\d+)$', line.strip()) if match: self.broker_id = match.group(1) return self.broker_id
bubuku
positive
def test_last_entries(self): <DeepExtract> params = {'title': 'My test entry', 'content': 'My test content with image <img src="/image.jpg" />', 'slug': 'my-test-entry', 'tags': 'tests', 'publication_date': datetime(2010, 1, 1, 12), 'status': PUBLISHED} entry = Entry.objects.create(**params) entry.sites.add(self.site) entry.categories.add(self.category) entry.authors.add(self.author) return entry </DeepExtract> feed = LastEntries() self.assertEqual(feed.link(), '/') self.assertEqual(len(feed.items()), 1) self.assertEqual(feed.get_title(None), 'Last entries') self.assertEqual(feed.description(), 'The last entries on the site example.com')
def test_last_entries(self): params = {'title': 'My test entry', 'content': 'My test content with image <img src="/image.jpg" />', 'slug': 'my-test-entry', 'tags': 'tests', 'publication_date': datetime(2010, 1, 1, 12), 'status': PUBLISHED} entry = Entry.objects.create(**params) entry.sites.add(self.site) entry.categories.add(self.category) entry.authors.add(self.author) return entry feed = LastEntries() self.assertEqual(feed.link(), '/') self.assertEqual(len(feed.items()), 1) self.assertEqual(feed.get_title(None), 'Last entries') self.assertEqual(feed.description(), 'The last entries on the site example.com')
django-blog-zinnia
positive
def long_position_logic(self, trend): """ This function will handle all the logic when bot is in a long position. :param trend: Current trend the bot registers based on strategies provided. """ if self.custom_stop_loss is not None and self.current_price <= self.custom_stop_loss: <DeepExtract> with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because of custom stop loss.', force=force, stop_loss_exit=stop_loss_exit) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None </DeepExtract> elif self.get_stop_loss() is not None and self.current_price <= self.get_stop_loss(): if not self.safety_timer: <DeepExtract> with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because of stop loss.', force=force, stop_loss_exit=True) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None </DeepExtract> elif not self.scheduled_safety_timer: self.scheduled_safety_timer = time.time() + self.safety_timer elif time.time() > self.scheduled_safety_timer: <DeepExtract> with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because of stop loss and safety timer.', force=force, stop_loss_exit=True) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None </DeepExtract> elif self.get_take_profit() is not None and self.current_price >= self.get_take_profit(): <DeepExtract> with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. 
You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because of take profit.', force=force, stop_loss_exit=stop_loss_exit) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None </DeepExtract> elif not self.in_human_control: if trend == BEARISH: <DeepExtract> with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because a cross was detected.', force=force, stop_loss_exit=stop_loss_exit) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None </DeepExtract> <DeepExtract> with self.lock: if self.current_position == SHORT: return self.current_price = self.data_view.get_current_price() if coin is None: transaction_fee = self.balance * self.transaction_fee_percentage_decimal coin = (self.balance - transaction_fee) / self.current_price if coin <= 0: raise ValueError(f'You cannot borrow negative {abs(coin)} {self.coin_name}.') self.coin_owed += coin self.commission_paid += self.current_price * coin * self.transaction_fee_percentage_decimal self.balance += self.current_price * coin * (1 - self.transaction_fee_percentage_decimal) self.current_position = SHORT self.sell_short_price = self.short_trailing_price = self.current_price self.add_trade('Sold short because a cross was detected.', force=force, smart_enter=smart_enter) </DeepExtract> elif trend == EXIT_LONG: <DeepExtract> with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because an exit-long trend was detected.', force=force, stop_loss_exit=stop_loss_exit) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None </DeepExtract>
def long_position_logic(self, trend): """ This function will handle all the logic when bot is in a long position. :param trend: Current trend the bot registers based on strategies provided. """ if self.custom_stop_loss is not None and self.current_price <= self.custom_stop_loss: with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because of custom stop loss.', force=force, stop_loss_exit=stop_loss_exit) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None elif self.get_stop_loss() is not None and self.current_price <= self.get_stop_loss(): if not self.safety_timer: with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because of stop loss.', force=force, stop_loss_exit=True) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None elif not self.scheduled_safety_timer: self.scheduled_safety_timer = time.time() + self.safety_timer elif time.time() > self.scheduled_safety_timer: with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because of stop loss and safety timer.', force=force, stop_loss_exit=True) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None elif self.get_take_profit() is not None and self.current_price >= self.get_take_profit(): with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. 
You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because of take profit.', force=force, stop_loss_exit=stop_loss_exit) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None elif not self.in_human_control: if trend == BEARISH: with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because a cross was detected.', force=force, stop_loss_exit=stop_loss_exit) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None with self.lock: if self.current_position == SHORT: return self.current_price = self.data_view.get_current_price() if coin is None: transaction_fee = self.balance * self.transaction_fee_percentage_decimal coin = (self.balance - transaction_fee) / self.current_price if coin <= 0: raise ValueError(f'You cannot borrow negative {abs(coin)} {self.coin_name}.') self.coin_owed += coin self.commission_paid += self.current_price * coin * self.transaction_fee_percentage_decimal self.balance += self.current_price * coin * (1 - self.transaction_fee_percentage_decimal) self.current_position = SHORT self.sell_short_price = self.short_trailing_price = self.current_price self.add_trade('Sold short because a cross was detected.', force=force, smart_enter=smart_enter) elif trend == EXIT_LONG: with self.lock: if self.current_position != LONG: return if coin is None: coin = self.coin if coin <= 0: raise ValueError(f'You cannot sell {coin} {self.coin_name}.') if coin > self.coin: raise ValueError(f'You have {self.coin} {self.coin_name}. You cannot sell {coin} {self.coin_name}.') self.current_price = self.data_view.get_current_price() self.commission_paid += coin * self.current_price * self.transaction_fee_percentage_decimal self.balance += coin * self.current_price * (1 - self.transaction_fee_percentage_decimal) self.current_position = None self.custom_stop_loss = None self.previous_position = LONG self.coin -= coin self.add_trade('Sold long because an exit-long trend was detected.', force=force, stop_loss_exit=stop_loss_exit) if self.coin == 0: self.buy_long_price = self.long_trailing_price = None
algobot
positive
def get_coding_scheme_warnings(self, silent=False): """Check if the coding scheme has detected any errors.""" ret_fail = False for block in self.blocks: if block.id == 0: words = [self.read_reg(self.REGS.EFUSE_RD_REPEAT_ERR0_REG + offs * 4) for offs in range(5)] data = BitArray() for word in reversed(words): data.append('uint:32=%d' % word) block.err_bitarray.overwrite(data, pos=32) block.num_errors = block.err_bitarray.count(True) block.fail = block.num_errors != 0 else: (addr_reg_f, fail_bit) = self.REGS.BLOCK_FAIL_BIT[block.id] if fail_bit is None: block.fail = False else: block.fail = self.read_reg(addr_reg_f) & 1 << fail_bit != 0 (addr_reg_n, num_mask, num_offs) = self.REGS.BLOCK_NUM_ERRORS[block.id] if num_mask is None or num_offs is None: block.num_errors = 0 else: block.num_errors = self.read_reg(addr_reg_n) >> num_offs & num_mask ret_fail |= block.fail if not silent and (block.fail or block.num_errors): print('Error(s) in BLOCK%d [ERRORS:%d FAIL:%d]' % (block.id, block.num_errors, block.fail)) if (self.debug or ret_fail) and (not silent): <DeepExtract> print('') self.blocks[0].print_block(self.blocks[0].err_bitarray, 'err__regs', debug=True) print('{:27} 0x{:08x}'.format('EFUSE_RD_RS_ERR0_REG', self.read_reg(self.REGS.EFUSE_RD_RS_ERR0_REG))) print('{:27} 0x{:08x}'.format('EFUSE_RD_RS_ERR1_REG', self.read_reg(self.REGS.EFUSE_RD_RS_ERR1_REG))) </DeepExtract> return ret_fail
def get_coding_scheme_warnings(self, silent=False): """Check if the coding scheme has detected any errors.""" ret_fail = False for block in self.blocks: if block.id == 0: words = [self.read_reg(self.REGS.EFUSE_RD_REPEAT_ERR0_REG + offs * 4) for offs in range(5)] data = BitArray() for word in reversed(words): data.append('uint:32=%d' % word) block.err_bitarray.overwrite(data, pos=32) block.num_errors = block.err_bitarray.count(True) block.fail = block.num_errors != 0 else: (addr_reg_f, fail_bit) = self.REGS.BLOCK_FAIL_BIT[block.id] if fail_bit is None: block.fail = False else: block.fail = self.read_reg(addr_reg_f) & 1 << fail_bit != 0 (addr_reg_n, num_mask, num_offs) = self.REGS.BLOCK_NUM_ERRORS[block.id] if num_mask is None or num_offs is None: block.num_errors = 0 else: block.num_errors = self.read_reg(addr_reg_n) >> num_offs & num_mask ret_fail |= block.fail if not silent and (block.fail or block.num_errors): print('Error(s) in BLOCK%d [ERRORS:%d FAIL:%d]' % (block.id, block.num_errors, block.fail)) if (self.debug or ret_fail) and (not silent): print('') self.blocks[0].print_block(self.blocks[0].err_bitarray, 'err__regs', debug=True) print('{:27} 0x{:08x}'.format('EFUSE_RD_RS_ERR0_REG', self.read_reg(self.REGS.EFUSE_RD_RS_ERR0_REG))) print('{:27} 0x{:08x}'.format('EFUSE_RD_RS_ERR1_REG', self.read_reg(self.REGS.EFUSE_RD_RS_ERR1_REG))) return ret_fail
esptool
positive
def refine_by_decoder(features, end_points, decoder_height, decoder_width, decoder_use_separable_conv=False, model_variant=None, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False): """Adds the decoder to obtain sharper segmentation results. Args: features: A tensor of size [batch, features_height, features_width, features_channels]. end_points: A dictionary from components of the network to the corresponding activation. decoder_height: The height of decoder feature maps. decoder_width: The width of decoder feature maps. decoder_use_separable_conv: Employ separable convolution for decoder or not. model_variant: Model variant for feature extraction. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: Decoder output with size [batch, decoder_height, decoder_width, decoder_channels]. """ batch_norm_params = {'is_training': is_training if fine_tune_batch_norm else False, 'decay': 0.9997, 'epsilon': 1e-05, 'scale': True} with slim.arg_scope([slim.conv2d, slim.separable_conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, padding='SAME', stride=1, reuse=reuse): with slim.arg_scope([slim.batch_norm], **batch_norm_params): with tf.variable_scope(_DECODER_SCOPE, _DECODER_SCOPE, [features]): feature_list = feature_extractor.networks_to_feature_maps[model_variant][feature_extractor.DECODER_END_POINTS] if feature_list is None: tf.logging.info('Not found any decoder end points.') return features else: decoder_features = features for (i, name) in enumerate(feature_list): decoder_features_list = [decoder_features] feature_name = '{}/{}'.format(feature_extractor.name_scope[model_variant], name) decoder_features_list.append(slim.conv2d(end_points[feature_name], 48, 1, scope='feature_projection' + str(i))) for (j, feature) in enumerate(decoder_features_list): decoder_features_list[j] = tf.image.resize_bilinear(feature, [decoder_height, decoder_width], align_corners=True) decoder_features_list[j].set_shape([None, decoder_height, decoder_width, None]) decoder_depth = 256 if decoder_use_separable_conv: <DeepExtract> outputs = slim.separable_conv2d(tf.concat(decoder_features_list, 3), None, 3, depth_multiplier=1, rate=1, weights_initializer=tf.truncated_normal_initializer(stddev=depthwise_weights_initializer_stddev), weights_regularizer=None, scope='decoder_conv0' + '_depthwise') decoder_features = slim.conv2d(outputs, decoder_depth, 1, weights_initializer=tf.truncated_normal_initializer(stddev=pointwise_weights_initializer_stddev), weights_regularizer=slim.l2_regularizer(weight_decay), scope='decoder_conv0' + '_pointwise') </DeepExtract> <DeepExtract> outputs = slim.separable_conv2d(decoder_features, None, 3, depth_multiplier=1, rate=1, weights_initializer=tf.truncated_normal_initializer(stddev=depthwise_weights_initializer_stddev), weights_regularizer=None, scope='decoder_conv1' + '_depthwise') decoder_features = slim.conv2d(outputs, decoder_depth, 1, weights_initializer=tf.truncated_normal_initializer(stddev=pointwise_weights_initializer_stddev), weights_regularizer=slim.l2_regularizer(weight_decay), scope='decoder_conv1' + '_pointwise') </DeepExtract> else: num_convs = 2 decoder_features = slim.repeat(tf.concat(decoder_features_list, 3), num_convs, slim.conv2d, decoder_depth, 3, scope='decoder_conv' + str(i)) return decoder_features
def refine_by_decoder(features, end_points, decoder_height, decoder_width, decoder_use_separable_conv=False, model_variant=None, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False): """Adds the decoder to obtain sharper segmentation results. Args: features: A tensor of size [batch, features_height, features_width, features_channels]. end_points: A dictionary from components of the network to the corresponding activation. decoder_height: The height of decoder feature maps. decoder_width: The width of decoder feature maps. decoder_use_separable_conv: Employ separable convolution for decoder or not. model_variant: Model variant for feature extraction. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: Decoder output with size [batch, decoder_height, decoder_width, decoder_channels]. """ batch_norm_params = {'is_training': is_training if fine_tune_batch_norm else False, 'decay': 0.9997, 'epsilon': 1e-05, 'scale': True} with slim.arg_scope([slim.conv2d, slim.separable_conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, padding='SAME', stride=1, reuse=reuse): with slim.arg_scope([slim.batch_norm], **batch_norm_params): with tf.variable_scope(_DECODER_SCOPE, _DECODER_SCOPE, [features]): feature_list = feature_extractor.networks_to_feature_maps[model_variant][feature_extractor.DECODER_END_POINTS] if feature_list is None: tf.logging.info('Not found any decoder end points.') return features else: decoder_features = features for (i, name) in enumerate(feature_list): decoder_features_list = [decoder_features] feature_name = '{}/{}'.format(feature_extractor.name_scope[model_variant], name) decoder_features_list.append(slim.conv2d(end_points[feature_name], 48, 1, scope='feature_projection' + str(i))) for (j, feature) in enumerate(decoder_features_list): decoder_features_list[j] = tf.image.resize_bilinear(feature, [decoder_height, decoder_width], align_corners=True) decoder_features_list[j].set_shape([None, decoder_height, decoder_width, None]) decoder_depth = 256 if decoder_use_separable_conv: outputs = slim.separable_conv2d(tf.concat(decoder_features_list, 3), None, 3, depth_multiplier=1, rate=1, weights_initializer=tf.truncated_normal_initializer(stddev=depthwise_weights_initializer_stddev), weights_regularizer=None, scope='decoder_conv0' + '_depthwise') decoder_features = slim.conv2d(outputs, decoder_depth, 1, weights_initializer=tf.truncated_normal_initializer(stddev=pointwise_weights_initializer_stddev), weights_regularizer=slim.l2_regularizer(weight_decay), scope='decoder_conv0' + '_pointwise') outputs = slim.separable_conv2d(decoder_features, None, 3, depth_multiplier=1, rate=1, weights_initializer=tf.truncated_normal_initializer(stddev=depthwise_weights_initializer_stddev), weights_regularizer=None, scope='decoder_conv1' + '_depthwise') decoder_features = slim.conv2d(outputs, decoder_depth, 1, weights_initializer=tf.truncated_normal_initializer(stddev=pointwise_weights_initializer_stddev), weights_regularizer=slim.l2_regularizer(weight_decay), scope='decoder_conv1' + '_pointwise') else: num_convs = 2 decoder_features = slim.repeat(tf.concat(decoder_features_list, 3), num_convs, slim.conv2d, decoder_depth, 3, scope='decoder_conv' + str(i)) return decoder_features
data-science-bowl-2018
positive
def perform_save(self, comment, request): comment.save() <DeepExtract> if settings.COMMENT_ALLOW_SUBSCRIPTION: self._initialize_email_service(comment, request) self.email_service.send_notification_to_followers() </DeepExtract> comment.refresh_from_db() return comment
def perform_save(self, comment, request): comment.save() if settings.COMMENT_ALLOW_SUBSCRIPTION: self._initialize_email_service(comment, request) self.email_service.send_notification_to_followers() comment.refresh_from_db() return comment
Comment
positive
@staticmethod def generate_triplets(labels, num_triplets): def create_indices(_labels): inds = dict() for (idx, ind) in enumerate(_labels): if ind not in inds: inds[ind] = [] inds[ind].append(idx) return inds triplets = [] <DeepExtract> inds = dict() for (idx, ind) in enumerate(labels.numpy()): if ind not in inds: inds[ind] = [] inds[ind].append(idx) indices = inds </DeepExtract> unique_labels = np.unique(labels.numpy()) n_classes = unique_labels.shape[0] already_idxs = set() for x in tqdm(range(num_triplets)): if len(already_idxs) >= args.batch_size: already_idxs = set() c1 = np.random.randint(0, n_classes) while c1 in already_idxs: c1 = np.random.randint(0, n_classes) already_idxs.add(c1) c2 = np.random.randint(0, n_classes) while c1 == c2: c2 = np.random.randint(0, n_classes) if len(indices[c1]) == 2: (n1, n2) = (0, 1) else: n1 = np.random.randint(0, len(indices[c1])) n2 = np.random.randint(0, len(indices[c1])) while n1 == n2: n2 = np.random.randint(0, len(indices[c1])) n3 = np.random.randint(0, len(indices[c2])) triplets.append([indices[c1][n1], indices[c1][n2], indices[c2][n3]]) return torch.LongTensor(np.array(triplets))
@staticmethod def generate_triplets(labels, num_triplets): def create_indices(_labels): inds = dict() for (idx, ind) in enumerate(_labels): if ind not in inds: inds[ind] = [] inds[ind].append(idx) return inds triplets = [] inds = dict() for (idx, ind) in enumerate(labels.numpy()): if ind not in inds: inds[ind] = [] inds[ind].append(idx) indices = inds unique_labels = np.unique(labels.numpy()) n_classes = unique_labels.shape[0] already_idxs = set() for x in tqdm(range(num_triplets)): if len(already_idxs) >= args.batch_size: already_idxs = set() c1 = np.random.randint(0, n_classes) while c1 in already_idxs: c1 = np.random.randint(0, n_classes) already_idxs.add(c1) c2 = np.random.randint(0, n_classes) while c1 == c2: c2 = np.random.randint(0, n_classes) if len(indices[c1]) == 2: (n1, n2) = (0, 1) else: n1 = np.random.randint(0, len(indices[c1])) n2 = np.random.randint(0, len(indices[c1])) while n1 == n2: n2 = np.random.randint(0, len(indices[c1])) n3 = np.random.randint(0, len(indices[c2])) triplets.append([indices[c1][n1], indices[c1][n2], indices[c2][n3]]) return torch.LongTensor(np.array(triplets))
CNNs-Without-Borders
positive
def _merge_a_into_b(a, b, stack=None): """Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a. """ assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict' assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict' for (k, v_) in a.items(): full_key = '.'.join(stack) + '.' + k if stack is not None else k if k not in b: raise KeyError('Non-existent config key: {}'.format(full_key)) v = copy.deepcopy(v_) <DeepExtract> if isinstance(v, dict): v = AttrDict(v) if not isinstance(v, six.string_types): v = v try: v = literal_eval(v) except ValueError: pass except SyntaxError: pass v = v </DeepExtract> <DeepExtract> type_b = type(b[k]) type_a = type(v) if type_a is type_b: v = v if isinstance(b[k], np.ndarray): v = np.array(v, dtype=b[k].dtype) elif isinstance(b[k], six.string_types): v = str(v) elif isinstance(v, tuple) and isinstance(b[k], list): v = list(v) elif isinstance(v, list) and isinstance(b[k], tuple): v = tuple(v) else: raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, b[k], v, full_key)) v = v </DeepExtract> if isinstance(v, AttrDict): try: stack_push = [k] if stack is None else stack + [k] <DeepExtract> assert isinstance(v, AttrDict), 'Argument `a` must be an AttrDict' assert isinstance(b[k], AttrDict), 'Argument `b` must be an AttrDict' for (k, v_) in v.items(): full_key = '.'.join(stack_push) + '.' + k if stack_push is not None else k if k not in b[k]: raise KeyError('Non-existent config key: {}'.format(full_key)) v = copy.deepcopy(v_) v = _decode_cfg_value(v) v = _check_and_coerce_cfg_value_type(v, b[k][k], k, full_key) if isinstance(v, AttrDict): try: stack_push = [k] if stack_push is None else stack_push + [k] _merge_a_into_b(v, b[k][k], stack=stack_push) except BaseException: raise else: b[k][k] = v </DeepExtract> except BaseException: raise else: b[k] = v
def _merge_a_into_b(a, b, stack=None): """Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a. """ assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict' assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict' for (k, v_) in a.items(): full_key = '.'.join(stack) + '.' + k if stack is not None else k if k not in b: raise KeyError('Non-existent config key: {}'.format(full_key)) v = copy.deepcopy(v_) if isinstance(v, dict): v = AttrDict(v) if not isinstance(v, six.string_types): v = v try: v = literal_eval(v) except ValueError: pass except SyntaxError: pass v = v type_b = type(b[k]) type_a = type(v) if type_a is type_b: v = v if isinstance(b[k], np.ndarray): v = np.array(v, dtype=b[k].dtype) elif isinstance(b[k], six.string_types): v = str(v) elif isinstance(v, tuple) and isinstance(b[k], list): v = list(v) elif isinstance(v, list) and isinstance(b[k], tuple): v = tuple(v) else: raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, b[k], v, full_key)) v = v if isinstance(v, AttrDict): try: stack_push = [k] if stack is None else stack + [k] assert isinstance(v, AttrDict), 'Argument `a` must be an AttrDict' assert isinstance(b[k], AttrDict), 'Argument `b` must be an AttrDict' for (k, v_) in v.items(): full_key = '.'.join(stack_push) + '.' + k if stack_push is not None else k if k not in b[k]: raise KeyError('Non-existent config key: {}'.format(full_key)) v = copy.deepcopy(v_) v = _decode_cfg_value(v) v = _check_and_coerce_cfg_value_type(v, b[k][k], k, full_key) if isinstance(v, AttrDict): try: stack_push = [k] if stack_push is None else stack_push + [k] _merge_a_into_b(v, b[k][k], stack=stack_push) except BaseException: raise else: b[k][k] = v except BaseException: raise else: b[k] = v
3DSSD
positive
def get_entrypoints(*, venv: VEnv, name: str) -> Optional[Tuple[EntryPoint, ...]]: if not venv.lib_path: logger.critical('cannot locate lib path in the venv') return None paths = venv.lib_path.glob('*-*.*-info') <DeepExtract> name = canonicalize_name(name) for path in paths: package_name = path.stem.split('-')[0] if canonicalize_name(package_name) == name: path = path path = None </DeepExtract> if not path: logger.critical('cannot locate dist-info for installed package') return None path = path / 'entry_points.txt' if path.exists(): return EggInfoConverter().parse_entrypoints(content=path.read_text()).entrypoints if not venv.bin_path: logger.error('cannot find any entrypoints for package') return None names = {name, name.replace('-', '_'), name.replace('_', '-'), name.replace('-', '').replace('_', ''), canonicalize_name(name), canonicalize_name(name).replace('-', '_'), canonicalize_name(name).replace('_', '-'), canonicalize_name(name).replace('-', '').replace('_', '')} paths = tuple((venv.bin_path / name for name in names)) if IS_WINDOWS: paths = tuple((p.with_suffix('.exe') for p in paths)) for path in paths: if path.exists(): return (EntryPoint(path=path, name=name),) logger.error('cannot find any entrypoints for package') return None
def get_entrypoints(*, venv: VEnv, name: str) -> Optional[Tuple[EntryPoint, ...]]: if not venv.lib_path: logger.critical('cannot locate lib path in the venv') return None paths = venv.lib_path.glob('*-*.*-info') name = canonicalize_name(name) for path in paths: package_name = path.stem.split('-')[0] if canonicalize_name(package_name) == name: path = path path = None if not path: logger.critical('cannot locate dist-info for installed package') return None path = path / 'entry_points.txt' if path.exists(): return EggInfoConverter().parse_entrypoints(content=path.read_text()).entrypoints if not venv.bin_path: logger.error('cannot find any entrypoints for package') return None names = {name, name.replace('-', '_'), name.replace('_', '-'), name.replace('-', '').replace('_', ''), canonicalize_name(name), canonicalize_name(name).replace('-', '_'), canonicalize_name(name).replace('_', '-'), canonicalize_name(name).replace('-', '').replace('_', '')} paths = tuple((venv.bin_path / name for name in names)) if IS_WINDOWS: paths = tuple((p.with_suffix('.exe') for p in paths)) for path in paths: if path.exists(): return (EntryPoint(path=path, name=name),) logger.error('cannot find any entrypoints for package') return None
dephell
positive
def __cut_internal(self, sentence, HMM=True): <DeepExtract> if self.tokenizer.user_word_tag_tab: self.word_tag_tab.update(self.tokenizer.user_word_tag_tab) self.tokenizer.user_word_tag_tab = {} </DeepExtract> sentence = strdecode(sentence) blocks = re_han_internal.split(sentence) if HMM: cut_blk = self.__cut_DAG else: cut_blk = self.__cut_DAG_NO_HMM for blk in blocks: if re_han_internal.match(blk): for word in cut_blk(blk): yield word else: tmp = re_skip_internal.split(blk) for x in tmp: if re_skip_internal.match(x): yield pair(x, 'x') else: for xx in x: if re_num.match(xx): yield pair(xx, 'm') elif re_eng.match(x): yield pair(xx, 'eng') else: yield pair(xx, 'x')
def __cut_internal(self, sentence, HMM=True): if self.tokenizer.user_word_tag_tab: self.word_tag_tab.update(self.tokenizer.user_word_tag_tab) self.tokenizer.user_word_tag_tab = {} sentence = strdecode(sentence) blocks = re_han_internal.split(sentence) if HMM: cut_blk = self.__cut_DAG else: cut_blk = self.__cut_DAG_NO_HMM for blk in blocks: if re_han_internal.match(blk): for word in cut_blk(blk): yield word else: tmp = re_skip_internal.split(blk) for x in tmp: if re_skip_internal.match(x): yield pair(x, 'x') else: for xx in x: if re_num.match(xx): yield pair(xx, 'm') elif re_eng.match(x): yield pair(xx, 'eng') else: yield pair(xx, 'x')
Chinese-clinical-NER
positive
def verify_request(self, oauth_request): """Verifies an api call and checks all the parameters.""" <DeepExtract> try: version = oauth_request.get_parameter('oauth_version') except: version = VERSION if version and version != self.version: raise OAuthError('OAuth version %s not supported.' % str(version)) version = version </DeepExtract> <DeepExtract> consumer_key = oauth_request.get_parameter('oauth_consumer_key') consumer = self.data_store.lookup_consumer(consumer_key) if not consumer: raise OAuthError('Invalid consumer.') consumer = consumer </DeepExtract> <DeepExtract> token_field = oauth_request.get_parameter('oauth_token') token = self.data_store.lookup_token('access', token_field) if not token: raise OAuthError('Invalid %s token: %s' % ('access', token_field)) token = token </DeepExtract> <DeepExtract> (timestamp, nonce) = oauth_request._get_timestamp_nonce() self._check_timestamp(timestamp) self._check_nonce(consumer, token, nonce) signature_method = self._get_signature_method(oauth_request) try: signature = oauth_request.get_parameter('oauth_signature') except: raise OAuthError('Missing signature.') valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature) if not valid_sig: (key, base) = signature_method.build_signature_base_string(oauth_request, consumer, token) raise OAuthError('Invalid signature. Expected signature base string: %s' % base) built = signature_method.build_signature(oauth_request, consumer, token) </DeepExtract> parameters = oauth_request.get_nonoauth_parameters() return (consumer, token, parameters)
def verify_request(self, oauth_request): """Verifies an api call and checks all the parameters.""" try: version = oauth_request.get_parameter('oauth_version') except: version = VERSION if version and version != self.version: raise OAuthError('OAuth version %s not supported.' % str(version)) version = version consumer_key = oauth_request.get_parameter('oauth_consumer_key') consumer = self.data_store.lookup_consumer(consumer_key) if not consumer: raise OAuthError('Invalid consumer.') consumer = consumer token_field = oauth_request.get_parameter('oauth_token') token = self.data_store.lookup_token('access', token_field) if not token: raise OAuthError('Invalid %s token: %s' % ('access', token_field)) token = token (timestamp, nonce) = oauth_request._get_timestamp_nonce() self._check_timestamp(timestamp) self._check_nonce(consumer, token, nonce) signature_method = self._get_signature_method(oauth_request) try: signature = oauth_request.get_parameter('oauth_signature') except: raise OAuthError('Missing signature.') valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature) if not valid_sig: (key, base) = signature_method.build_signature_base_string(oauth_request, consumer, token) raise OAuthError('Invalid signature. Expected signature base string: %s' % base) built = signature_method.build_signature(oauth_request, consumer, token) parameters = oauth_request.get_nonoauth_parameters() return (consumer, token, parameters)
cloud-services-notifications
positive
def _date_dict(self, date, padded=True): if isinstance(date, (int, float)): <DeepExtract> date = dt.date.fromordinal(int(date - 1721424.5)).strftime('%m-%d-%Y') </DeepExtract> (m, d, y) = [str(int(x)).rjust(2, '0') if padded else str(x) for x in date.split('-')] return {'month': m, 'day': d, 'year': y}
def _date_dict(self, date, padded=True): if isinstance(date, (int, float)): date = dt.date.fromordinal(int(date - 1721424.5)).strftime('%m-%d-%Y') (m, d, y) = [str(int(x)).rjust(2, '0') if padded else str(x) for x in date.split('-')] return {'month': m, 'day': d, 'year': y}
CorpBot.py
positive
def adapted_chu_liu_edmonds(length: int, score_matrix: numpy.ndarray, coreference: List[int], current_nodes: List[bool], final_edges: Dict[int, int], old_input: numpy.ndarray, old_output: numpy.ndarray, representatives: List[Set[int]]): """ Applies the chu-liu-edmonds algorithm recursively to a graph with edge weights defined by score_matrix. Note that this function operates in place, so variables will be modified. Parameters ---------- length : ``int``, required. The number of nodes. score_matrix : ``numpy.ndarray``, required. The score matrix representing the scores for pairs of nodes. coreference: ``List[int]``, required. A list which maps a node to its first precedent. current_nodes : ``List[bool]``, required. The nodes which are representatives in the graph. A representative at it's most basic represents a node, but as the algorithm progresses, individual nodes will represent collapsed cycles in the graph. final_edges: ``Dict[int, int]``, required. An empty dictionary which will be populated with the nodes which are connected in the maximum spanning tree. old_input: ``numpy.ndarray``, required. a map from an edge to its head node. Key: The edge is a tuple, and elements in a tuple could be a node or a representative of a cycle. old_output: ``numpy.ndarray``, required. representatives : ``List[Set[int]]``, required. A list containing the nodes that a particular node is representing at this iteration in the graph. Returns ------- Nothing - all variables are modified in place. """ parents = [-1] for node1 in range(1, length): parents.append(0) if current_nodes[node1]: max_score = score_matrix[0, node1] for node2 in range(1, length): if node2 == node1 or not current_nodes[node2]: continue _parent = old_input[node1, node2] _child = old_output[node1, node2] if coreference[_parent] == coreference[_child]: continue new_score = score_matrix[node2, node1] if new_score > max_score: max_score = new_score parents[node1] = node2 <DeepExtract> added = [False for _ in range(length)] added[0] = True cycle = set() has_cycle = False for i in range(1, length): if has_cycle: break if added[i] or not current_nodes[i]: continue this_cycle = set() this_cycle.add(i) added[i] = True has_cycle = True next_node = i while parents[next_node] not in this_cycle: next_node = parents[next_node] if added[next_node]: has_cycle = False break added[next_node] = True this_cycle.add(next_node) if has_cycle: original = next_node cycle.add(original) next_node = parents[original] while next_node != original: cycle.add(next_node) next_node = parents[next_node] break (has_cycle, cycle) = (has_cycle, list(cycle)) </DeepExtract> if not has_cycle: final_edges[0] = -1 for node in range(1, length): if not current_nodes[node]: continue parent = old_input[parents[node], node] child = old_output[parents[node], node] final_edges[child] = parent return cycle_weight = 0.0 index = 0 for node in cycle: index += 1 cycle_weight += score_matrix[parents[node], node] cycle_representative = cycle[0] for node in range(length): if not current_nodes[node] or node in cycle: continue in_edge_weight = float('-inf') in_edge = -1 out_edge_weight = float('-inf') out_edge = -1 for node_in_cycle in cycle: _parent = old_input[node_in_cycle, node] _child = old_output[node_in_cycle, node] if coreference[_parent] != coreference[_child]: if score_matrix[node_in_cycle, node] > in_edge_weight: in_edge_weight = score_matrix[node_in_cycle, node] in_edge = node_in_cycle _parent = old_input[node, node_in_cycle] _child = old_output[node, node_in_cycle] if 
coreference[_parent] != coreference[_child]: score = cycle_weight + score_matrix[node, node_in_cycle] - score_matrix[parents[node_in_cycle], node_in_cycle] if score > out_edge_weight: out_edge_weight = score out_edge = node_in_cycle score_matrix[cycle_representative, node] = in_edge_weight old_input[cycle_representative, node] = old_input[in_edge, node] old_output[cycle_representative, node] = old_output[in_edge, node] score_matrix[node, cycle_representative] = out_edge_weight old_output[node, cycle_representative] = old_output[node, out_edge] old_input[node, cycle_representative] = old_input[node, out_edge] considered_representatives: List[Set[int]] = [] for (i, node_in_cycle) in enumerate(cycle): considered_representatives.append(set()) if i > 0: current_nodes[node_in_cycle] = False for node in representatives[node_in_cycle]: considered_representatives[i].add(node) if i > 0: representatives[cycle_representative].add(node) <DeepExtract> parents = [-1] for node1 in range(1, length): parents.append(0) if current_nodes[node1]: max_score = score_matrix[0, node1] for node2 in range(1, length): if node2 == node1 or not current_nodes[node2]: continue _parent = old_input[node1, node2] _child = old_output[node1, node2] if coreference[_parent] == coreference[_child]: continue new_score = score_matrix[node2, node1] if new_score > max_score: max_score = new_score parents[node1] = node2 (has_cycle, cycle) = _find_cycle(parents, length, current_nodes) if not has_cycle: final_edges[0] = -1 for node in range(1, length): if not current_nodes[node]: continue parent = old_input[parents[node], node] child = old_output[parents[node], node] final_edges[child] = parent return cycle_weight = 0.0 index = 0 for node in cycle: index += 1 cycle_weight += score_matrix[parents[node], node] cycle_representative = cycle[0] for node in range(length): if not current_nodes[node] or node in cycle: continue in_edge_weight = float('-inf') in_edge = -1 out_edge_weight = float('-inf') out_edge = -1 for node_in_cycle in cycle: _parent = old_input[node_in_cycle, node] _child = old_output[node_in_cycle, node] if coreference[_parent] != coreference[_child]: if score_matrix[node_in_cycle, node] > in_edge_weight: in_edge_weight = score_matrix[node_in_cycle, node] in_edge = node_in_cycle _parent = old_input[node, node_in_cycle] _child = old_output[node, node_in_cycle] if coreference[_parent] != coreference[_child]: score = cycle_weight + score_matrix[node, node_in_cycle] - score_matrix[parents[node_in_cycle], node_in_cycle] if score > out_edge_weight: out_edge_weight = score out_edge = node_in_cycle score_matrix[cycle_representative, node] = in_edge_weight old_input[cycle_representative, node] = old_input[in_edge, node] old_output[cycle_representative, node] = old_output[in_edge, node] score_matrix[node, cycle_representative] = out_edge_weight old_output[node, cycle_representative] = old_output[node, out_edge] old_input[node, cycle_representative] = old_input[node, out_edge] considered_representatives: List[Set[int]] = [] for (i, node_in_cycle) in enumerate(cycle): considered_representatives.append(set()) if i > 0: current_nodes[node_in_cycle] = False for node in representatives[node_in_cycle]: considered_representatives[i].add(node) if i > 0: representatives[cycle_representative].add(node) adapted_chu_liu_edmonds(length, score_matrix, coreference, current_nodes, final_edges, old_input, old_output, representatives) found = False key_node = -1 for (i, node) in enumerate(cycle): for cycle_rep in considered_representatives[i]: if 
cycle_rep in final_edges: key_node = node found = True break if found: break previous = parents[key_node] while previous != key_node: child = old_output[parents[previous], previous] parent = old_input[parents[previous], previous] final_edges[child] = parent previous = parents[previous] </DeepExtract> found = False key_node = -1 for (i, node) in enumerate(cycle): for cycle_rep in considered_representatives[i]: if cycle_rep in final_edges: key_node = node found = True break if found: break previous = parents[key_node] while previous != key_node: child = old_output[parents[previous], previous] parent = old_input[parents[previous], previous] final_edges[child] = parent previous = parents[previous]
def adapted_chu_liu_edmonds(length: int, score_matrix: numpy.ndarray, coreference: List[int], current_nodes: List[bool], final_edges: Dict[int, int], old_input: numpy.ndarray, old_output: numpy.ndarray, representatives: List[Set[int]]): """ Applies the chu-liu-edmonds algorithm recursively to a graph with edge weights defined by score_matrix. Note that this function operates in place, so variables will be modified. Parameters ---------- length : ``int``, required. The number of nodes. score_matrix : ``numpy.ndarray``, required. The score matrix representing the scores for pairs of nodes. coreference: ``List[int]``, required. A list which maps a node to its first precedent. current_nodes : ``List[bool]``, required. The nodes which are representatives in the graph. A representative at it's most basic represents a node, but as the algorithm progresses, individual nodes will represent collapsed cycles in the graph. final_edges: ``Dict[int, int]``, required. An empty dictionary which will be populated with the nodes which are connected in the maximum spanning tree. old_input: ``numpy.ndarray``, required. a map from an edge to its head node. Key: The edge is a tuple, and elements in a tuple could be a node or a representative of a cycle. old_output: ``numpy.ndarray``, required. representatives : ``List[Set[int]]``, required. A list containing the nodes that a particular node is representing at this iteration in the graph. Returns ------- Nothing - all variables are modified in place. """ parents = [-1] for node1 in range(1, length): parents.append(0) if current_nodes[node1]: max_score = score_matrix[0, node1] for node2 in range(1, length): if node2 == node1 or not current_nodes[node2]: continue _parent = old_input[node1, node2] _child = old_output[node1, node2] if coreference[_parent] == coreference[_child]: continue new_score = score_matrix[node2, node1] if new_score > max_score: max_score = new_score parents[node1] = node2 added = [False for _ in range(length)] added[0] = True cycle = set() has_cycle = False for i in range(1, length): if has_cycle: break if added[i] or not current_nodes[i]: continue this_cycle = set() this_cycle.add(i) added[i] = True has_cycle = True next_node = i while parents[next_node] not in this_cycle: next_node = parents[next_node] if added[next_node]: has_cycle = False break added[next_node] = True this_cycle.add(next_node) if has_cycle: original = next_node cycle.add(original) next_node = parents[original] while next_node != original: cycle.add(next_node) next_node = parents[next_node] break (has_cycle, cycle) = (has_cycle, list(cycle)) if not has_cycle: final_edges[0] = -1 for node in range(1, length): if not current_nodes[node]: continue parent = old_input[parents[node], node] child = old_output[parents[node], node] final_edges[child] = parent return cycle_weight = 0.0 index = 0 for node in cycle: index += 1 cycle_weight += score_matrix[parents[node], node] cycle_representative = cycle[0] for node in range(length): if not current_nodes[node] or node in cycle: continue in_edge_weight = float('-inf') in_edge = -1 out_edge_weight = float('-inf') out_edge = -1 for node_in_cycle in cycle: _parent = old_input[node_in_cycle, node] _child = old_output[node_in_cycle, node] if coreference[_parent] != coreference[_child]: if score_matrix[node_in_cycle, node] > in_edge_weight: in_edge_weight = score_matrix[node_in_cycle, node] in_edge = node_in_cycle _parent = old_input[node, node_in_cycle] _child = old_output[node, node_in_cycle] if coreference[_parent] != coreference[_child]: 
score = cycle_weight + score_matrix[node, node_in_cycle] - score_matrix[parents[node_in_cycle], node_in_cycle] if score > out_edge_weight: out_edge_weight = score out_edge = node_in_cycle score_matrix[cycle_representative, node] = in_edge_weight old_input[cycle_representative, node] = old_input[in_edge, node] old_output[cycle_representative, node] = old_output[in_edge, node] score_matrix[node, cycle_representative] = out_edge_weight old_output[node, cycle_representative] = old_output[node, out_edge] old_input[node, cycle_representative] = old_input[node, out_edge] considered_representatives: List[Set[int]] = [] for (i, node_in_cycle) in enumerate(cycle): considered_representatives.append(set()) if i > 0: current_nodes[node_in_cycle] = False for node in representatives[node_in_cycle]: considered_representatives[i].add(node) if i > 0: representatives[cycle_representative].add(node) parents = [-1] for node1 in range(1, length): parents.append(0) if current_nodes[node1]: max_score = score_matrix[0, node1] for node2 in range(1, length): if node2 == node1 or not current_nodes[node2]: continue _parent = old_input[node1, node2] _child = old_output[node1, node2] if coreference[_parent] == coreference[_child]: continue new_score = score_matrix[node2, node1] if new_score > max_score: max_score = new_score parents[node1] = node2 (has_cycle, cycle) = _find_cycle(parents, length, current_nodes) if not has_cycle: final_edges[0] = -1 for node in range(1, length): if not current_nodes[node]: continue parent = old_input[parents[node], node] child = old_output[parents[node], node] final_edges[child] = parent return cycle_weight = 0.0 index = 0 for node in cycle: index += 1 cycle_weight += score_matrix[parents[node], node] cycle_representative = cycle[0] for node in range(length): if not current_nodes[node] or node in cycle: continue in_edge_weight = float('-inf') in_edge = -1 out_edge_weight = float('-inf') out_edge = -1 for node_in_cycle in cycle: _parent = old_input[node_in_cycle, node] _child = old_output[node_in_cycle, node] if coreference[_parent] != coreference[_child]: if score_matrix[node_in_cycle, node] > in_edge_weight: in_edge_weight = score_matrix[node_in_cycle, node] in_edge = node_in_cycle _parent = old_input[node, node_in_cycle] _child = old_output[node, node_in_cycle] if coreference[_parent] != coreference[_child]: score = cycle_weight + score_matrix[node, node_in_cycle] - score_matrix[parents[node_in_cycle], node_in_cycle] if score > out_edge_weight: out_edge_weight = score out_edge = node_in_cycle score_matrix[cycle_representative, node] = in_edge_weight old_input[cycle_representative, node] = old_input[in_edge, node] old_output[cycle_representative, node] = old_output[in_edge, node] score_matrix[node, cycle_representative] = out_edge_weight old_output[node, cycle_representative] = old_output[node, out_edge] old_input[node, cycle_representative] = old_input[node, out_edge] considered_representatives: List[Set[int]] = [] for (i, node_in_cycle) in enumerate(cycle): considered_representatives.append(set()) if i > 0: current_nodes[node_in_cycle] = False for node in representatives[node_in_cycle]: considered_representatives[i].add(node) if i > 0: representatives[cycle_representative].add(node) adapted_chu_liu_edmonds(length, score_matrix, coreference, current_nodes, final_edges, old_input, old_output, representatives) found = False key_node = -1 for (i, node) in enumerate(cycle): for cycle_rep in considered_representatives[i]: if cycle_rep in final_edges: key_node = node found = True break if 
found: break previous = parents[key_node] while previous != key_node: child = old_output[parents[previous], previous] parent = old_input[parents[previous], previous] final_edges[child] = parent previous = parents[previous] found = False key_node = -1 for (i, node) in enumerate(cycle): for cycle_rep in considered_representatives[i]: if cycle_rep in final_edges: key_node = node found = True break if found: break previous = parents[key_node] while previous != key_node: child = old_output[parents[previous], previous] parent = old_input[parents[previous], previous] final_edges[child] = parent previous = parents[previous]
ACE
positive
def filter_resolver_args(self, args: Dict[str, Any], info: GraphQLResolveInfo) -> Dict[str, Any]: <DeepExtract> if isinstance(args, dict): args_to_trace = {k: copy_args_for_tracing(v) for (k, v) in args.items()} if isinstance(args, list): args_to_trace = [copy_args_for_tracing(v) for v in args] if isinstance(args, (UploadFile, File)): args_to_trace = repr_upload_file(args) args_to_trace = args </DeepExtract> if not self._arg_filter: return args_to_trace return self._arg_filter(args_to_trace, info)
def filter_resolver_args(self, args: Dict[str, Any], info: GraphQLResolveInfo) -> Dict[str, Any]: if isinstance(args, dict): args_to_trace = {k: copy_args_for_tracing(v) for (k, v) in args.items()} if isinstance(args, list): args_to_trace = [copy_args_for_tracing(v) for v in args] if isinstance(args, (UploadFile, File)): args_to_trace = repr_upload_file(args) args_to_trace = args if not self._arg_filter: return args_to_trace return self._arg_filter(args_to_trace, info)
ariadne
positive
def testInitializerGlorotNormal(self): hparams = common_test_utils.create_test_hparams(encoder_type='uni', num_layers=1, attention='', attention_architecture='', use_residual=False, init_op='glorot_normal') with self.test_session() as sess: <DeepExtract> train_mode = tf.contrib.learn.ModeKeys.TRAIN (train_iterator, src_vocab_table, tgt_vocab_table) = common_test_utils.create_test_iterator(hparams, train_mode) train_m = model.Model(hparams, train_mode, train_iterator, src_vocab_table, tgt_vocab_table, scope='dynamic_seq2seq') sess.run(tf.global_variables_initializer()) sess.run(tf.tables_initializer()) sess.run(train_iterator.initializer) train_m = train_m </DeepExtract> <DeepExtract> for _ in range(num_steps): (_, output_tuple) = train_m.train(sess) loss = output_tuple.train_loss print('{} {}-th step loss is: '.format('InitializerGlorotNormal', num_steps), loss) expected_loss = self.expected_train_values['InitializerGlorotNormal' + '/loss'] self.actual_train_values['InitializerGlorotNormal' + '/loss'] = loss self.assertAllClose(expected_loss, loss) </DeepExtract>
def testInitializerGlorotNormal(self): hparams = common_test_utils.create_test_hparams(encoder_type='uni', num_layers=1, attention='', attention_architecture='', use_residual=False, init_op='glorot_normal') with self.test_session() as sess: train_mode = tf.contrib.learn.ModeKeys.TRAIN (train_iterator, src_vocab_table, tgt_vocab_table) = common_test_utils.create_test_iterator(hparams, train_mode) train_m = model.Model(hparams, train_mode, train_iterator, src_vocab_table, tgt_vocab_table, scope='dynamic_seq2seq') sess.run(tf.global_variables_initializer()) sess.run(tf.tables_initializer()) sess.run(train_iterator.initializer) train_m = train_m for _ in range(num_steps): (_, output_tuple) = train_m.train(sess) loss = output_tuple.train_loss print('{} {}-th step loss is: '.format('InitializerGlorotNormal', num_steps), loss) expected_loss = self.expected_train_values['InitializerGlorotNormal' + '/loss'] self.actual_train_values['InitializerGlorotNormal' + '/loss'] = loss self.assertAllClose(expected_loss, loss)
DAPPLE
positive
def __next__(self): self.count += 1 if self.count == self.nF: raise StopIteration img_path = self.files[self.count] img0 = cv2.imread(img_path) assert img0 is not None, 'Failed to load ' + img_path <DeepExtract> shape = img0.shape[:2] ratio = min(float(self.height) / shape[0], float(self.width) / shape[1]) new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) dw = (self.width - new_shape[0]) / 2 dh = (self.height - new_shape[1]) / 2 (top, bottom) = (round(dh - 0.1), round(dh + 0.1)) (left, right) = (round(dw - 0.1), round(dw + 0.1)) img0 = cv2.resize(img0, new_shape, interpolation=cv2.INTER_AREA) img0 = cv2.copyMakeBorder(img0, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) (img0, _, _, _) = (img0, ratio, dw, dh) </DeepExtract> img = img[:, :, ::-1].transpose(2, 0, 1) img = np.ascontiguousarray(img, dtype=np.float32) img /= 255.0 return (img_path, img, img0)
def __next__(self): self.count += 1 if self.count == self.nF: raise StopIteration img_path = self.files[self.count] img0 = cv2.imread(img_path) assert img0 is not None, 'Failed to load ' + img_path shape = img0.shape[:2] ratio = min(float(self.height) / shape[0], float(self.width) / shape[1]) new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) dw = (self.width - new_shape[0]) / 2 dh = (self.height - new_shape[1]) / 2 (top, bottom) = (round(dh - 0.1), round(dh + 0.1)) (left, right) = (round(dw - 0.1), round(dw + 0.1)) img0 = cv2.resize(img0, new_shape, interpolation=cv2.INTER_AREA) img0 = cv2.copyMakeBorder(img0, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) (img0, _, _, _) = (img0, ratio, dw, dh) img = img[:, :, ::-1].transpose(2, 0, 1) img = np.ascontiguousarray(img, dtype=np.float32) img /= 255.0 return (img_path, img, img0)
AlphAction
positive
def get_nested_property(top_object, property_path: str, separator: str=None, to_update: list=None): if separator is None: separator = '.' elif separator and (not isinstance(separator, str)): raise TypeError('Separator must be a string') top_property_split = property_path.split(separator, 1) if to_update is None: to_update: list = [] if len(top_property_split) == 1 and hasattr(top_object, top_property_split[0]) and (not isinstance(top_object, list)): to_update.append((top_object, top_property_split[0], getattr(top_object, top_property_split[0]))) if len(top_property_split) == 1 and isinstance(top_object, list): for item in top_object: <DeepExtract> if separator is None: separator = '.' elif separator and (not isinstance(separator, str)): raise TypeError('Separator must be a string') top_property_split = top_property_split[0].split(separator, 1) if to_update is None: to_update: list = [] if len(top_property_split) == 1 and hasattr(item, top_property_split[0]) and (not isinstance(item, list)): to_update.append((item, top_property_split[0], getattr(item, top_property_split[0]))) if len(top_property_split) == 1 and isinstance(item, list): for item in item: get_nested_property(item, top_property_split[0], separator=separator, to_update=to_update) if len(top_property_split) > 1 and hasattr(item, top_property_split[0]): return get_nested_property(getattr(item, top_property_split[0]), top_property_split[-1], separator=separator, to_update=to_update) return to_update </DeepExtract> if len(top_property_split) > 1 and hasattr(top_object, top_property_split[0]): return get_nested_property(getattr(top_object, top_property_split[0]), top_property_split[-1], separator=separator, to_update=to_update) return to_update
def get_nested_property(top_object, property_path: str, separator: str=None, to_update: list=None): if separator is None: separator = '.' elif separator and (not isinstance(separator, str)): raise TypeError('Separator must be a string') top_property_split = property_path.split(separator, 1) if to_update is None: to_update: list = [] if len(top_property_split) == 1 and hasattr(top_object, top_property_split[0]) and (not isinstance(top_object, list)): to_update.append((top_object, top_property_split[0], getattr(top_object, top_property_split[0]))) if len(top_property_split) == 1 and isinstance(top_object, list): for item in top_object: if separator is None: separator = '.' elif separator and (not isinstance(separator, str)): raise TypeError('Separator must be a string') top_property_split = top_property_split[0].split(separator, 1) if to_update is None: to_update: list = [] if len(top_property_split) == 1 and hasattr(item, top_property_split[0]) and (not isinstance(item, list)): to_update.append((item, top_property_split[0], getattr(item, top_property_split[0]))) if len(top_property_split) == 1 and isinstance(item, list): for item in item: get_nested_property(item, top_property_split[0], separator=separator, to_update=to_update) if len(top_property_split) > 1 and hasattr(item, top_property_split[0]): return get_nested_property(getattr(item, top_property_split[0]), top_property_split[-1], separator=separator, to_update=to_update) return to_update if len(top_property_split) > 1 and hasattr(top_object, top_property_split[0]): return get_nested_property(getattr(top_object, top_property_split[0]), top_property_split[-1], separator=separator, to_update=to_update) return to_update
ecs_composex
positive
@controller_data(command_call_limit={'NOOP': 7, 'EXPN': 5, '*': 25}) def test_different_limits_custom_default(self, plain_controller, client): assert plain_controller.smtpd._call_limit_default > CALL_LIMIT_DEFAULT srv_ip_port = (plain_controller.hostname, plain_controller.port) <DeepExtract> (code, _) = client.ehlo('example.com') assert code == 250 func = getattr(client, 'noop') expected = ok_expected or S.S250_OK for _ in range(0, 7): assert func(*args) == expected assert func(*args) == S.S421_TOO_MANY('noop'.upper().encode()) with pytest.raises(SMTPServerDisconnected): client.noop() </DeepExtract> client.connect(*srv_ip_port) <DeepExtract> (code, _) = client.ehlo('example.com') assert code == 250 func = getattr(client, 'expn') expected = S.S502_EXPN_NOTIMPL or S.S250_OK for _ in range(0, 5): assert func(*args) == expected assert func(*args) == S.S421_TOO_MANY('expn'.upper().encode()) with pytest.raises(SMTPServerDisconnected): client.noop() </DeepExtract> client.connect(*srv_ip_port) <DeepExtract> (code, _) = client.ehlo('example.com') assert code == 250 func = getattr(client, 'vrfy') expected = S.S252_CANNOT_VRFY or S.S250_OK for _ in range(0, 25): assert func(*args) == expected assert func(*args) == S.S421_TOO_MANY('vrfy'.upper().encode()) with pytest.raises(SMTPServerDisconnected): client.noop() </DeepExtract>
@controller_data(command_call_limit={'NOOP': 7, 'EXPN': 5, '*': 25}) def test_different_limits_custom_default(self, plain_controller, client): assert plain_controller.smtpd._call_limit_default > CALL_LIMIT_DEFAULT srv_ip_port = (plain_controller.hostname, plain_controller.port) (code, _) = client.ehlo('example.com') assert code == 250 func = getattr(client, 'noop') expected = ok_expected or S.S250_OK for _ in range(0, 7): assert func(*args) == expected assert func(*args) == S.S421_TOO_MANY('noop'.upper().encode()) with pytest.raises(SMTPServerDisconnected): client.noop() client.connect(*srv_ip_port) (code, _) = client.ehlo('example.com') assert code == 250 func = getattr(client, 'expn') expected = S.S502_EXPN_NOTIMPL or S.S250_OK for _ in range(0, 5): assert func(*args) == expected assert func(*args) == S.S421_TOO_MANY('expn'.upper().encode()) with pytest.raises(SMTPServerDisconnected): client.noop() client.connect(*srv_ip_port) (code, _) = client.ehlo('example.com') assert code == 250 func = getattr(client, 'vrfy') expected = S.S252_CANNOT_VRFY or S.S250_OK for _ in range(0, 25): assert func(*args) == expected assert func(*args) == S.S421_TOO_MANY('vrfy'.upper().encode()) with pytest.raises(SMTPServerDisconnected): client.noop()
aiosmtpd
positive
def visit_Tuple(self, node): <DeepExtract> if len('(') == 0: return if len(self.blame_stack) == 0: if self.last is not None: self.last = None self.line_info.append((len(self.line), self.last)) elif self.last != self.blame_stack[-1]: self.last = self.blame_stack[-1] self.line_info.append((len(self.line), self.last)) self.line += '(' </DeepExtract> for elt in node.elts: <DeepExtract> if elt is None: return None if isinstance(elt, tuple): return tuple([self.visit(n) for n in elt]) try: self.blame_stack.append((elt.lineno, elt.col_offset)) info = True except AttributeError: info = False visitor = getattr(self, 'visit_%s' % elt.__class__.__name__, None) if visitor is None: raise Exception('No handler for ``{}`` ({}).'.format(elt.__class__.__name__, repr(elt))) ret = visitor(elt) if info: self.blame_stack.pop() return ret </DeepExtract> <DeepExtract> if len(', ') == 0: return if len(self.blame_stack) == 0: if self.last is not None: self.last = None self.line_info.append((len(self.line), self.last)) elif self.last != self.blame_stack[-1]: self.last = self.blame_stack[-1] self.line_info.append((len(self.line), self.last)) self.line += ', ' </DeepExtract> <DeepExtract> if len(')') == 0: return if len(self.blame_stack) == 0: if self.last is not None: self.last = None self.line_info.append((len(self.line), self.last)) elif self.last != self.blame_stack[-1]: self.last = self.blame_stack[-1] self.line_info.append((len(self.line), self.last)) self.line += ')' </DeepExtract>
def visit_Tuple(self, node): if len('(') == 0: return if len(self.blame_stack) == 0: if self.last is not None: self.last = None self.line_info.append((len(self.line), self.last)) elif self.last != self.blame_stack[-1]: self.last = self.blame_stack[-1] self.line_info.append((len(self.line), self.last)) self.line += '(' for elt in node.elts: if elt is None: return None if isinstance(elt, tuple): return tuple([self.visit(n) for n in elt]) try: self.blame_stack.append((elt.lineno, elt.col_offset)) info = True except AttributeError: info = False visitor = getattr(self, 'visit_%s' % elt.__class__.__name__, None) if visitor is None: raise Exception('No handler for ``{}`` ({}).'.format(elt.__class__.__name__, repr(elt))) ret = visitor(elt) if info: self.blame_stack.pop() return ret if len(', ') == 0: return if len(self.blame_stack) == 0: if self.last is not None: self.last = None self.line_info.append((len(self.line), self.last)) elif self.last != self.blame_stack[-1]: self.last = self.blame_stack[-1] self.line_info.append((len(self.line), self.last)) self.line += ', ' if len(')') == 0: return if len(self.blame_stack) == 0: if self.last is not None: self.last = None self.line_info.append((len(self.line), self.last)) elif self.last != self.blame_stack[-1]: self.last = self.blame_stack[-1] self.line_info.append((len(self.line), self.last)) self.line += ')'
chameleon
positive
@classmethod def poll(cls, context): <DeepExtract> r = [] for o in context.scene.objects: if o.type == 'CAMERA': r.append(o) r.sort(key=lambda c: c.name) cams = r </DeepExtract> if len(cams) > 1: return True return False
@classmethod def poll(cls, context): r = [] for o in context.scene.objects: if o.type == 'CAMERA': r.append(o) r.sort(key=lambda c: c.name) cams = r if len(cams) > 1: return True return False
bpy
positive
def init_video_file_capture(video_path: str, output_path: str): """ Creates a video capture object from a video file. Args: video_path: User-specified video file path. output_path: Optional path to save the processed video. Returns: Video capture object to capture frames, video writer object to write processed frames to file, plus total frame count of video source to iterate through. """ if not os.path.exists(video_path): raise FileNotFoundError(f'Video file not found for: {video_path}') video = cv2.VideoCapture(video_path) if not video.isOpened: raise RuntimeError(f'Failed to open video capture from file: {video_path}') <DeepExtract> (_, ext) = os.path.splitext(video_path) if output_path is not None: assert os.path.isdir(output_path) (i, filename) = (0, os.path.join(output_path if output_path is not None else str(), f'object_detection_demo{ext}')) while os.path.exists(filename): i += 1 filename = os.path.join(output_path if output_path is not None else str(), f'object_detection_demo({i}){ext}') video_writer = cv2.VideoWriter(filename=filename, fourcc=get_source_encoding_int(video), fps=int(video.get(cv2.CAP_PROP_FPS)), frameSize=(int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))) video_writer = video_writer </DeepExtract> iter_frame_count = range(int(video.get(cv2.CAP_PROP_FRAME_COUNT))) return (video, video_writer, iter_frame_count)
def init_video_file_capture(video_path: str, output_path: str): """ Creates a video capture object from a video file. Args: video_path: User-specified video file path. output_path: Optional path to save the processed video. Returns: Video capture object to capture frames, video writer object to write processed frames to file, plus total frame count of video source to iterate through. """ if not os.path.exists(video_path): raise FileNotFoundError(f'Video file not found for: {video_path}') video = cv2.VideoCapture(video_path) if not video.isOpened: raise RuntimeError(f'Failed to open video capture from file: {video_path}') (_, ext) = os.path.splitext(video_path) if output_path is not None: assert os.path.isdir(output_path) (i, filename) = (0, os.path.join(output_path if output_path is not None else str(), f'object_detection_demo{ext}')) while os.path.exists(filename): i += 1 filename = os.path.join(output_path if output_path is not None else str(), f'object_detection_demo({i}){ext}') video_writer = cv2.VideoWriter(filename=filename, fourcc=get_source_encoding_int(video), fps=int(video.get(cv2.CAP_PROP_FPS)), frameSize=(int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))) video_writer = video_writer iter_frame_count = range(int(video.get(cv2.CAP_PROP_FRAME_COUNT))) return (video, video_writer, iter_frame_count)
aXeleRate
positive
def test(): if 0: from pandas import DataFrame X = np.linspace(0.01, 1.0, 10) Y = np.log(X) Y -= Y.min() Y /= Y.max() Y *= 0.95 df = DataFrame({'X': X, 'Y': Y}) P = Pareto(df, 'X', 'Y') data = [] for val in np.linspace(0, 1, 15): data.append(dict(val=val, x=P.lookup_x(val), y=P.lookup_y(val))) pl.axvline(val, alpha=0.5) pl.axhline(val, alpha=0.5) dd = DataFrame(data) pl.scatter(dd.y, dd.val, lw=0, c='r') pl.scatter(dd.val, dd.x, lw=0, c='g') print(dd) P.show_frontier(c='r', lw=4) pl.show() (X, Y) = np.random.normal(0, 1, size=(2, 30)) for maxX in [0, 1]: for maxY in [0, 1]: pl.figure() pl.title('max x: %s, max y: %s' % (maxX, maxY)) pl.scatter(X, Y, lw=0) <DeepExtract> if ax is None: ax = pl.gca() sty = {'c': 'b', 'alpha': 0.3, 'zorder': 0} sty.update(style) if interpolation == 'linear-convex': from scipy.spatial import ConvexHull X = np.array(X) Y = np.array(Y) hull = ConvexHull(np.array([X, Y]).T) X = X[hull.vertices] Y = Y[hull.vertices] f = pareto_frontier(X, Y, maxX=maxX, maxY=maxY) if not f: print(colors.yellow % '[warn] Empty frontier') return if dots: (xs, ys) = list(zip(*f)) ax.scatter(xs, ys, lw=0, alpha=0.5, c=sty['c']) XMIN = min(min(X), XMIN) if XMIN is not None else min(X) XMAX = max(max(X), XMAX) if XMAX is not None else max(X) YMIN = min(min(Y), YMIN) if YMIN is not None else min(Y) YMAX = max(max(Y), YMAX) if YMAX is not None else max(Y) if maxX and maxY: f = [(XMAX, YMIN)] + f + [(XMIN, YMAX)] elif not maxX and maxY: f = [(XMIN, YMIN)] + f + [(XMAX, YMAX)] elif maxX and (not maxY): f = [(XMAX, YMAX)] + f + [(XMIN, YMIN)] else: f = [(XMIN, YMAX)] + f + [(XMAX, YMIN)] if interpolation == 'pessimistic': pts = np.array([x for ((a, b), (c, d)) in window(f, 2) for x in [[a, b], [c, b], [c, b], [c, d]]]) elif interpolation in {'linear', 'linear-convex'}: pts = np.array([x for ((a, b), (c, d)) in window(f, 2) for x in [[a, b], [c, d]]]) ax.plot(pts[:, 0], pts[:, 1], label=label, **sty) return pts </DeepExtract> pl.show()
def test(): if 0: from pandas import DataFrame X = np.linspace(0.01, 1.0, 10) Y = np.log(X) Y -= Y.min() Y /= Y.max() Y *= 0.95 df = DataFrame({'X': X, 'Y': Y}) P = Pareto(df, 'X', 'Y') data = [] for val in np.linspace(0, 1, 15): data.append(dict(val=val, x=P.lookup_x(val), y=P.lookup_y(val))) pl.axvline(val, alpha=0.5) pl.axhline(val, alpha=0.5) dd = DataFrame(data) pl.scatter(dd.y, dd.val, lw=0, c='r') pl.scatter(dd.val, dd.x, lw=0, c='g') print(dd) P.show_frontier(c='r', lw=4) pl.show() (X, Y) = np.random.normal(0, 1, size=(2, 30)) for maxX in [0, 1]: for maxY in [0, 1]: pl.figure() pl.title('max x: %s, max y: %s' % (maxX, maxY)) pl.scatter(X, Y, lw=0) if ax is None: ax = pl.gca() sty = {'c': 'b', 'alpha': 0.3, 'zorder': 0} sty.update(style) if interpolation == 'linear-convex': from scipy.spatial import ConvexHull X = np.array(X) Y = np.array(Y) hull = ConvexHull(np.array([X, Y]).T) X = X[hull.vertices] Y = Y[hull.vertices] f = pareto_frontier(X, Y, maxX=maxX, maxY=maxY) if not f: print(colors.yellow % '[warn] Empty frontier') return if dots: (xs, ys) = list(zip(*f)) ax.scatter(xs, ys, lw=0, alpha=0.5, c=sty['c']) XMIN = min(min(X), XMIN) if XMIN is not None else min(X) XMAX = max(max(X), XMAX) if XMAX is not None else max(X) YMIN = min(min(Y), YMIN) if YMIN is not None else min(Y) YMAX = max(max(Y), YMAX) if YMAX is not None else max(Y) if maxX and maxY: f = [(XMAX, YMIN)] + f + [(XMIN, YMAX)] elif not maxX and maxY: f = [(XMIN, YMIN)] + f + [(XMAX, YMAX)] elif maxX and (not maxY): f = [(XMAX, YMAX)] + f + [(XMIN, YMIN)] else: f = [(XMIN, YMAX)] + f + [(XMAX, YMIN)] if interpolation == 'pessimistic': pts = np.array([x for ((a, b), (c, d)) in window(f, 2) for x in [[a, b], [c, b], [c, b], [c, d]]]) elif interpolation in {'linear', 'linear-convex'}: pts = np.array([x for ((a, b), (c, d)) in window(f, 2) for x in [[a, b], [c, d]]]) ax.plot(pts[:, 0], pts[:, 1], label=label, **sty) return pts pl.show()
arsenal
positive
def setup(self, host: Optional[str]=None, port: Optional[int]=None, elasticsearch_targets: Optional[List[str]]=None) -> None: """Setup Kibana Args: host: The IP or hostname to listen on port: The port to listen on elasticsearch_targets: A list of Elasticsearch urls Returns: None """ sysctl = systemctl.SystemCtl() self.logger.debug(f'Creating directory: {self.configuration_directory}') utilities.makedirs(self.configuration_directory) self.logger.debug(f'Creating directory: {self.install_directory}') utilities.makedirs(self.install_directory) self.logger.debug(f'Creating directory: {self.log_directory}') utilities.makedirs(self.log_directory) <DeepExtract> kibana_tarball_extracted = f'{const.INSTALL_CACHE}/{self.local_mirror_root}' config_paths = ['config/kibana.yml', 'config/node.options'] install_paths = ['bin/', 'data/', 'node/', 'node_modules/', 'plugins/', 'src/', 'package.json'] for conf in config_paths: self.copy_file_or_directory_to_destination(f'{kibana_tarball_extracted}/{conf}', self.configuration_directory) for inst in install_paths: self.copy_file_or_directory_to_destination(f'{kibana_tarball_extracted}/{inst}', self.install_directory) </DeepExtract> <DeepExtract> self.create_update_env_variable('KIBANA_PATH_CONF', self.configuration_directory) self.create_update_env_variable('KIBANA_HOME', self.install_directory) self.create_update_env_variable('KIBANA_LOGS', self.log_directory) </DeepExtract> self.copy_file_or_directory_to_destination(f'{const.DEFAULT_CONFIGS}/kibana/kibana.yml', self.configuration_directory) kb_main_config = config.ConfigManager(self.configuration_directory) if not host: host = utilities.get_primary_ip_address() if not port: port = 5601 if not elasticsearch_targets: elasticsearch_targets = [f'https://{utilities.get_primary_ip_address()}:9200'] self.logger.debug(f'Elasticsearch Targets = {elasticsearch_targets}') kb_main_config.host = host kb_main_config.port = port self.logger.debug(f'Kibana will listen on {kb_main_config.host}:{kb_main_config.port}') kb_main_config.elasticsearch_targets = elasticsearch_targets self.logger.info('Applying configuration.') kb_main_config.commit() utilities.set_ownership_of_file(self.configuration_directory, user='dynamite', group='dynamite') utilities.set_ownership_of_file(self.install_directory, user='dynamite', group='dynamite') utilities.set_ownership_of_file(self.log_directory, user='dynamite', group='dynamite') self.logger.info(f'Installing service -> {const.DEFAULT_CONFIGS}/systemd/kibana.service') sysctl.install_and_enable(f'{const.DEFAULT_CONFIGS}/systemd/kibana.service') self.logger.info('Installing "BaseViews" Kibana package') task = install_dynamite_base_views.InstallKibanaDynamiteBaseViewsPackage(username='admin', password='admin', target=f'http://{host}:{port}') task.download_and_install()
def setup(self, host: Optional[str]=None, port: Optional[int]=None, elasticsearch_targets: Optional[List[str]]=None) -> None: """Setup Kibana Args: host: The IP or hostname to listen on port: The port to listen on elasticsearch_targets: A list of Elasticsearch urls Returns: None """ sysctl = systemctl.SystemCtl() self.logger.debug(f'Creating directory: {self.configuration_directory}') utilities.makedirs(self.configuration_directory) self.logger.debug(f'Creating directory: {self.install_directory}') utilities.makedirs(self.install_directory) self.logger.debug(f'Creating directory: {self.log_directory}') utilities.makedirs(self.log_directory) kibana_tarball_extracted = f'{const.INSTALL_CACHE}/{self.local_mirror_root}' config_paths = ['config/kibana.yml', 'config/node.options'] install_paths = ['bin/', 'data/', 'node/', 'node_modules/', 'plugins/', 'src/', 'package.json'] for conf in config_paths: self.copy_file_or_directory_to_destination(f'{kibana_tarball_extracted}/{conf}', self.configuration_directory) for inst in install_paths: self.copy_file_or_directory_to_destination(f'{kibana_tarball_extracted}/{inst}', self.install_directory) self.create_update_env_variable('KIBANA_PATH_CONF', self.configuration_directory) self.create_update_env_variable('KIBANA_HOME', self.install_directory) self.create_update_env_variable('KIBANA_LOGS', self.log_directory) self.copy_file_or_directory_to_destination(f'{const.DEFAULT_CONFIGS}/kibana/kibana.yml', self.configuration_directory) kb_main_config = config.ConfigManager(self.configuration_directory) if not host: host = utilities.get_primary_ip_address() if not port: port = 5601 if not elasticsearch_targets: elasticsearch_targets = [f'https://{utilities.get_primary_ip_address()}:9200'] self.logger.debug(f'Elasticsearch Targets = {elasticsearch_targets}') kb_main_config.host = host kb_main_config.port = port self.logger.debug(f'Kibana will listen on {kb_main_config.host}:{kb_main_config.port}') kb_main_config.elasticsearch_targets = elasticsearch_targets self.logger.info('Applying configuration.') kb_main_config.commit() utilities.set_ownership_of_file(self.configuration_directory, user='dynamite', group='dynamite') utilities.set_ownership_of_file(self.install_directory, user='dynamite', group='dynamite') utilities.set_ownership_of_file(self.log_directory, user='dynamite', group='dynamite') self.logger.info(f'Installing service -> {const.DEFAULT_CONFIGS}/systemd/kibana.service') sysctl.install_and_enable(f'{const.DEFAULT_CONFIGS}/systemd/kibana.service') self.logger.info('Installing "BaseViews" Kibana package') task = install_dynamite_base_views.InstallKibanaDynamiteBaseViewsPackage(username='admin', password='admin', target=f'http://{host}:{port}') task.download_and_install()
dynamite-nsm
positive
def fully_connected(inputs, num_inputs, num_outputs, scope, use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ Fully connected layer with non-linear operation. Args: inputs: 2-D tensor BxN num_outputs: int Returns: Variable tensor of size B x num_outputs. """ with tf.variable_scope(scope) as sc: <DeepExtract> if use_xavier: initializer = tf.contrib.layers.xavier_initializer() else: initializer = tf.truncated_normal_initializer(stddev=stddev) var = _variable_on_cpu('weights', [num_inputs, num_outputs], initializer) if weight_decay is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss') tf.add_to_collection('losses', weight_decay) weights = var </DeepExtract> outputs = tf.matmul(inputs, weights) <DeepExtract> with tf.device('/cpu:0'): dtype = tf.float16 if use_fp16 else tf.float32 var = tf.get_variable('biases', [num_outputs], initializer=tf.constant_initializer(0.0), dtype=dtype) biases = var </DeepExtract> outputs = tf.nn.bias_add(outputs, biases) if bn: <DeepExtract> outputs = batch_norm_template(outputs, is_training, 'bn', [0], bn_decay) </DeepExtract> if activation_fn is not None: outputs = activation_fn(outputs) return outputs
def fully_connected(inputs, num_inputs, num_outputs, scope, use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ Fully connected layer with non-linear operation. Args: inputs: 2-D tensor BxN num_outputs: int Returns: Variable tensor of size B x num_outputs. """ with tf.variable_scope(scope) as sc: if use_xavier: initializer = tf.contrib.layers.xavier_initializer() else: initializer = tf.truncated_normal_initializer(stddev=stddev) var = _variable_on_cpu('weights', [num_inputs, num_outputs], initializer) if weight_decay is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss') tf.add_to_collection('losses', weight_decay) weights = var outputs = tf.matmul(inputs, weights) with tf.device('/cpu:0'): dtype = tf.float16 if use_fp16 else tf.float32 var = tf.get_variable('biases', [num_outputs], initializer=tf.constant_initializer(0.0), dtype=dtype) biases = var outputs = tf.nn.bias_add(outputs, biases) if bn: outputs = batch_norm_template(outputs, is_training, 'bn', [0], bn_decay) if activation_fn is not None: outputs = activation_fn(outputs) return outputs
Danesfield
positive
def create_loader_and_network(sample_data, name): <DeepExtract> roidb = [] for _ in range(np.random.randint(4, 10)): roidb.append({'data': sample_data}) roidb = roidb </DeepExtract> loader = RoIDataLoader(roidb) <DeepExtract> logger = logging.getLogger(__name__) blob_names = loader.get_output_names() net = core.Net('dequeue_net_train') net.type = 'dag' for gpu_id in range(cfg.NUM_GPUS): with core.NameScope('gpu_{}'.format(gpu_id)): with core.DeviceScope(muji.OnGPU(gpu_id)): for blob_name in blob_names: blob = core.ScopedName(blob_name) workspace.CreateBlob(blob) net.DequeueBlobs(loader._blobs_queue_name, blob_names) logger.info('Protobuf:\n' + str(net.Proto())) net = net </DeepExtract> loader.register_sigint_handler() loader.start(prefill=False) return (loader, net)
def create_loader_and_network(sample_data, name): roidb = [] for _ in range(np.random.randint(4, 10)): roidb.append({'data': sample_data}) roidb = roidb loader = RoIDataLoader(roidb) logger = logging.getLogger(__name__) blob_names = loader.get_output_names() net = core.Net('dequeue_net_train') net.type = 'dag' for gpu_id in range(cfg.NUM_GPUS): with core.NameScope('gpu_{}'.format(gpu_id)): with core.DeviceScope(muji.OnGPU(gpu_id)): for blob_name in blob_names: blob = core.ScopedName(blob_name) workspace.CreateBlob(blob) net.DequeueBlobs(loader._blobs_queue_name, blob_names) logger.info('Protobuf:\n' + str(net.Proto())) net = net loader.register_sigint_handler() loader.start(prefill=False) return (loader, net)
AIC2018_iamai
positive
def call(self) -> None: if len(self.args) < 2: self.write("tar: You must specify one of the `-Acdtrux' options\n") self.write("Try `tar --help' or `tar --usage' for more information.\n") return filename = self.args[1] extract = False if 'x' in self.args[0]: extract = True verbose = False if 'v' in self.args[0]: verbose = True path = self.fs.resolve_path(filename, self.protocol.cwd) if not path or not self.protocol.fs.exists(path): self.write(f'tar: {filename}: Cannot open: No such file or directory\n') self.write('tar: Error is not recoverable: exiting now\n') self.write('tar: Child returned status 2\n') self.write('tar: Error exit delayed from previous errors\n') return hpf = self.fs.getfile(path) if not hpf[A_REALFILE]: self.write('tar: this does not look like a tar archive\n') self.write('tar: skipping to next header\n') self.write('tar: error exit delayed from previous errors\n') return try: t = tarfile.open(hpf[A_REALFILE]) except Exception: self.write('tar: this does not look like a tar archive\n') self.write('tar: skipping to next header\n') self.write('tar: error exit delayed from previous errors\n') return for f in t: dest = self.fs.resolve_path(f.name.strip('/'), self.protocol.cwd) if verbose: self.write(f'{f.name}\n') if not extract or not len(dest): continue if f.isdir(): self.fs.mkdir(dest, 0, 0, 4096, f.mode, f.mtime) elif f.isfile(): <DeepExtract> (components, d) = (os.path.dirname(dest).split('/'), []) while len(components): d.append(components.pop(0)) p = '/'.join(d) if p and (not self.fs.exists(p)): self.fs.mkdir(p, 0, 0, 4096, f.mode, f.mtime) </DeepExtract> self.fs.mkfile(dest, 0, 0, f.size, f.mode, f.mtime) else: log.msg(f'tar: skipping [{f.name}]')
def call(self) -> None: if len(self.args) < 2: self.write("tar: You must specify one of the `-Acdtrux' options\n") self.write("Try `tar --help' or `tar --usage' for more information.\n") return filename = self.args[1] extract = False if 'x' in self.args[0]: extract = True verbose = False if 'v' in self.args[0]: verbose = True path = self.fs.resolve_path(filename, self.protocol.cwd) if not path or not self.protocol.fs.exists(path): self.write(f'tar: {filename}: Cannot open: No such file or directory\n') self.write('tar: Error is not recoverable: exiting now\n') self.write('tar: Child returned status 2\n') self.write('tar: Error exit delayed from previous errors\n') return hpf = self.fs.getfile(path) if not hpf[A_REALFILE]: self.write('tar: this does not look like a tar archive\n') self.write('tar: skipping to next header\n') self.write('tar: error exit delayed from previous errors\n') return try: t = tarfile.open(hpf[A_REALFILE]) except Exception: self.write('tar: this does not look like a tar archive\n') self.write('tar: skipping to next header\n') self.write('tar: error exit delayed from previous errors\n') return for f in t: dest = self.fs.resolve_path(f.name.strip('/'), self.protocol.cwd) if verbose: self.write(f'{f.name}\n') if not extract or not len(dest): continue if f.isdir(): self.fs.mkdir(dest, 0, 0, 4096, f.mode, f.mtime) elif f.isfile(): (components, d) = (os.path.dirname(dest).split('/'), []) while len(components): d.append(components.pop(0)) p = '/'.join(d) if p and (not self.fs.exists(p)): self.fs.mkdir(p, 0, 0, 4096, f.mode, f.mtime) self.fs.mkfile(dest, 0, 0, f.size, f.mode, f.mtime) else: log.msg(f'tar: skipping [{f.name}]')
cowrie
positive
def test_wln_candidate_ranking(): if torch.cuda.is_available(): device = torch.device('cuda:0') else: device = torch.device('cpu') <DeepExtract> edges = (np.array([0, 1, 2]), np.array([1, 2, 2])) reactant_g = dgl.graph(edges) reactant_node_feats = torch.arange(reactant_g.num_nodes()).float().reshape(-1, 1) reactant_edge_feats = torch.arange(reactant_g.num_edges()).float().reshape(-1, 1) product_g = [] batch_num_candidate_products = [] for i in range(1, 2): product_g.extend([dgl.graph(edges) for _ in range(i)]) batch_num_candidate_products.append(i) product_g = dgl.batch(product_g) product_node_feats = torch.arange(product_g.num_nodes()).float().reshape(-1, 1) product_edge_feats = torch.arange(product_g.num_edges()).float().reshape(-1, 1) product_scores = torch.randn(sum(batch_num_candidate_products), 1) (reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, num_candidate_products) = (reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, batch_num_candidate_products) </DeepExtract> reactant_g = reactant_g.to(device) product_g = product_g.to(device) (reactant_node_feats, reactant_edge_feats) = (reactant_node_feats.to(device), reactant_edge_feats.to(device)) (product_node_feats, product_edge_feats, product_scores) = (product_node_feats.to(device), product_edge_feats.to(device), product_scores.to(device)) <DeepExtract> batch_size = 2 edges = (np.array([0, 1, 2]), np.array([1, 2, 2])) reactant_g = [] for _ in range(batch_size): reactant_g.append(dgl.graph(edges)) reactant_g = dgl.batch(reactant_g) reactant_node_feats = torch.arange(reactant_g.num_nodes()).float().reshape(-1, 1) reactant_edge_feats = torch.arange(reactant_g.num_edges()).float().reshape(-1, 1) product_g = [] batch_num_candidate_products = [] for i in range(1, batch_size + 1): product_g.extend([dgl.graph(edges) for _ in range(i)]) batch_num_candidate_products.append(i) product_g = dgl.batch(product_g) product_node_feats = torch.arange(product_g.num_nodes()).float().reshape(-1, 1) product_edge_feats = torch.arange(product_g.num_edges()).float().reshape(-1, 1) product_scores = torch.randn(sum(batch_num_candidate_products), 1) (batch_reactant_g, batch_reactant_node_feats, batch_reactant_edge_feats, batch_product_g, batch_product_node_feats, batch_product_edge_feats, batch_product_scores, batch_num_candidate_products) = (reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, batch_num_candidate_products) </DeepExtract> batch_reactant_g = batch_reactant_g.to(device) batch_product_g = batch_product_g.to(device) batch_reactant_node_feats = batch_reactant_node_feats.to(device) batch_reactant_edge_feats = batch_reactant_edge_feats.to(device) batch_product_node_feats = batch_product_node_feats.to(device) batch_product_edge_feats = batch_product_edge_feats.to(device) batch_product_scores = batch_product_scores.to(device) model = WLNReactionRanking(node_in_feats=1, edge_in_feats=1).to(device) assert model(reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, num_candidate_products).shape == torch.Size([sum(num_candidate_products), 1]) assert model(batch_reactant_g, batch_reactant_node_feats, batch_reactant_edge_feats, batch_product_g, batch_product_node_feats, batch_product_edge_feats, batch_product_scores, batch_num_candidate_products).shape == torch.Size([sum(batch_num_candidate_products), 1]) model = WLNReactionRanking(node_in_feats=1, edge_in_feats=1, node_hidden_feats=100, num_encode_gnn_layers=2).to(device) assert model(reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, num_candidate_products).shape == torch.Size([sum(num_candidate_products), 1]) assert model(batch_reactant_g, batch_reactant_node_feats, batch_reactant_edge_feats, batch_product_g, batch_product_node_feats, batch_product_edge_feats, batch_product_scores, batch_num_candidate_products).shape == torch.Size([sum(batch_num_candidate_products), 1])
def test_wln_candidate_ranking(): if torch.cuda.is_available(): device = torch.device('cuda:0') else: device = torch.device('cpu') edges = (np.array([0, 1, 2]), np.array([1, 2, 2])) reactant_g = dgl.graph(edges) reactant_node_feats = torch.arange(reactant_g.num_nodes()).float().reshape(-1, 1) reactant_edge_feats = torch.arange(reactant_g.num_edges()).float().reshape(-1, 1) product_g = [] batch_num_candidate_products = [] for i in range(1, 2): product_g.extend([dgl.graph(edges) for _ in range(i)]) batch_num_candidate_products.append(i) product_g = dgl.batch(product_g) product_node_feats = torch.arange(product_g.num_nodes()).float().reshape(-1, 1) product_edge_feats = torch.arange(product_g.num_edges()).float().reshape(-1, 1) product_scores = torch.randn(sum(batch_num_candidate_products), 1) (reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, num_candidate_products) = (reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, batch_num_candidate_products) reactant_g = reactant_g.to(device) product_g = product_g.to(device) (reactant_node_feats, reactant_edge_feats) = (reactant_node_feats.to(device), reactant_edge_feats.to(device)) (product_node_feats, product_edge_feats, product_scores) = (product_node_feats.to(device), product_edge_feats.to(device), product_scores.to(device)) batch_size = 2 edges = (np.array([0, 1, 2]), np.array([1, 2, 2])) reactant_g = [] for _ in range(batch_size): reactant_g.append(dgl.graph(edges)) reactant_g = dgl.batch(reactant_g) reactant_node_feats = torch.arange(reactant_g.num_nodes()).float().reshape(-1, 1) reactant_edge_feats = torch.arange(reactant_g.num_edges()).float().reshape(-1, 1) product_g = [] batch_num_candidate_products = [] for i in range(1, batch_size + 1): product_g.extend([dgl.graph(edges) for _ in range(i)]) batch_num_candidate_products.append(i) product_g = dgl.batch(product_g) product_node_feats = torch.arange(product_g.num_nodes()).float().reshape(-1, 1) product_edge_feats = torch.arange(product_g.num_edges()).float().reshape(-1, 1) product_scores = torch.randn(sum(batch_num_candidate_products), 1) (batch_reactant_g, batch_reactant_node_feats, batch_reactant_edge_feats, batch_product_g, batch_product_node_feats, batch_product_edge_feats, batch_product_scores, batch_num_candidate_products) = (reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, batch_num_candidate_products) batch_reactant_g = batch_reactant_g.to(device) batch_product_g = batch_product_g.to(device) batch_reactant_node_feats = batch_reactant_node_feats.to(device) batch_reactant_edge_feats = batch_reactant_edge_feats.to(device) batch_product_node_feats = batch_product_node_feats.to(device) batch_product_edge_feats = batch_product_edge_feats.to(device) batch_product_scores = batch_product_scores.to(device) model = WLNReactionRanking(node_in_feats=1, edge_in_feats=1).to(device) assert model(reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, num_candidate_products).shape == torch.Size([sum(num_candidate_products), 1]) assert model(batch_reactant_g, batch_reactant_node_feats, batch_reactant_edge_feats, batch_product_g, batch_product_node_feats, batch_product_edge_feats, batch_product_scores, batch_num_candidate_products).shape == torch.Size([sum(batch_num_candidate_products), 1]) model = WLNReactionRanking(node_in_feats=1, edge_in_feats=1, node_hidden_feats=100, num_encode_gnn_layers=2).to(device) assert model(reactant_g, reactant_node_feats, reactant_edge_feats, product_g, product_node_feats, product_edge_feats, product_scores, num_candidate_products).shape == torch.Size([sum(num_candidate_products), 1]) assert model(batch_reactant_g, batch_reactant_node_feats, batch_reactant_edge_feats, batch_product_g, batch_product_node_feats, batch_product_edge_feats, batch_product_scores, batch_num_candidate_products).shape == torch.Size([sum(batch_num_candidate_products), 1])
dgl-lifesci
positive
def eval_trained_model(config_name, ckpt_dir, out_dir, images_glob, tfds_arguments: helpers.TFDSArguments, max_images=None): """Evaluate a trained model.""" config = configs.get_config(config_name) hific = model.HiFiC(config, helpers.ModelMode.EVALUATION) dataset = hific.build_input(batch_size=1, crop_size=None, images_glob=images_glob, tfds_arguments=tfds_arguments) <DeepExtract> if not images_glob: image_names = {} image_names = {i: os.path.splitext(os.path.basename(p))[0] for (i, p) in enumerate(sorted(glob.glob(images_glob)))} </DeepExtract> iterator = tf.data.make_one_shot_iterator(dataset) get_next_image = iterator.get_next() input_image = get_next_image['input_image'] (output_image, bitstring) = hific.build_model(**get_next_image) input_image = tf.cast(tf.round(input_image[0, ...]), tf.uint8) output_image = tf.cast(tf.round(output_image[0, ...]), tf.uint8) os.makedirs(out_dir, exist_ok=True) accumulated_metrics = collections.defaultdict(list) with tf.Session() as sess: hific.restore_trained_model(sess, ckpt_dir) hific.prepare_for_arithmetic_coding(sess) for i in itertools.count(0): if max_images and i == max_images: break try: (inp_np, otp_np, bitstring_np) = sess.run([input_image, output_image, bitstring]) (h, w, c) = inp_np.shape assert c == 3 <DeepExtract> packed = tfc.PackedTensors() packed.pack(tensors=bitstring, arrays=bitstring_np) bpp = len(packed.string) * 8 / h * w </DeepExtract> metrics = {'psnr': get_psnr(inp_np, otp_np), 'bpp_real': bpp} metrics_str = ' / '.join((f'{metric}: {value:.5f}' for (metric, value) in metrics.items())) print(f'Image {i: 4d}: {metrics_str}, saving in {out_dir}...') for (metric, value) in metrics.items(): accumulated_metrics[metric].append(value) name = image_names.get(i, f'img_{i:010d}') Image.fromarray(inp_np).save(os.path.join(out_dir, f'{name}_inp.png')) Image.fromarray(otp_np).save(os.path.join(out_dir, f'{name}_otp_{bpp:.3f}.png')) except tf.errors.OutOfRangeError: print('No more inputs.') break print('\n'.join((f'{metric}: {np.mean(values)}' for (metric, values) in accumulated_metrics.items()))) print('Done!')
def eval_trained_model(config_name, ckpt_dir, out_dir, images_glob, tfds_arguments: helpers.TFDSArguments, max_images=None): """Evaluate a trained model.""" config = configs.get_config(config_name) hific = model.HiFiC(config, helpers.ModelMode.EVALUATION) dataset = hific.build_input(batch_size=1, crop_size=None, images_glob=images_glob, tfds_arguments=tfds_arguments) if not images_glob: image_names = {} image_names = {i: os.path.splitext(os.path.basename(p))[0] for (i, p) in enumerate(sorted(glob.glob(images_glob)))} iterator = tf.data.make_one_shot_iterator(dataset) get_next_image = iterator.get_next() input_image = get_next_image['input_image'] (output_image, bitstring) = hific.build_model(**get_next_image) input_image = tf.cast(tf.round(input_image[0, ...]), tf.uint8) output_image = tf.cast(tf.round(output_image[0, ...]), tf.uint8) os.makedirs(out_dir, exist_ok=True) accumulated_metrics = collections.defaultdict(list) with tf.Session() as sess: hific.restore_trained_model(sess, ckpt_dir) hific.prepare_for_arithmetic_coding(sess) for i in itertools.count(0): if max_images and i == max_images: break try: (inp_np, otp_np, bitstring_np) = sess.run([input_image, output_image, bitstring]) (h, w, c) = inp_np.shape assert c == 3 packed = tfc.PackedTensors() packed.pack(tensors=bitstring, arrays=bitstring_np) bpp = len(packed.string) * 8 / h * w metrics = {'psnr': get_psnr(inp_np, otp_np), 'bpp_real': bpp} metrics_str = ' / '.join((f'{metric}: {value:.5f}' for (metric, value) in metrics.items())) print(f'Image {i: 4d}: {metrics_str}, saving in {out_dir}...') for (metric, value) in metrics.items(): accumulated_metrics[metric].append(value) name = image_names.get(i, f'img_{i:010d}') Image.fromarray(inp_np).save(os.path.join(out_dir, f'{name}_inp.png')) Image.fromarray(otp_np).save(os.path.join(out_dir, f'{name}_otp_{bpp:.3f}.png')) except tf.errors.OutOfRangeError: print('No more inputs.') break print('\n'.join((f'{metric}: {np.mean(values)}' for (metric, values) in accumulated_metrics.items()))) print('Done!')
compression
positive
def test_on_pagexml_mixed_color_as_color(self): <DeepExtract> cfp = CrossFoldTrainerParams(trainer=default_pagexml_trainer_params(img_suffix='*.png', channels=3), n_folds=3, max_parallel_models=1, single_fold=[0]) checkpoint = os.path.join(this_dir, 'models', 'best.ckpt') if pretrained == 'one': cfp.weights = [checkpoint] elif pretrained == 'all': cfp.weights = [checkpoint] * cfp.n_folds elif pretrained == 'none': pass else: raise NotImplementedError if with_augmentation: for dp in cfp.trainer.scenario.data.pre_proc.processors_of_type(AugmentationProcessorParams): dp.n_augmentations = 1 cfp = cfp </DeepExtract> with tempfile.TemporaryDirectory() as d: cfp.best_models_dir = d main(cfp)
def test_on_pagexml_mixed_color_as_color(self): cfp = CrossFoldTrainerParams(trainer=default_pagexml_trainer_params(img_suffix='*.png', channels=3), n_folds=3, max_parallel_models=1, single_fold=[0]) checkpoint = os.path.join(this_dir, 'models', 'best.ckpt') if pretrained == 'one': cfp.weights = [checkpoint] elif pretrained == 'all': cfp.weights = [checkpoint] * cfp.n_folds elif pretrained == 'none': pass else: raise NotImplementedError if with_augmentation: for dp in cfp.trainer.scenario.data.pre_proc.processors_of_type(AugmentationProcessorParams): dp.n_augmentations = 1 cfp = cfp with tempfile.TemporaryDirectory() as d: cfp.best_models_dir = d main(cfp)
calamari
positive
def im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes): """Performs mask detection on the horizontally flipped image. Function signature is the same as for im_detect_mask_aug. """ im_hf = im[:, ::-1, :] boxes_hf = box_utils.flip_boxes(boxes, im.shape[1]) <DeepExtract> (im_blob, im_scale, _im_info) = blob_utils.get_image_blob(im_hf, target_scale, target_max_size) workspace.FeedBlob(core.ScopedName('data'), im_blob) workspace.RunNet(model.conv_body_net.Proto().name) im_scale = im_scale </DeepExtract> <DeepExtract> M = cfg.MRCNN.RESOLUTION if boxes_hf.shape[0] == 0: pred_masks = np.zeros((0, M, M), np.float32) masks_hf = pred_masks inputs = {'mask_rois': _get_rois_blob(boxes_hf, im_scale)} if cfg.FPN.MULTILEVEL_ROIS: _add_multilevel_rois_for_test(inputs, 'mask_rois') for (k, v) in inputs.items(): workspace.FeedBlob(core.ScopedName(k), v) workspace.RunNet(model.mask_net.Proto().name) pred_masks = workspace.FetchBlob(core.ScopedName('mask_fcn_probs')).squeeze() if cfg.MRCNN.CLS_SPECIFIC_MASK: pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M]) else: pred_masks = pred_masks.reshape([-1, 1, M, M]) masks_hf = pred_masks </DeepExtract> masks_inv = masks_hf[:, :, :, ::-1] return masks_inv
def im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes): """Performs mask detection on the horizontally flipped image. Function signature is the same as for im_detect_mask_aug. """ im_hf = im[:, ::-1, :] boxes_hf = box_utils.flip_boxes(boxes, im.shape[1]) (im_blob, im_scale, _im_info) = blob_utils.get_image_blob(im_hf, target_scale, target_max_size) workspace.FeedBlob(core.ScopedName('data'), im_blob) workspace.RunNet(model.conv_body_net.Proto().name) im_scale = im_scale M = cfg.MRCNN.RESOLUTION if boxes_hf.shape[0] == 0: pred_masks = np.zeros((0, M, M), np.float32) masks_hf = pred_masks inputs = {'mask_rois': _get_rois_blob(boxes_hf, im_scale)} if cfg.FPN.MULTILEVEL_ROIS: _add_multilevel_rois_for_test(inputs, 'mask_rois') for (k, v) in inputs.items(): workspace.FeedBlob(core.ScopedName(k), v) workspace.RunNet(model.mask_net.Proto().name) pred_masks = workspace.FetchBlob(core.ScopedName('mask_fcn_probs')).squeeze() if cfg.MRCNN.CLS_SPECIFIC_MASK: pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M]) else: pred_masks = pred_masks.reshape([-1, 1, M, M]) masks_hf = pred_masks masks_inv = masks_hf[:, :, :, ::-1] return masks_inv
AIC2018_iamai
positive
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res) def fn(): try: <DeepExtract> tasks = shakedown.get_service_tasks(service_name) matching_tasks = [t for t in tasks if t['name'].startswith(prefix)] task_ids = [t['id'] for t in matching_tasks] </DeepExtract> except dcos.errors.DCOSHTTPException: log.info('Failed to get task ids for service {}'.format(service_name)) task_ids = [] prefix_clause = '' if prefix: prefix_clause = ' starting with "{}"'.format(prefix) old_set = set(old_task_ids) new_set = set(task_ids) newly_launched_set = new_set.difference(old_set) old_remaining_set = old_set.intersection(new_set) all_updated = len(newly_launched_set) == len(new_set) and len(old_remaining_set) == 0 and (len(new_set) >= len(old_set)) if all_updated: log.info('All of the tasks{} have updated\n- Old tasks: {}\n- New tasks: {}'.format(prefix_clause, old_set, new_set)) return all_updated log.info('Waiting for tasks%s to have updated ids:\n- Old tasks (remaining): %s\n- New tasks (launched): %s', prefix_clause, old_remaining_set, newly_launched_set)
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res) def fn(): try: tasks = shakedown.get_service_tasks(service_name) matching_tasks = [t for t in tasks if t['name'].startswith(prefix)] task_ids = [t['id'] for t in matching_tasks] except dcos.errors.DCOSHTTPException: log.info('Failed to get task ids for service {}'.format(service_name)) task_ids = [] prefix_clause = '' if prefix: prefix_clause = ' starting with "{}"'.format(prefix) old_set = set(old_task_ids) new_set = set(task_ids) newly_launched_set = new_set.difference(old_set) old_remaining_set = old_set.intersection(new_set) all_updated = len(newly_launched_set) == len(new_set) and len(old_remaining_set) == 0 and (len(new_set) >= len(old_set)) if all_updated: log.info('All of the tasks{} have updated\n- Old tasks: {}\n- New tasks: {}'.format(prefix_clause, old_set, new_set)) return all_updated log.info('Waiting for tasks%s to have updated ids:\n- Old tasks (remaining): %s\n- New tasks (launched): %s', prefix_clause, old_remaining_set, newly_launched_set)
dcos-jenkins-service
positive
def observe(self, budget: float, objective: float) -> None: """Observe a new objective value. Args: budget (float): the budget used to obtain the objective (e.g., the number of epochs). objective (float): the objective value to observe (e.g, the accuracy). """ <DeepExtract> objective = objective </DeepExtract> self.observed_budgets.append(budget) self.observed_objectives.append(objective)
def observe(self, budget: float, objective: float) -> None: """Observe a new objective value. Args: budget (float): the budget used to obtain the objective (e.g., the number of epochs). objective (float): the objective value to observe (e.g, the accuracy). """ objective = objective self.observed_budgets.append(budget) self.observed_objectives.append(objective)
deephyper
positive